From 9bd4ab85e4a43945eef17a56d55db4cca6753569 Mon Sep 17 00:00:00 2001 From: Alexandre Gagnon Date: Mon, 7 Jul 2014 14:23:17 -0400 Subject: [PATCH] style: Clean up most comment delimiters Clean up the majority of Rust's comments so they match the preferred style. Discussion @ issue #15458. --- src/liballoc/arc.rs | 6 +- src/liballoc/rc.rs | 278 +++--- src/libcollections/bitv.rs | 76 +- src/libcollections/hash/mod.rs | 102 ++- src/libcollections/hash/sip.rs | 28 +- src/libcollections/lib.rs | 4 +- src/libcollections/ringbuf.rs | 16 +- src/libcollections/slice.rs | 211 +++-- src/libcollections/smallintmap.rs | 6 +- src/libcollections/str.rs | 148 ++- src/libcollections/vec.rs | 14 +- src/libcore/char.rs | 62 +- src/libcore/clone.rs | 22 +- src/libcore/finally.rs | 102 +-- src/libcore/fmt/float.rs | 58 +- src/libcore/intrinsics.rs | 62 +- src/libcore/iter.rs | 107 ++- src/libcore/kinds.rs | 19 +- src/libcore/lib.rs | 8 +- src/libcore/ops.rs | 839 ++++++++---------- src/libcore/slice.rs | 350 ++++---- src/libcore/str.rs | 22 +- src/libcoretest/char.rs | 4 +- src/libdebug/reflect.rs | 18 +- src/libdebug/repr.rs | 6 +- src/libflate/lib.rs | 14 +- src/libfourcc/lib.rs | 57 +- src/libgetopts/lib.rs | 35 +- src/libglob/lib.rs | 196 ++-- src/libgraphviz/lib.rs | 512 ++++++----- src/libgreen/context.rs | 11 +- src/libgreen/lib.rs | 2 +- src/libhexfloat/lib.rs | 51 +- src/liblibc/lib.rs | 133 ++- src/liblog/lib.rs | 191 ++-- src/libnative/io/process.rs | 30 +- src/libnum/bigint.rs | 61 +- src/libnum/complex.rs | 12 +- src/libnum/rational.rs | 8 +- src/librand/distributions/mod.rs | 19 +- src/librustc/back/link.rs | 102 ++- src/librustc/driver/driver.rs | 6 +- src/librustc/front/test.rs | 30 +- src/librustc/lib.rs | 14 +- src/librustc/lib/llvm.rs | 180 ++-- src/librustc/lint/builtin.rs | 2 +- src/librustc/lint/context.rs | 8 +- src/librustc/metadata/common.rs | 14 +- src/librustc/metadata/encoder.rs | 30 +- src/librustc/middle/astencode.rs | 120 ++- 
src/librustc/middle/borrowck/check_loans.rs | 43 +- .../borrowck/gather_loans/gather_moves.rs | 4 +- .../middle/borrowck/gather_loans/lifetime.rs | 6 +- .../middle/borrowck/gather_loans/mod.rs | 15 +- .../borrowck/gather_loans/restrictions.rs | 4 +- src/librustc/middle/borrowck/mod.rs | 2 +- src/librustc/middle/borrowck/move_data.rs | 81 +- src/librustc/middle/cfg/mod.rs | 8 +- src/librustc/middle/dataflow.rs | 12 +- src/librustc/middle/def.rs | 2 +- src/librustc/middle/expr_use_visitor.rs | 27 +- src/librustc/middle/graph.rs | 46 +- src/librustc/middle/kind.rs | 6 +- src/librustc/middle/liveness.rs | 226 +++-- src/librustc/middle/mem_categorization.rs | 188 ++-- src/librustc/middle/region.rs | 236 +++-- src/librustc/middle/resolve.rs | 24 +- src/librustc/middle/resolve_lifetime.rs | 71 +- src/librustc/middle/subst.rs | 58 +- src/librustc/middle/trans/_match.rs | 451 +++++----- src/librustc/middle/trans/adt.rs | 254 +++--- src/librustc/middle/trans/asm.rs | 4 +- src/librustc/middle/trans/base.rs | 16 +- src/librustc/middle/trans/basic_block.rs | 4 +- src/librustc/middle/trans/build.rs | 10 +- src/librustc/middle/trans/builder.rs | 10 +- src/librustc/middle/trans/callee.rs | 80 +- src/librustc/middle/trans/cleanup.rs | 229 ++--- src/librustc/middle/trans/closure.rs | 22 +- src/librustc/middle/trans/common.rs | 65 +- src/librustc/middle/trans/consts.rs | 4 +- src/librustc/middle/trans/datum.rs | 239 ++--- src/librustc/middle/trans/debuginfo.rs | 343 +++---- src/librustc/middle/trans/expr.rs | 166 ++-- src/librustc/middle/trans/foreign.rs | 51 +- src/librustc/middle/trans/meth.rs | 78 +- src/librustc/middle/trans/tvec.rs | 45 +- src/librustc/middle/trans/type_.rs | 4 +- src/librustc/middle/trans/value.rs | 8 +- src/librustc/middle/ty.rs | 270 +++--- src/librustc/middle/typeck/astconv.rs | 90 +- src/librustc/middle/typeck/check/method.rs | 259 +++--- src/librustc/middle/typeck/check/mod.rs | 265 +++--- src/librustc/middle/typeck/check/regionck.rs | 447 ++++------ 
.../middle/typeck/check/regionmanip.rs | 70 +- src/librustc/middle/typeck/check/vtable.rs | 55 +- src/librustc/middle/typeck/coherence.rs | 24 +- src/librustc/middle/typeck/collect.rs | 63 +- src/librustc/middle/typeck/infer/coercion.rs | 114 ++- .../middle/typeck/infer/error_reporting.rs | 104 +-- src/librustc/middle/typeck/infer/lattice.rs | 140 ++- src/librustc/middle/typeck/infer/mod.rs | 23 +- .../typeck/infer/region_inference/mod.rs | 105 +-- src/librustc/middle/typeck/infer/test.rs | 6 +- src/librustc/middle/typeck/infer/unify.rs | 188 ++-- src/librustc/middle/typeck/mod.rs | 154 ++-- src/librustc/middle/typeck/variance.rs | 427 +++++---- src/librustc/plugin/mod.rs | 94 +- src/librustc/util/ppaux.rs | 9 +- src/librustrt/c_str.rs | 109 ++- src/librustrt/local_data.rs | 68 +- src/librustuv/lib.rs | 46 +- src/librustuv/uvll.rs | 34 +- src/libserialize/base64.rs | 82 +- src/libserialize/hex.rs | 76 +- src/libserialize/json.rs | 250 +++--- src/libserialize/lib.rs | 4 +- src/libserialize/serialize.rs | 4 +- src/libstd/collections/mod.rs | 4 +- src/libstd/dynamic_lib.rs | 10 +- src/libstd/fmt.rs | 808 ++++++++--------- src/libstd/gc.rs | 13 +- src/libstd/hash.rs | 102 ++- src/libstd/io/fs.rs | 79 +- src/libstd/io/mod.rs | 401 +++++---- src/libstd/io/net/addrinfo.rs | 12 +- src/libstd/io/net/unix.rs | 22 +- src/libstd/io/signal.rs | 16 +- src/libstd/io/stdio.rs | 34 +- src/libstd/io/test.rs | 27 +- src/libstd/io/timer.rs | 13 +- src/libstd/io/util.rs | 2 +- src/libstd/lib.rs | 14 +- src/libstd/macros.rs | 2 +- src/libstd/num/strconv.rs | 202 ++--- src/libstd/os.rs | 98 +- src/libstd/path/mod.rs | 108 ++- src/libstd/rand/mod.rs | 129 +-- src/libstd/rt/mod.rs | 82 +- src/libstd/sync/future.rs | 82 +- src/libstd/task.rs | 8 +- src/libstd/to_str.rs | 6 +- src/libsync/comm/duplex.rs | 6 +- src/libsync/lock.rs | 42 +- src/libsync/raw.rs | 54 +- src/libsyntax/abi.rs | 12 +- src/libsyntax/ast.rs | 30 +- src/libsyntax/ast_util.rs | 10 +- src/libsyntax/attr.rs | 36 +- 
src/libsyntax/codemap.rs | 34 +- src/libsyntax/ext/asm.rs | 4 +- src/libsyntax/ext/bytes.rs | 2 +- src/libsyntax/ext/cfg.rs | 8 +- src/libsyntax/ext/deriving/cmp/ord.rs | 64 +- src/libsyntax/ext/deriving/cmp/totalord.rs | 32 +- src/libsyntax/ext/deriving/decodable.rs | 6 +- src/libsyntax/ext/deriving/encodable.rs | 143 ++- src/libsyntax/ext/deriving/generic/mod.rs | 553 ++++++------ src/libsyntax/ext/deriving/generic/ty.rs | 6 +- src/libsyntax/ext/deriving/mod.rs | 15 +- src/libsyntax/ext/env.rs | 8 +- src/libsyntax/ext/expand.rs | 4 +- src/libsyntax/ext/quote.rs | 41 +- src/libsyntax/ext/source_util.rs | 10 +- src/libsyntax/ext/tt/macro_parser.rs | 153 ++-- src/libsyntax/ext/tt/transcribe.rs | 36 +- src/libsyntax/fold.rs | 8 +- src/libsyntax/lib.rs | 14 +- src/libsyntax/parse/lexer/mod.rs | 10 +- src/libsyntax/parse/obsolete.rs | 14 +- src/libsyntax/parse/parser.rs | 107 +-- src/libsyntax/parse/token.rs | 44 +- src/libsyntax/print/pp.rs | 255 +++--- src/libsyntax/print/pprust.rs | 8 +- src/libsyntax/visit.rs | 23 +- src/libterm/terminfo/parm.rs | 20 +- src/libtime/lib.rs | 109 +-- src/libuuid/lib.rs | 88 +- src/test/auxiliary/issue-3012-1.rs | 2 +- src/test/auxiliary/kinds_in_metadata.rs | 4 +- src/test/bench/shootout-fasta.rs | 9 +- src/test/bench/shootout-pfib.rs | 13 +- src/test/bench/sudoku.rs | 10 +- src/test/run-pass/class-attributes-2.rs | 8 +- src/test/run-pass/explicit-self-generic.rs | 8 +- src/test/run-pass/hashmap-memory.rs | 9 +- src/test/run-pass/kinds-in-metadata.rs | 4 +- src/test/run-pass/rename-directory.rs | 2 +- 188 files changed, 7082 insertions(+), 8398 deletions(-) diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index 9b4f879e61ea4..f520733e9b61f 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -8,10 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Concurrency-enabled mechanisms for sharing mutable and/or immutable state - * between tasks. - */ +//! 
Concurrency-enabled mechanisms for sharing mutable and/or immutable state +//! between tasks. use core::atomics; use core::clone::Clone; diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index d97bce39c2de9..3e2573e90fae2 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -8,145 +8,145 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! Task-local reference-counted boxes (`Rc` type) - -The `Rc` type provides shared ownership of an immutable value. Destruction is -deterministic, and will occur as soon as the last owner is gone. It is marked -as non-sendable because it avoids the overhead of atomic reference counting. - -The `downgrade` method can be used to create a non-owning `Weak` pointer to the -box. A `Weak` pointer can be upgraded to an `Rc` pointer, but will return -`None` if the value has already been freed. - -For example, a tree with parent pointers can be represented by putting the -nodes behind strong `Rc` pointers, and then storing the parent pointers as -`Weak` pointers. - - -## Examples - -Consider a scenario where a set of Gadgets are owned by a given Owner. We want -to have our Gadgets point to their Owner. We can't do this with unique -ownership, because more than one gadget may belong to the same Owner. Rc -allows us to share an Owner between multiple Gadgets, and have the Owner kept -alive as long as any Gadget points at it. - -```rust -use std::rc::Rc; - -struct Owner { - name: String - // ...other fields -} - -struct Gadget { - id: int, - owner: Rc - // ...other fields -} - -fn main() { - // Create a reference counted Owner. - let gadget_owner : Rc = Rc::new( - Owner { name: String::from_str("Gadget Man") } - ); - - // Create Gadgets belonging to gadget_owner. To increment the reference - // count we clone the Rc object. 
- let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() }; - let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() }; - - drop(gadget_owner); - - // Despite dropping gadget_owner, we're still able to print out the name of - // the Owner of the Gadgets. This is because we've only dropped the - // reference count object, not the Owner it wraps. As long as there are - // other Rc objects pointing at the same Owner, it will stay alive. Notice - // that the Rc wrapper around Gadget.owner gets automatically dereferenced - // for us. - println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name); - println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name); - - // At the end of the method, gadget1 and gadget2 get destroyed, and with - // them the last counted references to our Owner. Gadget Man now gets - // destroyed as well. -} -``` - -If our requirements change, and we also need to be able to traverse from -Owner->Gadget, we will run into problems: an Rc pointer from Owner->Gadget -introduces a cycle between the objects. This means that their reference counts -can never reach 0, and the objects will stay alive: a memory leak. In order to -get around this, we can use `Weak` pointers. These are reference counted -pointers that don't keep an object alive if there are no normal `Rc` (or -*strong*) pointers left. - -Rust actually makes it somewhat difficult to produce this loop in the first -place: in order to end up with two objects that point at each other, one of -them needs to be mutable. This is problematic because Rc enforces memory -safety by only giving out shared references to the object it wraps, and these -don't allow direct mutation. We need to wrap the part of the object we wish to -mutate in a `RefCell`, which provides *interior mutability*: a method to -achieve mutability through a shared reference. `RefCell` enforces Rust's -borrowing rules at runtime. Read the `Cell` documentation for more details on -interior mutability. 
- -```rust -use std::rc::Rc; -use std::rc::Weak; -use std::cell::RefCell; - -struct Owner { - name: String, - gadgets: RefCell>> - // ...other fields -} - -struct Gadget { - id: int, - owner: Rc - // ...other fields -} - -fn main() { - // Create a reference counted Owner. Note the fact that we've put the - // Owner's vector of Gadgets inside a RefCell so that we can mutate it - // through a shared reference. - let gadget_owner : Rc = Rc::new( - Owner { - name: "Gadget Man".to_string(), - gadgets: RefCell::new(Vec::new()) - } - ); - - // Create Gadgets belonging to gadget_owner as before. - let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()}); - let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()}); - - // Add the Gadgets to their Owner. To do this we mutably borrow from - // the RefCell holding the Owner's Gadgets. - gadget_owner.gadgets.borrow_mut().push(gadget1.clone().downgrade()); - gadget_owner.gadgets.borrow_mut().push(gadget2.clone().downgrade()); - - // Iterate over our Gadgets, printing their details out - for gadget_opt in gadget_owner.gadgets.borrow().iter() { - - // gadget_opt is a Weak. Since weak pointers can't guarantee - // that their object is still alive, we need to call upgrade() on them - // to turn them into a strong reference. This returns an Option, which - // contains a reference to our object if it still exists. - let gadget = gadget_opt.upgrade().unwrap(); - println!("Gadget {} owned by {}", gadget.id, gadget.owner.name); - } - - // At the end of the method, gadget_owner, gadget1 and gadget2 get - // destroyed. There are now no strong (Rc) references to the gadgets. - // Once they get destroyed, the Gadgets get destroyed. This zeroes the - // reference count on Gadget Man, so he gets destroyed as well. -} -``` - -*/ +//! Task-local reference-counted boxes (`Rc` type) +//! +//! The `Rc` type provides shared ownership of an immutable value. Destruction +//! 
is deterministic, and will occur as soon as the last owner is gone. It is +//! marked as non-sendable because it avoids the overhead of atomic reference +//! counting. +//! +//! The `downgrade` method can be used to create a non-owning `Weak` pointer to +//! the box. A `Weak` pointer can be upgraded to an `Rc` pointer, but will +//! return `None` if the value has already been freed. +//! +//! For example, a tree with parent pointers can be represented by putting the +//! nodes behind strong `Rc` pointers, and then storing the parent pointers as +//! `Weak` pointers. +//! +//! +//! ## Examples +//! +//! Consider a scenario where a set of Gadgets are owned by a given Owner. We +//! want to have our Gadgets point to their Owner. We can't do this with unique +//! ownership, because more than one gadget may belong to the same Owner. Rc +//! allows us to share an Owner between multiple Gadgets, and have the Owner +//! kept alive as long as any Gadget points at it. +//! +//! ```rust +//! use std::rc::Rc; +//! +//! struct Owner { +//! name: String +//! // ...other fields +//! } +//! +//! struct Gadget { +//! id: int, +//! owner: Rc +//! // ...other fields +//! } +//! +//! fn main() { +//! // Create a reference counted Owner. +//! let gadget_owner : Rc = Rc::new( +//! Owner { name: String::from_str("Gadget Man") } +//! ); +//! +//! // Create Gadgets belonging to gadget_owner. To increment the reference +//! // count we clone the Rc object. +//! let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() }; +//! let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() }; +//! +//! drop(gadget_owner); +//! +//! // Despite dropping gadget_owner, we're still able to print out the name +//! // of the Owner of the Gadgets. This is because we've only dropped the +//! // reference count object, not the Owner it wraps. As long as there are +//! // other Rc objects pointing at the same Owner, it will stay alive. +//! 
// Notice that the Rc wrapper around Gadget.owner gets automatically +//! // dereferenced for us. +//! println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name); +//! println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name); +//! +//! // At the end of the method, gadget1 and gadget2 get destroyed, and with +//! // them the last counted references to our Owner. Gadget Man now gets +//! // destroyed as well. +//! } +//! ``` +//! +//! If our requirements change, and we also need to be able to traverse from +//! Owner->Gadget, we will run into problems: an Rc pointer from Owner->Gadget +//! introduces a cycle between the objects. This means that their reference +//! counts can never reach 0, and the objects will stay alive: a memory leak. +//! In order to get around this, we can use `Weak` pointers. These are +//! reference counted pointers that don't keep an object alive if there are no +//! normal `Rc` (or *strong*) pointers left. +//! +//! Rust actually makes it somewhat difficult to produce this loop in the first +//! place: in order to end up with two objects that point at each other, one of +//! them needs to be mutable. This is problematic because Rc enforces memory +//! safety by only giving out shared references to the object it wraps, and +//! these don't allow direct mutation. We need to wrap the part of the object +//! we wish to mutate in a `RefCell`, which provides *interior mutability*: a +//! method to achieve mutability through a shared reference. `RefCell` enforces +//! Rust's borrowing rules at runtime. Read the `Cell` documentation for more +//! details on interior mutability. +//! +//! ```rust +//! use std::rc::Rc; +//! use std::rc::Weak; +//! use std::cell::RefCell; +//! +//! struct Owner { +//! name: String, +//! gadgets: RefCell>> +//! // ...other fields +//! } +//! +//! struct Gadget { +//! id: int, +//! owner: Rc +//! // ...other fields +//! } +//! +//! fn main() { +//! // Create a reference counted Owner. 
Note the fact that we've put the +//! // Owner's vector of Gadgets inside a RefCell so that we can mutate it +//! // through a shared reference. +//! let gadget_owner : Rc = Rc::new( +//! Owner { +//! name: "Gadget Man".to_string(), +//! gadgets: RefCell::new(Vec::new()) +//! } +//! ); +//! +//! // Create Gadgets belonging to gadget_owner as before. +//! let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()}); +//! let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()}); +//! +//! // Add the Gadgets to their Owner. To do this we mutably borrow from +//! // the RefCell holding the Owner's Gadgets. +//! gadget_owner.gadgets.borrow_mut().push(gadget1.clone().downgrade()); +//! gadget_owner.gadgets.borrow_mut().push(gadget2.clone().downgrade()); +//! +//! // Iterate over our Gadgets, printing their details out +//! for gadget_opt in gadget_owner.gadgets.borrow().iter() { +//! +//! // gadget_opt is a Weak. Since weak pointers can't guarantee +//! // that their object is still alive, we need to call upgrade() on +//! // them to turn them into a strong reference. This returns an +//! // Option, which contains a reference to our object if it still +//! // exists. +//! let gadget = gadget_opt.upgrade().unwrap(); +//! println!("Gadget {} owned by {}", gadget.id, gadget.owner.name); +//! } +//! +//! // At the end of the method, gadget_owner, gadget1 and gadget2 get +//! // destroyed. There are now no strong (Rc) references to the gadgets. +//! // Once they get destroyed, the Gadgets get destroyed. This zeroes the +//! // reference count on Gadget Man, so he gets destroyed as well. +//! } +//! ``` use core::mem::transmute; use core::cell::Cell; diff --git a/src/libcollections/bitv.rs b/src/libcollections/bitv.rs index 6d7c91ccfee77..4c70865d8044a 100644 --- a/src/libcollections/bitv.rs +++ b/src/libcollections/bitv.rs @@ -141,23 +141,19 @@ impl Bitv { } } - /** - * Calculates the union of two bitvectors - * - * Sets `self` to the union of `self` and `v1`. 
Both bitvectors must be - * the same length. Returns `true` if `self` changed. - */ + /// Calculates the union of two bitvectors + /// + /// Sets `self` to the union of `self` and `v1`. Both bitvectors must be + /// the same length. Returns `true` if `self` changed. #[inline] pub fn union(&mut self, other: &Bitv) -> bool { self.process(other, |w1, w2| w1 | w2) } - /** - * Calculates the intersection of two bitvectors - * - * Sets `self` to the intersection of `self` and `v1`. Both bitvectors - * must be the same length. Returns `true` if `self` changed. - */ + /// Calculates the intersection of two bitvectors + /// + /// Sets `self` to the intersection of `self` and `v1`. Both bitvectors + /// must be the same length. Returns `true` if `self` changed. #[inline] pub fn intersect(&mut self, other: &Bitv) -> bool { self.process(other, |w1, w2| w1 & w2) @@ -173,11 +169,9 @@ impl Bitv { x != 0 } - /** - * Set the value of a bit at a given index - * - * `i` must be less than the length of the bitvector. - */ + /// Set the value of a bit at a given index + /// + /// `i` must be less than the length of the bitvector. #[inline] pub fn set(&mut self, i: uint, x: bool) { assert!(i < self.nbits); @@ -200,15 +194,13 @@ impl Bitv { for w in self.storage.mut_iter() { *w = !*w; } } - /** - * Calculate the difference between two bitvectors - * - * Sets each element of `v0` to the value of that element minus the - * element of `v1` at the same index. Both bitvectors must be the same - * length. - * - * Returns `true` if `v0` was changed. - */ + /// Calculate the difference between two bitvectors + /// + /// Sets each element of `v0` to the value of that element minus the + /// element of `v1` at the same index. Both bitvectors must be the same + /// length. + /// + /// Returns `true` if `v0` was changed. 
#[inline] pub fn difference(&mut self, other: &Bitv) -> bool { self.process(other, |w1, w2| w1 & !w2) @@ -256,12 +248,10 @@ impl Bitv { !self.none() } - /** - * Organise the bits into bytes, such that the first bit in the - * `Bitv` becomes the high-order bit of the first byte. If the - * size of the `Bitv` is not a multiple of 8 then trailing bits - * will be filled-in with false/0 - */ + /// Organise the bits into bytes, such that the first bit in the + /// `Bitv` becomes the high-order bit of the first byte. If the + /// size of the `Bitv` is not a multiple of 8 then trailing bits + /// will be filled-in with false/0 pub fn to_bytes(&self) -> Vec { fn bit (bitv: &Bitv, byte: uint, bit: uint) -> u8 { let offset = byte * 8 + bit; @@ -286,11 +276,9 @@ impl Bitv { ) } - /** - * Compare a bitvector to a vector of `bool`. - * - * Both the bitvector and vector must have the same length. - */ + /// Compare a bitvector to a vector of `bool`. + /// + /// Both the bitvector and vector must have the same length. pub fn eq_vec(&self, v: &[bool]) -> bool { assert_eq!(self.nbits, v.len()); let mut i = 0; @@ -428,11 +416,9 @@ impl Bitv { } } -/** - * Transform a byte-vector into a `Bitv`. Each byte becomes 8 bits, - * with the most significant bits of each byte coming first. Each - * bit becomes `true` if equal to 1 or `false` if equal to 0. - */ +/// Transform a byte-vector into a `Bitv`. Each byte becomes 8 bits, +/// with the most significant bits of each byte coming first. Each +/// bit becomes `true` if equal to 1 or `false` if equal to 0. pub fn from_bytes(bytes: &[u8]) -> Bitv { from_fn(bytes.len() * 8, |i| { let b = bytes[i / 8] as uint; @@ -441,10 +427,8 @@ pub fn from_bytes(bytes: &[u8]) -> Bitv { }) } -/** - * Create a `Bitv` of the specified length where the value at each - * index is `f(index)`. - */ +/// Create a `Bitv` of the specified length where the value at each +/// index is `f(index)`. 
pub fn from_fn(len: uint, f: |index: uint| -> bool) -> Bitv { let mut bitv = Bitv::with_capacity(len, false); for i in range(0u, len) { diff --git a/src/libcollections/hash/mod.rs b/src/libcollections/hash/mod.rs index e3d1c9a3216bc..3ec9860bec085 100644 --- a/src/libcollections/hash/mod.rs +++ b/src/libcollections/hash/mod.rs @@ -8,58 +8,56 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Generic hashing support. - * - * This module provides a generic way to compute the hash of a value. The - * simplest way to make a type hashable is to use `#[deriving(Hash)]`: - * - * # Example - * - * ```rust - * use std::hash; - * use std::hash::Hash; - * - * #[deriving(Hash)] - * struct Person { - * id: uint, - * name: String, - * phone: u64, - * } - * - * let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 }; - * let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 }; - * - * assert!(hash::hash(&person1) != hash::hash(&person2)); - * ``` - * - * If you need more control over how a value is hashed, you need to implement - * the trait `Hash`: - * - * ```rust - * use std::hash; - * use std::hash::Hash; - * use std::hash::sip::SipState; - * - * struct Person { - * id: uint, - * name: String, - * phone: u64, - * } - * - * impl Hash for Person { - * fn hash(&self, state: &mut SipState) { - * self.id.hash(state); - * self.phone.hash(state); - * } - * } - * - * let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 }; - * let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 }; - * - * assert!(hash::hash(&person1) == hash::hash(&person2)); - * ``` - */ +//! Generic hashing support. +//! +//! This module provides a generic way to compute the hash of a value. The +//! simplest way to make a type hashable is to use `#[deriving(Hash)]`: +//! +//! # Example +//! +//! ```rust +//! use std::hash; +//! use std::hash::Hash; +//! +//! 
#[deriving(Hash)] +//! struct Person { +//! id: uint, +//! name: String, +//! phone: u64, +//! } +//! +//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 }; +//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 }; +//! +//! assert!(hash::hash(&person1) != hash::hash(&person2)); +//! ``` +//! +//! If you need more control over how a value is hashed, you need to implement +//! the trait `Hash`: +//! +//! ```rust +//! use std::hash; +//! use std::hash::Hash; +//! use std::hash::sip::SipState; +//! +//! struct Person { +//! id: uint, +//! name: String, +//! phone: u64, +//! } +//! +//! impl Hash for Person { +//! fn hash(&self, state: &mut SipState) { +//! self.id.hash(state); +//! self.phone.hash(state); +//! } +//! } +//! +//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 }; +//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 }; +//! +//! assert!(hash::hash(&person1) == hash::hash(&person2)); +//! ``` #![allow(unused_must_use)] diff --git a/src/libcollections/hash/sip.rs b/src/libcollections/hash/sip.rs index 4fd98538af7dd..60cd95e9bdc64 100644 --- a/src/libcollections/hash/sip.rs +++ b/src/libcollections/hash/sip.rs @@ -8,21 +8,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Implementation of SipHash 2-4 - * - * See: http://131002.net/siphash/ - * - * Consider this as a main "general-purpose" hash for all hashtables: it - * runs at good speed (competitive with spooky and city) and permits - * strong _keyed_ hashing. Key your hashtables from a strong RNG, - * such as `rand::Rng`. - * - * Although the SipHash algorithm is considered to be cryptographically - * strong, this implementation has not been reviewed for such purposes. - * As such, all cryptographic uses of this implementation are strongly - * discouraged. - */ +//! Implementation of SipHash 2-4 +//! +//! 
See: http://131002.net/siphash/ +//! +//! Consider this as a main "general-purpose" hash for all hashtables: it +//! runs at good speed (competitive with spooky and city) and permits +//! strong _keyed_ hashing. Key your hashtables from a strong RNG, +//! such as `rand::Rng`. +//! +//! Although the SipHash algorithm is considered to be cryptographically +//! strong, this implementation has not been reviewed for such purposes. +//! As such, all cryptographic uses of this implementation are strongly +//! discouraged. use core::prelude::*; diff --git a/src/libcollections/lib.rs b/src/libcollections/lib.rs index d9a62cd9acd76..7a092df4c3e9c 100644 --- a/src/libcollections/lib.rs +++ b/src/libcollections/lib.rs @@ -8,9 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Collection types. - */ +//! Collection types. #![crate_id = "collections#0.11.0"] // NOTE: remove after stage0 #![crate_name = "collections"] diff --git a/src/libcollections/ringbuf.rs b/src/libcollections/ringbuf.rs index 5e19accdd6715..9efb6232cfae6 100644 --- a/src/libcollections/ringbuf.rs +++ b/src/libcollections/ringbuf.rs @@ -339,19 +339,17 @@ fn grow(nelts: uint, loptr: &mut uint, elts: &mut Vec>) { let newlen = nelts * 2; elts.reserve(newlen); - /* fill with None */ + // fill with None for _ in range(elts.len(), elts.capacity()) { elts.push(None); } - /* - Move the shortest half into the newly reserved area. - lo ---->| - nelts ----------->| - [o o o|o o o o o] - A [. . .|o o o o o o o o|. . . . .] - B [o o o|. . . . . . . .|o o o o o] - */ + // Move the shortest half into the newly reserved area. + // lo ---->| + // nelts ----------->| + // [o o o|o o o o o] + // A [. . .|o o o o o o o o|. . . . .] + // B [o o o|. . . . . . . 
.|o o o o o] assert!(newlen - nelts/2 >= nelts); if lo <= (nelts - lo) { // A diff --git a/src/libcollections/slice.rs b/src/libcollections/slice.rs index 40cf8495a4059..7ef1d72785834 100644 --- a/src/libcollections/slice.rs +++ b/src/libcollections/slice.rs @@ -8,94 +8,93 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Utilities for vector manipulation - -The `vec` module contains useful code to help work with vector values. -Vectors are Rust's list type. Vectors contain zero or more values of -homogeneous types: - -```rust -let int_vector = [1i, 2i, 3i]; -let str_vector = ["one", "two", "three"]; -``` - -This is a big module, but for a high-level overview: - -## Structs - -Several structs that are useful for vectors, such as `Items`, which -represents iteration over a vector. - -## Traits - -A number of traits add methods that allow you to accomplish tasks with vectors. - -Traits defined for the `&[T]` type (a vector slice), have methods that can be -called on either owned vectors, denoted `~[T]`, or on vector slices themselves. -These traits include `ImmutableVector`, and `MutableVector` for the `&mut [T]` -case. - -An example is the method `.slice(a, b)` that returns an immutable "view" into -a vector or a vector slice from the index interval `[a, b)`: - -```rust -let numbers = [0i, 1i, 2i]; -let last_numbers = numbers.slice(1, 3); -// last_numbers is now &[1i, 2i] -``` - -Traits defined for the `~[T]` type, like `OwnedVector`, can only be called -on such vectors. These methods deal with adding elements or otherwise changing -the allocation of the vector. 
- -An example is the method `.push(element)` that will add an element at the end -of the vector: - -```rust -let mut numbers = vec![0i, 1i, 2i]; -numbers.push(7); -// numbers is now vec![0i, 1i, 2i, 7i]; -``` - -## Implementations of other traits - -Vectors are a very useful type, and so there's several implementations of -traits from other modules. Some notable examples: - -* `Clone` -* `Eq`, `Ord`, `Eq`, `Ord` -- vectors can be compared, - if the element type defines the corresponding trait. - -## Iteration - -The method `iter()` returns an iteration value for a vector or a vector slice. -The iterator yields references to the vector's elements, so if the element -type of the vector is `int`, the element type of the iterator is `&int`. - -```rust -let numbers = [0i, 1i, 2i]; -for &x in numbers.iter() { - println!("{} is a number!", x); -} -``` - -* `.mut_iter()` returns an iterator that allows modifying each value. -* `.move_iter()` converts an owned vector into an iterator that - moves out a value from the vector each iteration. -* Further iterators exist that split, chunk or permute the vector. - -## Function definitions - -There are a number of free functions that create or take vectors, for example: - -* Creating a vector, like `from_elem` and `from_fn` -* Creating a vector with a given size: `with_capacity` -* Modifying a vector and returning it, like `append` -* Operations on paired elements, like `unzip`. - -*/ +//! Utilities for vector manipulation +//! +//! The `vec` module contains useful code to help work with vector values. +//! Vectors are Rust's list type. Vectors contain zero or more values of +//! homogeneous types: +//! +//! ```rust +//! let int_vector = [1i, 2i, 3i]; +//! let str_vector = ["one", "two", "three"]; +//! ``` +//! +//! This is a big module, but for a high-level overview: +//! +//! ## Structs +//! +//! Several structs that are useful for vectors, such as `Items`, which +//! represents iteration over a vector. +//! +//! ## Traits +//! 
+//! A number of traits add methods that allow you to accomplish tasks with +//! vectors. +//! +//! Traits defined for the `&[T]` type (a vector slice), have methods that can +//! be called on either owned vectors, denoted `~[T]`, or on vector slices +//! themselves. These traits include `ImmutableVector`, and `MutableVector` for +//! the `&mut [T]` case. +//! +//! An example is the method `.slice(a, b)` that returns an immutable "view" +//! into a vector or a vector slice from the index interval `[a, b)`: +//! +//! ```rust +//! let numbers = [0i, 1i, 2i]; +//! let last_numbers = numbers.slice(1, 3); +//! // last_numbers is now &[1i, 2i] +//! ``` +//! +//! Traits defined for the `~[T]` type, like `OwnedVector`, can only be called +//! on such vectors. These methods deal with adding elements or otherwise +//! changing the allocation of the vector. +//! +//! An example is the method `.push(element)` that will add an element at the +//! end of the vector: +//! +//! ```rust +//! let mut numbers = vec![0i, 1i, 2i]; +//! numbers.push(7); +//! // numbers is now vec![0i, 1i, 2i, 7i]; +//! ``` +//! +//! ## Implementations of other traits +//! +//! Vectors are a very useful type, and so there's several implementations of +//! traits from other modules. Some notable examples: +//! +//! * `Clone` +//! * `Eq`, `Ord`, `Eq`, `Ord` -- vectors can be compared, +//! if the element type defines the corresponding trait. +//! +//! ## Iteration +//! +//! The method `iter()` returns an iteration value for a vector or a vector +//! slice. The iterator yields references to the vector's elements, so if the +//! element type of the vector is `int`, the element type of the iterator is +//! `&int`. +//! +//! ```rust +//! let numbers = [0i, 1i, 2i]; +//! for &x in numbers.iter() { +//! println!("{} is a number!", x); +//! } +//! ``` +//! +//! * `.mut_iter()` returns an iterator that allows modifying each value. +//! * `.move_iter()` converts an owned vector into an iterator that +//! 
moves out a value from the vector each iteration. +//! * Further iterators exist that split, chunk or permute the vector. +//! +//! ## Function definitions +//! +//! There are a number of free functions that create or take vectors, for +//! example: +//! +//! * Creating a vector, like `from_elem` and `from_fn` +//! * Creating a vector with a given size: `with_capacity` +//! * Modifying a vector and returning it, like `append` +//! * Operations on paired elements, like `unzip`. #![doc(primitive = "slice")] @@ -556,19 +555,17 @@ pub trait MutableVectorAllocating<'a, T> { /// ``` fn sort_by(self, compare: |&T, &T| -> Ordering); - /** - * Consumes `src` and moves as many elements as it can into `self` - * from the range [start,end). - * - * Returns the number of elements copied (the shorter of self.len() - * and end - start). - * - * # Arguments - * - * * src - A mutable vector of `T` - * * start - The index into `src` to start copying from - * * end - The index into `str` to stop copying from - */ + /// Consumes `src` and moves as many elements as it can into `self` + /// from the range [start,end). + /// + /// Returns the number of elements copied (the shorter of self.len() + /// and end - start). + /// + /// # Arguments + /// + /// * src - A mutable vector of `T` + /// * start - The index into `src` to start copying from + /// * end - The index into `str` to stop copying from fn move_from(self, src: Vec, start: uint, end: uint) -> uint; } @@ -1080,10 +1077,8 @@ mod tests { v1.dedup(); let mut v2 = vec![box 1i, box 2, box 3, box 3]; v2.dedup(); - /* - * If the boxed pointers were leaked or otherwise misused, valgrind - * and/or rustrt should raise errors. - */ + // If the boxed pointers were leaked or otherwise misused, valgrind + // and/or rustrt should raise errors. 
} #[test] @@ -1094,10 +1089,8 @@ mod tests { v1.dedup(); let mut v2 = vec![box 1i, box 2, box 3, box 3]; v2.dedup(); - /* - * If the pointers were leaked or otherwise misused, valgrind and/or - * rustrt should raise errors. - */ + // If the pointers were leaked or otherwise misused, valgrind and/or + // rustrt should raise errors. } #[test] diff --git a/src/libcollections/smallintmap.rs b/src/libcollections/smallintmap.rs index 14e41f3fef965..1ddc45c3bd76e 100644 --- a/src/libcollections/smallintmap.rs +++ b/src/libcollections/smallintmap.rs @@ -8,10 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * A simple map based on a vector for small integer keys. Space requirements - * are O(highest integer key). - */ +//! A simple map based on a vector for small integer keys. Space requirements +//! are O(highest integer key). #![allow(missing_doc)] diff --git a/src/libcollections/str.rs b/src/libcollections/str.rs index ddba4b34e3a2a..222f52e6f2f3e 100644 --- a/src/libcollections/str.rs +++ b/src/libcollections/str.rs @@ -8,62 +8,58 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Unicode string manipulation (`str` type) - -# Basic Usage - -Rust's string type is one of the core primitive types of the language. While -represented by the name `str`, the name `str` is not actually a valid type in -Rust. Each string must also be decorated with a pointer. `String` is used -for an owned string, so there is only one commonly-used `str` type in Rust: -`&str`. - -`&str` is the borrowed string type. This type of string can only be created -from other strings, unless it is a static string (see below). As the word -"borrowed" implies, this type of string is owned elsewhere, and this string -cannot be moved out of. - -As an example, here's some code that uses a string. 
- -```rust -fn main() { - let borrowed_string = "This string is borrowed with the 'static lifetime"; -} -``` - -From the example above, you can see that Rust's string literals have the -`'static` lifetime. This is akin to C's concept of a static string. - -String literals are allocated statically in the rodata of the -executable/library. The string then has the type `&'static str` meaning that -the string is valid for the `'static` lifetime, otherwise known as the -lifetime of the entire program. As can be inferred from the type, these static -strings are not mutable. - -# Mutability - -Many languages have immutable strings by default, and Rust has a particular -flavor on this idea. As with the rest of Rust types, strings are immutable by -default. If a string is declared as `mut`, however, it may be mutated. This -works the same way as the rest of Rust's type system in the sense that if -there's a mutable reference to a string, there may only be one mutable reference -to that string. With these guarantees, strings can easily transition between -being mutable/immutable with the same benefits of having mutable strings in -other languages. - -# Representation - -Rust's string type, `str`, is a sequence of unicode codepoints encoded as a -stream of UTF-8 bytes. All safely-created strings are guaranteed to be validly -encoded UTF-8 sequences. Additionally, strings are not null-terminated -and can contain null codepoints. - -The actual representation of strings have direct mappings to vectors: `&str` -is the same as `&[u8]`. - -*/ +//! Unicode string manipulation (`str` type) +//! +//! # Basic Usage +//! +//! Rust's string type is one of the core primitive types of the language. While +//! represented by the name `str`, the name `str` is not actually a valid type +//! in Rust. Each string must also be decorated with a pointer. `String` is used +//! for an owned string, so there is only one commonly-used `str` type in Rust: +//! `&str`. +//! +//! 
`&str` is the borrowed string type. This type of string can only be created +//! from other strings, unless it is a static string (see below). As the word +//! "borrowed" implies, this type of string is owned elsewhere, and this string +//! cannot be moved out of. +//! +//! As an example, here's some code that uses a string. +//! +//! ```rust +//! fn main() { +//! let borrowed_string = "This string is borrowed with the 'static lifetime"; +//! } +//! ``` +//! +//! From the example above, you can see that Rust's string literals have the +//! `'static` lifetime. This is akin to C's concept of a static string. +//! +//! String literals are allocated statically in the rodata of the +//! executable/library. The string then has the type `&'static str` meaning that +//! the string is valid for the `'static` lifetime, otherwise known as the +//! lifetime of the entire program. As can be inferred from the type, these +//! static strings are not mutable. +//! +//! # Mutability +//! +//! Many languages have immutable strings by default, and Rust has a particular +//! flavor on this idea. As with the rest of Rust types, strings are immutable +//! by default. If a string is declared as `mut`, however, it may be mutated. +//! This works the same way as the rest of Rust's type system in the sense that +//! if there's a mutable reference to a string, there may only be one mutable +//! reference to that string. With these guarantees, strings can easily +//! transition between being mutable/immutable with the same benefits of having +//! mutable strings in other languages. +//! +//! # Representation +//! +//! Rust's string type, `str`, is a sequence of unicode codepoints encoded as a +//! stream of UTF-8 bytes. All safely-created strings are guaranteed to be +//! validly encoded UTF-8 sequences. Additionally, strings are not +//! null-terminated and can contain null codepoints. +//! +//! The actual representation of strings have direct mappings to vectors: `&str` +//! 
is the same as `&[u8]`. #![doc(primitive = "str")] @@ -89,9 +85,7 @@ pub use core::str::{Utf16Item, ScalarValue, LoneSurrogate, utf16_items}; pub use core::str::{truncate_utf16_at_nul, utf8_char_width, CharRange}; pub use core::str::{Str, StrSlice}; -/* -Section: Creating a string -*/ +// Section: Creating a string /// Consumes a vector of bytes to create a new utf-8 string. /// @@ -243,9 +237,7 @@ impl<'a, S: Str> StrVector for Vec { } } -/* -Section: Iterators -*/ +// Section: Iterators // Helper functions used for Unicode normalization fn canonical_sort(comb: &mut [(char, u8)]) { @@ -363,9 +355,7 @@ pub fn replace(s: &str, from: &str, to: &str) -> String { result } -/* -Section: Misc -*/ +// Section: Misc /// Decode a UTF-16 encoded vector `v` into a string, returning `None` /// if `v` contains any invalid data. @@ -553,9 +543,7 @@ pub fn from_utf8_lossy<'a>(v: &'a [u8]) -> MaybeOwned<'a> { Owned(res.into_string()) } -/* -Section: MaybeOwned -*/ +// Section: MaybeOwned /// A `MaybeOwned` is a string that can hold either a `String` or a `&str`. /// This can be useful as an optimization when an allocation is sometimes @@ -767,9 +755,7 @@ pub mod raw { } } -/* -Section: Trait implementations -*/ +// Section: Trait implementations /// Any string that can be represented as a slice pub trait StrAllocating: Str { @@ -1302,16 +1288,16 @@ mod tests { assert_eq!("中", ss.slice(0u, 3u)); assert_eq!("华V", ss.slice(3u, 7u)); assert_eq!("", ss.slice(3u, 3u)); - /*0: 中 - 3: 华 - 6: V - 7: i - 8: ệ - 11: t - 12: - 13: N - 14: a - 15: m */ + // 0: 中 + // 3: 华 + // 6: V + // 7: i + // 8: ệ + // 11: t + // 12: + // 13: N + // 14: a + // 15: m } #[test] diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs index d53ecabd5a9cb..46e2231cd5c33 100644 --- a/src/libcollections/vec.rs +++ b/src/libcollections/vec.rs @@ -1518,14 +1518,12 @@ impl Drop for MoveItems { } } -/** - * Convert an iterator of pairs into a pair of vectors. 
- * - * Returns a tuple containing two vectors where the i-th element of the first - * vector contains the first element of the i-th tuple of the input iterator, - * and the i-th element of the second vector contains the second element - * of the i-th tuple of the input iterator. - */ +/// Convert an iterator of pairs into a pair of vectors. +/// +/// Returns a tuple containing two vectors where the i-th element of the first +/// vector contains the first element of the i-th tuple of the input iterator, +/// and the i-th element of the second vector contains the second element +/// of the i-th tuple of the input iterator. pub fn unzip>(mut iter: V) -> (Vec, Vec) { let (lo, _) = iter.size_hint(); let mut ts = Vec::with_capacity(lo); diff --git a/src/libcore/char.rs b/src/libcore/char.rs index da67772d0f122..d59efaa3e3806 100644 --- a/src/libcore/char.rs +++ b/src/libcore/char.rs @@ -46,38 +46,36 @@ static MAX_TWO_B: u32 = 0x800u32; static MAX_THREE_B: u32 = 0x10000u32; static MAX_FOUR_B: u32 = 0x200000u32; -/* - Lu Uppercase_Letter an uppercase letter - Ll Lowercase_Letter a lowercase letter - Lt Titlecase_Letter a digraphic character, with first part uppercase - Lm Modifier_Letter a modifier letter - Lo Other_Letter other letters, including syllables and ideographs - Mn Nonspacing_Mark a nonspacing combining mark (zero advance width) - Mc Spacing_Mark a spacing combining mark (positive advance width) - Me Enclosing_Mark an enclosing combining mark - Nd Decimal_Number a decimal digit - Nl Letter_Number a letterlike numeric character - No Other_Number a numeric character of other type - Pc Connector_Punctuation a connecting punctuation mark, like a tie - Pd Dash_Punctuation a dash or hyphen punctuation mark - Ps Open_Punctuation an opening punctuation mark (of a pair) - Pe Close_Punctuation a closing punctuation mark (of a pair) - Pi Initial_Punctuation an initial quotation mark - Pf Final_Punctuation a final quotation mark - Po Other_Punctuation a punctuation mark 
of other type - Sm Math_Symbol a symbol of primarily mathematical use - Sc Currency_Symbol a currency sign - Sk Modifier_Symbol a non-letterlike modifier symbol - So Other_Symbol a symbol of other type - Zs Space_Separator a space character (of various non-zero widths) - Zl Line_Separator U+2028 LINE SEPARATOR only - Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only - Cc Control a C0 or C1 control code - Cf Format a format control character - Cs Surrogate a surrogate code point - Co Private_Use a private-use character - Cn Unassigned a reserved unassigned code point or a noncharacter -*/ +// Lu Uppercase_Letter an uppercase letter +// Ll Lowercase_Letter a lowercase letter +// Lt Titlecase_Letter a digraphic character, with first part uppercase +// Lm Modifier_Letter a modifier letter +// Lo Other_Letter other letters, including syllables and ideographs +// Mn Nonspacing_Mark a nonspacing combining mark (zero advance width) +// Mc Spacing_Mark a spacing combining mark (positive advance width) +// Me Enclosing_Mark an enclosing combining mark +// Nd Decimal_Number a decimal digit +// Nl Letter_Number a letterlike numeric character +// No Other_Number a numeric character of other type +// Pc Connector_Punctuation a connecting punctuation mark, like a tie +// Pd Dash_Punctuation a dash or hyphen punctuation mark +// Ps Open_Punctuation an opening punctuation mark (of a pair) +// Pe Close_Punctuation a closing punctuation mark (of a pair) +// Pi Initial_Punctuation an initial quotation mark +// Pf Final_Punctuation a final quotation mark +// Po Other_Punctuation a punctuation mark of other type +// Sm Math_Symbol a symbol of primarily mathematical use +// Sc Currency_Symbol a currency sign +// Sk Modifier_Symbol a non-letterlike modifier symbol +// So Other_Symbol a symbol of other type +// Zs Space_Separator a space character (of various non-zero widths) +// Zl Line_Separator U+2028 LINE SEPARATOR only +// Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only 
+// Cc Control a C0 or C1 control code +// Cf Format a format control character +// Cs Surrogate a surrogate code point +// Co Private_Use a private-use character +// Cn Unassigned a reserved unassigned code point or a noncharacter /// The highest valid code point pub static MAX: char = '\U0010ffff'; diff --git a/src/libcore/clone.rs b/src/libcore/clone.rs index 247f63115a783..558b9c137689a 100644 --- a/src/libcore/clone.rs +++ b/src/libcore/clone.rs @@ -8,18 +8,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! The `Clone` trait for types that cannot be 'implicitly copied' - -In Rust, some simple types are "implicitly copyable" and when you -assign them or pass them as arguments, the receiver will get a copy, -leaving the original value in place. These types do not require -allocation to copy and do not have finalizers (i.e. they do not -contain owned boxes or implement `Drop`), so the compiler considers -them cheap and safe to copy. For other types copies must be made -explicitly, by convention implementing the `Clone` trait and calling -the `clone` method. - -*/ +//! The `Clone` trait for types that cannot be 'implicitly copied' +//! +//! In Rust, some simple types are "implicitly copyable" and when you +//! assign them or pass them as arguments, the receiver will get a copy, +//! leaving the original value in place. These types do not require +//! allocation to copy and do not have finalizers (i.e. they do not +//! contain owned boxes or implement `Drop`), so the compiler considers +//! them cheap and safe to copy. For other types copies must be made +//! explicitly, by convention implementing the `Clone` trait and calling +//! the `clone` method. #![unstable] diff --git a/src/libcore/finally.rs b/src/libcore/finally.rs index 514b3f90df7c0..f03f023fd2b07 100644 --- a/src/libcore/finally.rs +++ b/src/libcore/finally.rs @@ -8,27 +8,25 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -/*! -The Finally trait provides a method, `finally` on -stack closures that emulates Java-style try/finally blocks. - -Using the `finally` method is sometimes convenient, but the type rules -prohibit any shared, mutable state between the "try" case and the -"finally" case. For advanced cases, the `try_finally` function can -also be used. See that function for more details. - -# Example - -``` -use std::finally::Finally; - -(|| { - // ... -}).finally(|| { - // this code is always run -}) -``` -*/ +//! The Finally trait provides a method, `finally` on +//! stack closures that emulates Java-style try/finally blocks. +//! +//! Using the `finally` method is sometimes convenient, but the type rules +//! prohibit any shared, mutable state between the "try" case and the +//! "finally" case. For advanced cases, the `try_finally` function can +//! also be used. See that function for more details. +//! +//! # Example +//! +//! ``` +//! use std::finally::Finally; +//! +//! (|| { +//! // ... +//! }).finally(|| { +//! // this code is always run +//! }) +//! ``` #![experimental] @@ -58,38 +56,36 @@ impl Finally for fn() -> T { } } -/** - * The most general form of the `finally` functions. The function - * `try_fn` will be invoked first; whether or not it fails, the - * function `finally_fn` will be invoked next. The two parameters - * `mutate` and `drop` are used to thread state through the two - * closures. `mutate` is used for any shared, mutable state that both - * closures require access to; `drop` is used for any state that the - * `try_fn` requires ownership of. - * - * **WARNING:** While shared, mutable state between the try and finally - * function is often necessary, one must be very careful; the `try` - * function could have failed at any point, so the values of the shared - * state may be inconsistent. 
- * - * # Example - * - * ``` - * use std::finally::try_finally; - * - * struct State<'a> { buffer: &'a mut [u8], len: uint } - * # let mut buf = []; - * let mut state = State { buffer: buf, len: 0 }; - * try_finally( - * &mut state, (), - * |state, ()| { - * // use state.buffer, state.len - * }, - * |state| { - * // use state.buffer, state.len to cleanup - * }) - * ``` - */ +/// The most general form of the `finally` functions. The function +/// `try_fn` will be invoked first; whether or not it fails, the +/// function `finally_fn` will be invoked next. The two parameters +/// `mutate` and `drop` are used to thread state through the two +/// closures. `mutate` is used for any shared, mutable state that both +/// closures require access to; `drop` is used for any state that the +/// `try_fn` requires ownership of. +/// +/// **WARNING:** While shared, mutable state between the try and finally +/// function is often necessary, one must be very careful; the `try` +/// function could have failed at any point, so the values of the shared +/// state may be inconsistent. +/// +/// # Example +/// +/// ``` +/// use std::finally::try_finally; +/// +/// struct State<'a> { buffer: &'a mut [u8], len: uint } +/// # let mut buf = []; +/// let mut state = State { buffer: buf, len: 0 }; +/// try_finally( +/// &mut state, (), +/// |state, ()| { +/// // use state.buffer, state.len +/// }, +/// |state| { +/// // use state.buffer, state.len to cleanup +/// }) +/// ``` pub fn try_finally(mutate: &mut T, drop: U, try_fn: |&mut T, U| -> R, diff --git a/src/libcore/fmt/float.rs b/src/libcore/fmt/float.rs index f326195be1607..60679b8844245 100644 --- a/src/libcore/fmt/float.rs +++ b/src/libcore/fmt/float.rs @@ -67,36 +67,34 @@ pub enum SignFormat { static DIGIT_P_RADIX: uint = ('p' as uint) - ('a' as uint) + 11u; static DIGIT_E_RADIX: uint = ('e' as uint) - ('a' as uint) + 11u; -/** - * Converts a number to its string representation as a byte vector. 
- * This is meant to be a common base implementation for all numeric string - * conversion functions like `to_str()` or `to_str_radix()`. - * - * # Arguments - * - `num` - The number to convert. Accepts any number that - * implements the numeric traits. - * - `radix` - Base to use. Accepts only the values 2-36. If the exponential notation - * is used, then this base is only used for the significand. The exponent - * itself always printed using a base of 10. - * - `negative_zero` - Whether to treat the special value `-0` as - * `-0` or as `+0`. - * - `sign` - How to emit the sign. See `SignFormat`. - * - `digits` - The amount of digits to use for emitting the fractional - * part, if any. See `SignificantDigits`. - * - `exp_format` - Whether or not to use the exponential (scientific) notation. - * See `ExponentFormat`. - * - `exp_capital` - Whether or not to use a capital letter for the exponent sign, if - * exponential notation is desired. - * - `f` - A closure to invoke with the bytes representing the - * float. - * - * # Failure - * - Fails if `radix` < 2 or `radix` > 36. - * - Fails if `radix` > 14 and `exp_format` is `ExpDec` due to conflict - * between digit and exponent sign `'e'`. - * - Fails if `radix` > 25 and `exp_format` is `ExpBin` due to conflict - * between digit and exponent sign `'p'`. - */ +/// Converts a number to its string representation as a byte vector. +/// This is meant to be a common base implementation for all numeric string +/// conversion functions like `to_str()` or `to_str_radix()`. +/// +/// # Arguments +/// - `num` - The number to convert. Accepts any number that +/// implements the numeric traits. +/// - `radix` - Base to use. Accepts only the values 2-36. If the exponential notation +/// is used, then this base is only used for the significand. The exponent +/// itself always printed using a base of 10. +/// - `negative_zero` - Whether to treat the special value `-0` as +/// `-0` or as `+0`. +/// - `sign` - How to emit the sign. 
See `SignFormat`. +/// - `digits` - The amount of digits to use for emitting the fractional +/// part, if any. See `SignificantDigits`. +/// - `exp_format` - Whether or not to use the exponential (scientific) notation. +/// See `ExponentFormat`. +/// - `exp_capital` - Whether or not to use a capital letter for the exponent sign, if +/// exponential notation is desired. +/// - `f` - A closure to invoke with the bytes representing the +/// float. +/// +/// # Failure +/// - Fails if `radix` < 2 or `radix` > 36. +/// - Fails if `radix` > 14 and `exp_format` is `ExpDec` due to conflict +/// between digit and exponent sign `'e'`. +/// - Fails if `radix` > 25 and `exp_format` is `ExpBin` due to conflict +/// between digit and exponent sign `'p'`. pub fn float_to_str_bytes_common( num: T, radix: uint, diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index 161dd7cef7e13..d1f072fd05aa6 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -8,38 +8,36 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! rustc compiler intrinsics. - -The corresponding definitions are in librustc/middle/trans/foreign.rs. - -# Volatiles - -The volatile intrinsics provide operations intended to act on I/O -memory, which are guaranteed to not be reordered by the compiler -across other volatile intrinsics. See the LLVM documentation on -[[volatile]]. - -[volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses - -# Atomics - -The atomic intrinsics provide common atomic operations on machine -words, with multiple possible memory orderings. They obey the same -semantics as C++11. See the LLVM documentation on [[atomics]]. - -[atomics]: http://llvm.org/docs/Atomics.html - -A quick refresher on memory ordering: - -* Acquire - a barrier for acquiring a lock. Subsequent reads and writes - take place after the barrier. -* Release - a barrier for releasing a lock. 
Preceding reads and writes - take place before the barrier. -* Sequentially consistent - sequentially consistent operations are - guaranteed to happen in order. This is the standard mode for working - with atomic types and is equivalent to Java's `volatile`. - -*/ +//! rustc compiler intrinsics. +//! +//! The corresponding definitions are in librustc/middle/trans/foreign.rs. +//! +//! # Volatiles +//! +//! The volatile intrinsics provide operations intended to act on I/O +//! memory, which are guaranteed to not be reordered by the compiler +//! across other volatile intrinsics. See the LLVM documentation on +//! [[volatile]]. +//! +//! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses +//! +//! # Atomics +//! +//! The atomic intrinsics provide common atomic operations on machine +//! words, with multiple possible memory orderings. They obey the same +//! semantics as C++11. See the LLVM documentation on [[atomics]]. +//! +//! [atomics]: http://llvm.org/docs/Atomics.html +//! +//! A quick refresher on memory ordering: +//! +//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes +//! take place after the barrier. +//! * Release - a barrier for releasing a lock. Preceding reads and writes +//! take place before the barrier. +//! * Sequentially consistent - sequentially consistent operations are +//! guaranteed to happen in order. This is the standard mode for working +//! with atomic types and is equivalent to Java's `volatile`. #![experimental] #![allow(missing_doc)] diff --git a/src/libcore/iter.rs b/src/libcore/iter.rs index 5895d871dbe18..7bb9eaa06105c 100644 --- a/src/libcore/iter.rs +++ b/src/libcore/iter.rs @@ -8,61 +8,58 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Composable external iterators - -# The `Iterator` trait - -This module defines Rust's core iteration trait. The `Iterator` trait has one -unimplemented method, `next`. 
All other methods are derived through default -methods to perform operations such as `zip`, `chain`, `enumerate`, and `fold`. - -The goal of this module is to unify iteration across all containers in Rust. -An iterator can be considered as a state machine which is used to track which -element will be yielded next. - -There are various extensions also defined in this module to assist with various -types of iteration, such as the `DoubleEndedIterator` for iterating in reverse, -the `FromIterator` trait for creating a container from an iterator, and much -more. - -## Rust's `for` loop - -The special syntax used by rust's `for` loop is based around the `Iterator` -trait defined in this module. For loops can be viewed as a syntactical expansion -into a `loop`, for example, the `for` loop in this example is essentially -translated to the `loop` below. - -```rust -let values = vec![1i, 2, 3]; - -// "Syntactical sugar" taking advantage of an iterator -for &x in values.iter() { - println!("{}", x); -} - -// Rough translation of the iteration without a `for` iterator. -let mut it = values.iter(); -loop { - match it.next() { - Some(&x) => { - println!("{}", x); - } - None => { break } - } -} -``` - -This `for` loop syntax can be applied to any iterator over any type. - -## Iteration protocol and more - -More detailed information about iterators can be found in the [container -guide](http://doc.rust-lang.org/guide-container.html) with -the rest of the rust manuals. - -*/ +//! Composable external iterators +//! +//! # The `Iterator` trait +//! +//! This module defines Rust's core iteration trait. The `Iterator` trait +//! has one unimplemented method, `next`. All other methods are derived +//! through default methods to perform operations such as `zip`, `chain`, +//! `enumerate`, and `fold`. +//! +//! The goal of this module is to unify iteration across all containers in Rust. +//! An iterator can be considered as a state machine which is used to track +//! 
which element will be yielded next. +//! +//! There are various extensions also defined in this module to assist with +//! various types of iteration, such as the `DoubleEndedIterator` for iterating +//! in reverse, the `FromIterator` trait for creating a container from an +//! iterator, and much more. +//! +//! ## Rust's `for` loop +//! +//! The special syntax used by rust's `for` loop is based around the `Iterator` +//! trait defined in this module. For loops can be viewed as a syntactical +//! expansion into a `loop`, for example, the `for` loop in this example is +//! essentially translated to the `loop` below. +//! +//! ```rust +//! let values = vec![1i, 2, 3]; +//! +//! // "Syntactical sugar" taking advantage of an iterator +//! for &x in values.iter() { +//! println!("{}", x); +//! } +//! +//! // Rough translation of the iteration without a `for` iterator. +//! let mut it = values.iter(); +//! loop { +//! match it.next() { +//! Some(&x) => { +//! println!("{}", x); +//! } +//! None => { break } +//! } +//! } +//! ``` +//! +//! This `for` loop syntax can be applied to any iterator over any type. +//! +//! ## Iteration protocol and more +//! +//! More detailed information about iterators can be found in the [container +//! guide](http://doc.rust-lang.org/guide-container.html) with +//! the rest of the rust manuals. use cmp; use num::{Zero, One, CheckedAdd, CheckedSub, Saturating, ToPrimitive, Int}; diff --git a/src/libcore/kinds.rs b/src/libcore/kinds.rs index 9a6cdb1c76976..21cda13e41a10 100644 --- a/src/libcore/kinds.rs +++ b/src/libcore/kinds.rs @@ -8,17 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -Primitive traits representing basic 'kinds' of types - -Rust types can be classified in various useful ways according to -intrinsic properties of the type. These classifications, often called -'kinds', are represented as traits. 
- -They cannot be implemented by user code, but are instead implemented -by the compiler automatically for the types to which they apply. - -*/ +//! Primitive traits representing basic 'kinds' of types +//! +//! Rust types can be classified in various useful ways according to +//! intrinsic properties of the type. These classifications, often called +//! 'kinds', are represented as traits. +//! +//! They cannot be implemented by user code, but are instead implemented +//! by the compiler automatically for the types to which they apply. /// Types able to be transferred across task boundaries. #[lang="send"] diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index 6966c96b30ba9..6761b8d55ab42 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -86,17 +86,17 @@ mod macros; pub mod num; -/* The libcore prelude, not as all-encompassing as the libstd prelude */ +// The libcore prelude, not as all-encompassing as the libstd prelude pub mod prelude; -/* Core modules for ownership management */ +// Core modules for ownership management pub mod intrinsics; pub mod mem; pub mod ptr; -/* Core language traits */ +// Core language traits pub mod kinds; pub mod ops; @@ -106,7 +106,7 @@ pub mod clone; pub mod default; pub mod collections; -/* Core types and methods on primitives */ +// Core types and methods on primitives mod unicode; pub mod any; diff --git a/src/libcore/ops.rs b/src/libcore/ops.rs index d42c09b8163dd..5d65182300c63 100644 --- a/src/libcore/ops.rs +++ b/src/libcore/ops.rs @@ -8,107 +8,97 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * - * Overloadable operators - * - * Implementing these traits allows you to get an effect similar to - * overloading operators. - * - * The values for the right hand side of an operator are automatically - * borrowed, so `a + b` is sugar for `a.add(&b)`. - * - * All of these traits are imported by the prelude, so they are available in - * every Rust program. 
- * - * # Example - * - * This example creates a `Point` struct that implements `Add` and `Sub`, and then - * demonstrates adding and subtracting two `Point`s. - * - * ```rust - * #[deriving(Show)] - * struct Point { - * x: int, - * y: int - * } - * - * impl Add for Point { - * fn add(&self, other: &Point) -> Point { - * Point {x: self.x + other.x, y: self.y + other.y} - * } - * } - * - * impl Sub for Point { - * fn sub(&self, other: &Point) -> Point { - * Point {x: self.x - other.x, y: self.y - other.y} - * } - * } - * fn main() { - * println!("{}", Point {x: 1, y: 0} + Point {x: 2, y: 3}); - * println!("{}", Point {x: 1, y: 0} - Point {x: 2, y: 3}); - * } - * ``` - * - * See the documentation for each trait for a minimum implementation that prints - * something to the screen. - * - */ - -/** - * - * The `Drop` trait is used to run some code when a value goes out of scope. This - * is sometimes called a 'destructor'. - * - * # Example - * - * A trivial implementation of `Drop`. The `drop` method is called when `_x` goes - * out of scope, and therefore `main` prints `Dropping!`. - * - * ```rust - * struct HasDrop; - * - * impl Drop for HasDrop { - * fn drop(&mut self) { - * println!("Dropping!"); - * } - * } - * - * fn main() { - * let _x = HasDrop; - * } - * ``` - */ +//! Overloadable operators +//! +//! Implementing these traits allows you to get an effect similar to +//! overloading operators. +//! +//! The values for the right hand side of an operator are automatically +//! borrowed, so `a + b` is sugar for `a.add(&b)`. +//! +//! All of these traits are imported by the prelude, so they are available in +//! every Rust program. +//! +//! # Example +//! +//! This example creates a `Point` struct that implements `Add` and `Sub`, and then +//! demonstrates adding and subtracting two `Point`s. +//! +//! ```rust +//! #[deriving(Show)] +//! struct Point { +//! x: int, +//! y: int +//! } +//! +//! impl Add for Point { +//! fn add(&self, other: &Point) -> Point { +//! 
Point {x: self.x + other.x, y: self.y + other.y} +//! } +//! } +//! +//! impl Sub for Point { +//! fn sub(&self, other: &Point) -> Point { +//! Point {x: self.x - other.x, y: self.y - other.y} +//! } +//! } +//! fn main() { +//! println!("{}", Point {x: 1, y: 0} + Point {x: 2, y: 3}); +//! println!("{}", Point {x: 1, y: 0} - Point {x: 2, y: 3}); +//! } +//! ``` +//! +//! See the documentation for each trait for a minimum implementation that prints +//! something to the screen. + +/// The `Drop` trait is used to run some code when a value goes out of scope. This +/// is sometimes called a 'destructor'. +/// +/// # Example +/// +/// A trivial implementation of `Drop`. The `drop` method is called when `_x` goes +/// out of scope, and therefore `main` prints `Dropping!`. +/// +/// ```rust +/// struct HasDrop; +/// +/// impl Drop for HasDrop { +/// fn drop(&mut self) { +/// println!("Dropping!"); +/// } +/// } +/// +/// fn main() { +/// let _x = HasDrop; +/// } +/// ``` #[lang="drop"] pub trait Drop { /// The `drop` method, called when the value goes out of scope. fn drop(&mut self); } -/** - * - * The `Add` trait is used to specify the functionality of `+`. - * - * # Example - * - * A trivial implementation of `Add`. When `Foo + Foo` happens, it ends up - * calling `add`, and therefore, `main` prints `Adding!`. - * - * ```rust - * struct Foo; - * - * impl Add for Foo { - * fn add(&self, _rhs: &Foo) -> Foo { - * println!("Adding!"); - * *self - * } - * } - * - * fn main() { - * Foo + Foo; - * } - * ``` - */ +/// The `Add` trait is used to specify the functionality of `+`. +/// +/// # Example +/// +/// A trivial implementation of `Add`. When `Foo + Foo` happens, it ends up +/// calling `add`, and therefore, `main` prints `Adding!`. 
+/// +/// ```rust +/// struct Foo; +/// +/// impl Add for Foo { +/// fn add(&self, _rhs: &Foo) -> Foo { +/// println!("Adding!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo + Foo; +/// } +/// ``` #[lang="add"] pub trait Add { /// The method for the `+` operator @@ -126,30 +116,27 @@ macro_rules! add_impl( add_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64 f32 f64) -/** - * - * The `Sub` trait is used to specify the functionality of `-`. - * - * # Example - * - * A trivial implementation of `Sub`. When `Foo - Foo` happens, it ends up - * calling `sub`, and therefore, `main` prints `Subtracting!`. - * - * ```rust - * struct Foo; - * - * impl Sub for Foo { - * fn sub(&self, _rhs: &Foo) -> Foo { - * println!("Subtracting!"); - * *self - * } - * } - * - * fn main() { - * Foo - Foo; - * } - * ``` - */ +/// The `Sub` trait is used to specify the functionality of `-`. +/// +/// # Example +/// +/// A trivial implementation of `Sub`. When `Foo - Foo` happens, it ends up +/// calling `sub`, and therefore, `main` prints `Subtracting!`. +/// +/// ```rust +/// struct Foo; +/// +/// impl Sub for Foo { +/// fn sub(&self, _rhs: &Foo) -> Foo { +/// println!("Subtracting!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo - Foo; +/// } +/// ``` #[lang="sub"] pub trait Sub { /// The method for the `-` operator @@ -167,30 +154,27 @@ macro_rules! sub_impl( sub_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64 f32 f64) -/** - * - * The `Mul` trait is used to specify the functionality of `*`. - * - * # Example - * - * A trivial implementation of `Mul`. When `Foo * Foo` happens, it ends up - * calling `mul`, and therefore, `main` prints `Multiplying!`. - * - * ```rust - * struct Foo; - * - * impl Mul for Foo { - * fn mul(&self, _rhs: &Foo) -> Foo { - * println!("Multiplying!"); - * *self - * } - * } - * - * fn main() { - * Foo * Foo; - * } - * ``` - */ +/// The `Mul` trait is used to specify the functionality of `*`. 
+/// +/// # Example +/// +/// A trivial implementation of `Mul`. When `Foo * Foo` happens, it ends up +/// calling `mul`, and therefore, `main` prints `Multiplying!`. +/// +/// ```rust +/// struct Foo; +/// +/// impl Mul for Foo { +/// fn mul(&self, _rhs: &Foo) -> Foo { +/// println!("Multiplying!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo * Foo; +/// } +/// ``` #[lang="mul"] pub trait Mul { /// The method for the `*` operator @@ -208,30 +192,27 @@ macro_rules! mul_impl( mul_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64 f32 f64) -/** - * - * The `Div` trait is used to specify the functionality of `/`. - * - * # Example - * - * A trivial implementation of `Div`. When `Foo / Foo` happens, it ends up - * calling `div`, and therefore, `main` prints `Dividing!`. - * - * ``` - * struct Foo; - * - * impl Div for Foo { - * fn div(&self, _rhs: &Foo) -> Foo { - * println!("Dividing!"); - * *self - * } - * } - * - * fn main() { - * Foo / Foo; - * } - * ``` - */ +/// The `Div` trait is used to specify the functionality of `/`. +/// +/// # Example +/// +/// A trivial implementation of `Div`. When `Foo / Foo` happens, it ends up +/// calling `div`, and therefore, `main` prints `Dividing!`. +/// +/// ``` +/// struct Foo; +/// +/// impl Div for Foo { +/// fn div(&self, _rhs: &Foo) -> Foo { +/// println!("Dividing!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo / Foo; +/// } +/// ``` #[lang="div"] pub trait Div { /// The method for the `/` operator @@ -249,30 +230,27 @@ macro_rules! div_impl( div_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64 f32 f64) -/** - * - * The `Rem` trait is used to specify the functionality of `%`. - * - * # Example - * - * A trivial implementation of `Rem`. When `Foo % Foo` happens, it ends up - * calling `rem`, and therefore, `main` prints `Remainder-ing!`. 
- * - * ``` - * struct Foo; - * - * impl Rem for Foo { - * fn rem(&self, _rhs: &Foo) -> Foo { - * println!("Remainder-ing!"); - * *self - * } - * } - * - * fn main() { - * Foo % Foo; - * } - * ``` - */ +/// The `Rem` trait is used to specify the functionality of `%`. +/// +/// # Example +/// +/// A trivial implementation of `Rem`. When `Foo % Foo` happens, it ends up +/// calling `rem`, and therefore, `main` prints `Remainder-ing!`. +/// +/// ``` +/// struct Foo; +/// +/// impl Rem for Foo { +/// fn rem(&self, _rhs: &Foo) -> Foo { +/// println!("Remainder-ing!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo % Foo; +/// } +/// ``` #[lang="rem"] pub trait Rem { /// The method for the `%` operator @@ -304,30 +282,27 @@ rem_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64) rem_float_impl!(f32, fmodf) rem_float_impl!(f64, fmod) -/** - * - * The `Neg` trait is used to specify the functionality of unary `-`. - * - * # Example - * - * A trivial implementation of `Neg`. When `-Foo` happens, it ends up calling - * `neg`, and therefore, `main` prints `Negating!`. - * - * ``` - * struct Foo; - * - * impl Neg for Foo { - * fn neg(&self) -> Foo { - * println!("Negating!"); - * *self - * } - * } - * - * fn main() { - * -Foo; - * } - * ``` - */ +/// The `Neg` trait is used to specify the functionality of unary `-`. +/// +/// # Example +/// +/// A trivial implementation of `Neg`. When `-Foo` happens, it ends up calling +/// `neg`, and therefore, `main` prints `Negating!`. +/// +/// ``` +/// struct Foo; +/// +/// impl Neg for Foo { +/// fn neg(&self) -> Foo { +/// println!("Negating!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// -Foo; +/// } +/// ``` #[lang="neg"] pub trait Neg { /// The method for the unary `-` operator @@ -360,31 +335,27 @@ neg_uint_impl!(u16, i16) neg_uint_impl!(u32, i32) neg_uint_impl!(u64, i64) - -/** - * - * The `Not` trait is used to specify the functionality of unary `!`. - * - * # Example - * - * A trivial implementation of `Not`. 
When `!Foo` happens, it ends up calling - * `not`, and therefore, `main` prints `Not-ing!`. - * - * ``` - * struct Foo; - * - * impl Not for Foo { - * fn not(&self) -> Foo { - * println!("Not-ing!"); - * *self - * } - * } - * - * fn main() { - * !Foo; - * } - * ``` - */ +/// The `Not` trait is used to specify the functionality of unary `!`. +/// +/// # Example +/// +/// A trivial implementation of `Not`. When `!Foo` happens, it ends up calling +/// `not`, and therefore, `main` prints `Not-ing!`. +/// +/// ``` +/// struct Foo; +/// +/// impl Not for Foo { +/// fn not(&self) -> Foo { +/// println!("Not-ing!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// !Foo; +/// } +/// ``` #[lang="not"] pub trait Not { /// The method for the unary `!` operator @@ -403,30 +374,27 @@ macro_rules! not_impl( not_impl!(bool uint u8 u16 u32 u64 int i8 i16 i32 i64) -/** - * - * The `BitAnd` trait is used to specify the functionality of `&`. - * - * # Example - * - * A trivial implementation of `BitAnd`. When `Foo & Foo` happens, it ends up - * calling `bitand`, and therefore, `main` prints `Bitwise And-ing!`. - * - * ``` - * struct Foo; - * - * impl BitAnd for Foo { - * fn bitand(&self, _rhs: &Foo) -> Foo { - * println!("Bitwise And-ing!"); - * *self - * } - * } - * - * fn main() { - * Foo & Foo; - * } - * ``` - */ +/// The `BitAnd` trait is used to specify the functionality of `&`. +/// +/// # Example +/// +/// A trivial implementation of `BitAnd`. When `Foo & Foo` happens, it ends up +/// calling `bitand`, and therefore, `main` prints `Bitwise And-ing!`. +/// +/// ``` +/// struct Foo; +/// +/// impl BitAnd for Foo { +/// fn bitand(&self, _rhs: &Foo) -> Foo { +/// println!("Bitwise And-ing!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo & Foo; +/// } +/// ``` #[lang="bitand"] pub trait BitAnd { /// The method for the `&` operator @@ -444,30 +412,27 @@ macro_rules! 
bitand_impl( bitand_impl!(bool uint u8 u16 u32 u64 int i8 i16 i32 i64) -/** - * - * The `BitOr` trait is used to specify the functionality of `|`. - * - * # Example - * - * A trivial implementation of `BitOr`. When `Foo | Foo` happens, it ends up - * calling `bitor`, and therefore, `main` prints `Bitwise Or-ing!`. - * - * ``` - * struct Foo; - * - * impl BitOr for Foo { - * fn bitor(&self, _rhs: &Foo) -> Foo { - * println!("Bitwise Or-ing!"); - * *self - * } - * } - * - * fn main() { - * Foo | Foo; - * } - * ``` - */ +/// The `BitOr` trait is used to specify the functionality of `|`. +/// +/// # Example +/// +/// A trivial implementation of `BitOr`. When `Foo | Foo` happens, it ends up +/// calling `bitor`, and therefore, `main` prints `Bitwise Or-ing!`. +/// +/// ``` +/// struct Foo; +/// +/// impl BitOr for Foo { +/// fn bitor(&self, _rhs: &Foo) -> Foo { +/// println!("Bitwise Or-ing!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo | Foo; +/// } +/// ``` #[lang="bitor"] pub trait BitOr { /// The method for the `|` operator @@ -485,30 +450,27 @@ macro_rules! bitor_impl( bitor_impl!(bool uint u8 u16 u32 u64 int i8 i16 i32 i64) -/** - * - * The `BitXor` trait is used to specify the functionality of `^`. - * - * # Example - * - * A trivial implementation of `BitXor`. When `Foo ^ Foo` happens, it ends up - * calling `bitxor`, and therefore, `main` prints `Bitwise Xor-ing!`. - * - * ``` - * struct Foo; - * - * impl BitXor for Foo { - * fn bitxor(&self, _rhs: &Foo) -> Foo { - * println!("Bitwise Xor-ing!"); - * *self - * } - * } - * - * fn main() { - * Foo ^ Foo; - * } - * ``` - */ +/// The `BitXor` trait is used to specify the functionality of `^`. +/// +/// # Example +/// +/// A trivial implementation of `BitXor`. When `Foo ^ Foo` happens, it ends up +/// calling `bitxor`, and therefore, `main` prints `Bitwise Xor-ing!`. 
+/// +/// ``` +/// struct Foo; +/// +/// impl BitXor for Foo { +/// fn bitxor(&self, _rhs: &Foo) -> Foo { +/// println!("Bitwise Xor-ing!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo ^ Foo; +/// } +/// ``` #[lang="bitxor"] pub trait BitXor { /// The method for the `^` operator @@ -526,30 +488,27 @@ macro_rules! bitxor_impl( bitxor_impl!(bool uint u8 u16 u32 u64 int i8 i16 i32 i64) -/** - * - * The `Shl` trait is used to specify the functionality of `<<`. - * - * # Example - * - * A trivial implementation of `Shl`. When `Foo << Foo` happens, it ends up - * calling `shl`, and therefore, `main` prints `Shifting left!`. - * - * ``` - * struct Foo; - * - * impl Shl for Foo { - * fn shl(&self, _rhs: &Foo) -> Foo { - * println!("Shifting left!"); - * *self - * } - * } - * - * fn main() { - * Foo << Foo; - * } - * ``` - */ +/// The `Shl` trait is used to specify the functionality of `<<`. +/// +/// # Example +/// +/// A trivial implementation of `Shl`. When `Foo << Foo` happens, it ends up +/// calling `shl`, and therefore, `main` prints `Shifting left!`. +/// +/// ``` +/// struct Foo; +/// +/// impl Shl for Foo { +/// fn shl(&self, _rhs: &Foo) -> Foo { +/// println!("Shifting left!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo << Foo; +/// } +/// ``` #[lang="shl"] pub trait Shl { /// The method for the `<<` operator @@ -569,30 +528,27 @@ macro_rules! shl_impl( shl_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64) -/** - * - * The `Shr` trait is used to specify the functionality of `>>`. - * - * # Example - * - * A trivial implementation of `Shr`. When `Foo >> Foo` happens, it ends up - * calling `shr`, and therefore, `main` prints `Shifting right!`. - * - * ``` - * struct Foo; - * - * impl Shr for Foo { - * fn shr(&self, _rhs: &Foo) -> Foo { - * println!("Shifting right!"); - * *self - * } - * } - * - * fn main() { - * Foo >> Foo; - * } - * ``` - */ +/// The `Shr` trait is used to specify the functionality of `>>`. 
+/// +/// # Example +/// +/// A trivial implementation of `Shr`. When `Foo >> Foo` happens, it ends up +/// calling `shr`, and therefore, `main` prints `Shifting right!`. +/// +/// ``` +/// struct Foo; +/// +/// impl Shr for Foo { +/// fn shr(&self, _rhs: &Foo) -> Foo { +/// println!("Shifting right!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo >> Foo; +/// } +/// ``` #[lang="shr"] pub trait Shr { /// The method for the `>>` operator @@ -610,104 +566,95 @@ macro_rules! shr_impl( shr_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64) -/** - * - * The `Index` trait is used to specify the functionality of indexing operations - * like `arr[idx]`. - * - * # Example - * - * A trivial implementation of `Index`. When `Foo[Foo]` happens, it ends up - * calling `index`, and therefore, `main` prints `Indexing!`. - * - * ``` - * struct Foo; - * - * impl Index for Foo { - * fn index(&self, _rhs: &Foo) -> Foo { - * println!("Indexing!"); - * *self - * } - * } - * - * fn main() { - * Foo[Foo]; - * } - * ``` - */ +/// The `Index` trait is used to specify the functionality of indexing operations +/// like `arr[idx]`. +/// +/// # Example +/// +/// A trivial implementation of `Index`. When `Foo[Foo]` happens, it ends up +/// calling `index`, and therefore, `main` prints `Indexing!`. +/// +/// ``` +/// struct Foo; +/// +/// impl Index for Foo { +/// fn index(&self, _rhs: &Foo) -> Foo { +/// println!("Indexing!"); +/// *self +/// } +/// } +/// +/// fn main() { +/// Foo[Foo]; +/// } +/// ``` #[lang="index"] pub trait Index { /// The method for the indexing (`Foo[Bar]`) operation fn index(&self, index: &Index) -> Result; } -/** - * - * The `Deref` trait is used to specify the functionality of dereferencing - * operations like `*v`. - * - * # Example - * - * A struct with a single field which is accessible via dereferencing the - * struct. 
- * - * ``` - * struct DerefExample { - * value: T - * } - * - * impl Deref for DerefExample { - * fn deref<'a>(&'a self) -> &'a T { - * &self.value - * } - * } - * - * fn main() { - * let x = DerefExample { value: 'a' }; - * assert_eq!('a', *x); - * } - * ``` - */ +/// The `Deref` trait is used to specify the functionality of dereferencing +/// operations like `*v`. +/// +/// # Example +/// +/// A struct with a single field which is accessible via dereferencing the +/// struct. +/// +/// ``` +/// struct DerefExample { +/// value: T +/// } +/// +/// impl Deref for DerefExample { +/// fn deref<'a>(&'a self) -> &'a T { +/// &self.value +/// } +/// } +/// +/// fn main() { +/// let x = DerefExample { value: 'a' }; +/// assert_eq!('a', *x); +/// } +/// ``` #[lang="deref"] pub trait Deref { /// The method called to dereference a value fn deref<'a>(&'a self) -> &'a Result; } -/** - * - * The `DerefMut` trait is used to specify the functionality of dereferencing - * mutably like `*v = 1;` - * - * # Example - * - * A struct with a single field which is modifiable via dereferencing the - * struct. - * - * ``` - * struct DerefMutExample { - * value: T - * } - * - * impl Deref for DerefMutExample { - * fn deref<'a>(&'a self) -> &'a T { - * &self.value - * } - * } - * - * impl DerefMut for DerefMutExample { - * fn deref_mut<'a>(&'a mut self) -> &'a mut T { - * &mut self.value - * } - * } - * - * fn main() { - * let mut x = DerefMutExample { value: 'a' }; - * *x = 'b'; - * assert_eq!('b', *x); - * } - * ``` - */ +/// The `DerefMut` trait is used to specify the functionality of dereferencing +/// mutably like `*v = 1;` +/// +/// # Example +/// +/// A struct with a single field which is modifiable via dereferencing the +/// struct. 
+/// +/// ``` +/// struct DerefMutExample { +/// value: T +/// } +/// +/// impl Deref for DerefMutExample { +/// fn deref<'a>(&'a self) -> &'a T { +/// &self.value +/// } +/// } +/// +/// impl DerefMut for DerefMutExample { +/// fn deref_mut<'a>(&'a mut self) -> &'a mut T { +/// &mut self.value +/// } +/// } +/// +/// fn main() { +/// let mut x = DerefMutExample { value: 'a' }; +/// *x = 'b'; +/// assert_eq!('b', *x); +/// } +/// ``` #[lang="deref_mut"] pub trait DerefMut: Deref { /// The method called to mutably dereference a value diff --git a/src/libcore/slice.rs b/src/libcore/slice.rs index 0178c0318b81c..befbdd0320b26 100644 --- a/src/libcore/slice.rs +++ b/src/libcore/slice.rs @@ -55,26 +55,20 @@ use raw::{Repr, Slice}; /// Extension methods for vectors pub trait ImmutableVector<'a, T> { - /** - * Returns a slice of self spanning the interval [`start`, `end`). - * - * Fails when the slice (or part of it) is outside the bounds of self, - * or when `start` > `end`. - */ + /// Returns a slice of self spanning the interval [`start`, `end`). + /// + /// Fails when the slice (or part of it) is outside the bounds of self, + /// or when `start` > `end`. fn slice(&self, start: uint, end: uint) -> &'a [T]; - /** - * Returns a slice of self from `start` to the end of the vec. - * - * Fails when `start` points outside the bounds of self. - */ + /// Returns a slice of self from `start` to the end of the vec. + /// + /// Fails when `start` points outside the bounds of self. fn slice_from(&self, start: uint) -> &'a [T]; - /** - * Returns a slice of self from the start of the vec to `end`. - * - * Fails when `end` points outside the bounds of self. - */ + /// Returns a slice of self from the start of the vec to `end`. + /// + /// Fails when `end` points outside the bounds of self. fn slice_to(&self, end: uint) -> &'a [T]; /// Returns an iterator over the vector fn iter(self) -> Items<'a, T>; @@ -94,53 +88,47 @@ pub trait ImmutableVector<'a, T> { /// subslices. 
fn rsplitn(self, n: uint, pred: |&T|: 'a -> bool) -> SplitsN<'a, T>; - /** - * Returns an iterator over all contiguous windows of length - * `size`. The windows overlap. If the vector is shorter than - * `size`, the iterator returns no values. - * - * # Failure - * - * Fails if `size` is 0. - * - * # Example - * - * Print the adjacent pairs of a vector (i.e. `[1,2]`, `[2,3]`, - * `[3,4]`): - * - * ```rust - * let v = &[1i, 2, 3, 4]; - * for win in v.windows(2) { - * println!("{}", win); - * } - * ``` - * - */ + /// Returns an iterator over all contiguous windows of length + /// `size`. The windows overlap. If the vector is shorter than + /// `size`, the iterator returns no values. + /// + /// # Failure + /// + /// Fails if `size` is 0. + /// + /// # Example + /// + /// Print the adjacent pairs of a vector (i.e. `[1,2]`, `[2,3]`, + /// `[3,4]`): + /// + /// ```rust + /// let v = &[1i, 2, 3, 4]; + /// for win in v.windows(2) { + /// println!("{}", win); + /// } + /// ``` fn windows(self, size: uint) -> Windows<'a, T>; - /** - * - * Returns an iterator over `size` elements of the vector at a - * time. The chunks do not overlap. If `size` does not divide the - * length of the vector, then the last chunk will not have length - * `size`. - * - * # Failure - * - * Fails if `size` is 0. - * - * # Example - * - * Print the vector two elements at a time (i.e. `[1,2]`, - * `[3,4]`, `[5]`): - * - * ```rust - * let v = &[1i, 2, 3, 4, 5]; - * for win in v.chunks(2) { - * println!("{}", win); - * } - * ``` - * - */ + /// Returns an iterator over `size` elements of the vector at a + /// time. The chunks do not overlap. If `size` does not divide the + /// length of the vector, then the last chunk will not have length + /// `size`. + /// + /// # Failure + /// + /// Fails if `size` is 0. + /// + /// # Example + /// + /// Print the vector two elements at a time (i.e. 
`[1,2]`, + /// `[3,4]`, `[5]`): + /// + /// ```rust + /// let v = &[1i, 2, 3, 4, 5]; + /// for win in v.chunks(2) { + /// println!("{}", win); + /// } + /// ``` + /// fn chunks(self, size: uint) -> Chunks<'a, T>; /// Returns the element of a vector at the given index, or `None` if the @@ -163,64 +151,56 @@ pub trait ImmutableVector<'a, T> { /// bounds checking. unsafe fn unsafe_ref(self, index: uint) -> &'a T; - /** - * Returns an unsafe pointer to the vector's buffer - * - * The caller must ensure that the vector outlives the pointer this - * function returns, or else it will end up pointing to garbage. - * - * Modifying the vector may cause its buffer to be reallocated, which - * would also make any pointers to it invalid. - */ + /// Returns an unsafe pointer to the vector's buffer + /// + /// The caller must ensure that the vector outlives the pointer this + /// function returns, or else it will end up pointing to garbage. + /// + /// Modifying the vector may cause its buffer to be reallocated, which + /// would also make any pointers to it invalid. fn as_ptr(&self) -> *const T; - /** - * Binary search a sorted vector with a comparator function. - * - * The comparator function should implement an order consistent - * with the sort order of the underlying vector, returning an - * order code that indicates whether its argument is `Less`, - * `Equal` or `Greater` the desired target. - * - * Returns the index where the comparator returned `Equal`, or `None` if - * not found. - */ + /// Binary search a sorted vector with a comparator function. + /// + /// The comparator function should implement an order consistent + /// with the sort order of the underlying vector, returning an + /// order code that indicates whether its argument is `Less`, + /// `Equal` or `Greater` the desired target. + /// + /// Returns the index where the comparator returned `Equal`, or `None` if + /// not found. 
fn bsearch(&self, f: |&T| -> Ordering) -> Option; - /** - * Returns an immutable reference to the first element in this slice - * and adjusts the slice in place so that it no longer contains - * that element. O(1). - * - * Equivalent to: - * - * ```ignore - * if self.len() == 0 { return None } - * let head = &self[0]; - * *self = self.slice_from(1); - * Some(head) - * ``` - * - * Returns `None` if vector is empty - */ + /// Returns an immutable reference to the first element in this slice + /// and adjusts the slice in place so that it no longer contains + /// that element. O(1). + /// + /// Equivalent to: + /// + /// ```ignore + /// if self.len() == 0 { return None } + /// let head = &self[0]; + /// *self = self.slice_from(1); + /// Some(head) + /// ``` + /// + /// Returns `None` if vector is empty fn shift_ref(&mut self) -> Option<&'a T>; - /** - * Returns an immutable reference to the last element in this slice - * and adjusts the slice in place so that it no longer contains - * that element. O(1). - * - * Equivalent to: - * - * ```ignore - * if self.len() == 0 { return None; } - * let tail = &self[self.len() - 1]; - * *self = self.slice_to(self.len() - 1); - * Some(tail) - * ``` - * - * Returns `None` if slice is empty. - */ + /// Returns an immutable reference to the last element in this slice + /// and adjusts the slice in place so that it no longer contains + /// that element. O(1). + /// + /// Equivalent to: + /// + /// ```ignore + /// if self.len() == 0 { return None; } + /// let tail = &self[self.len() - 1]; + /// *self = self.slice_to(self.len() - 1); + /// Some(tail) + /// ``` + /// + /// Returns `None` if slice is empty. fn pop_ref(&mut self) -> Option<&'a T>; } @@ -397,18 +377,14 @@ pub trait MutableVector<'a, T> { /// Return a slice that points into another slice. fn mut_slice(self, start: uint, end: uint) -> &'a mut [T]; - /** - * Returns a slice of self from `start` to the end of the vec. 
- * - * Fails when `start` points outside the bounds of self. - */ + /// Returns a slice of self from `start` to the end of the vec. + /// + /// Fails when `start` points outside the bounds of self. fn mut_slice_from(self, start: uint) -> &'a mut [T]; - /** - * Returns a slice of self from the start of the vec to `end`. - * - * Fails when `end` points outside the bounds of self. - */ + /// Returns a slice of self from the start of the vec to `end`. + /// + /// Fails when `end` points outside the bounds of self. fn mut_slice_to(self, end: uint) -> &'a mut [T]; /// Returns an iterator that allows modifying each value @@ -422,52 +398,46 @@ pub trait MutableVector<'a, T> { /// matched element is not contained in the subslices. fn mut_split(self, pred: |&T|: 'a -> bool) -> MutSplits<'a, T>; - /** - * Returns an iterator over `size` elements of the vector at a time. - * The chunks are mutable and do not overlap. If `size` does not divide the - * length of the vector, then the last chunk will not have length - * `size`. - * - * # Failure - * - * Fails if `size` is 0. - */ + /// Returns an iterator over `size` elements of the vector at a time. + /// The chunks are mutable and do not overlap. If `size` does not divide the + /// length of the vector, then the last chunk will not have length + /// `size`. + /// + /// # Failure + /// + /// Fails if `size` is 0. fn mut_chunks(self, chunk_size: uint) -> MutChunks<'a, T>; - /** - * Returns a mutable reference to the first element in this slice - * and adjusts the slice in place so that it no longer contains - * that element. O(1). - * - * Equivalent to: - * - * ```ignore - * if self.len() == 0 { return None; } - * let head = &mut self[0]; - * *self = self.mut_slice_from(1); - * Some(head) - * ``` - * - * Returns `None` if slice is empty - */ + /// Returns a mutable reference to the first element in this slice + /// and adjusts the slice in place so that it no longer contains + /// that element. O(1). 
+ /// + /// Equivalent to: + /// + /// ```ignore + /// if self.len() == 0 { return None; } + /// let head = &mut self[0]; + /// *self = self.mut_slice_from(1); + /// Some(head) + /// ``` + /// + /// Returns `None` if slice is empty fn mut_shift_ref(&mut self) -> Option<&'a mut T>; - /** - * Returns a mutable reference to the last element in this slice - * and adjusts the slice in place so that it no longer contains - * that element. O(1). - * - * Equivalent to: - * - * ```ignore - * if self.len() == 0 { return None; } - * let tail = &mut self[self.len() - 1]; - * *self = self.mut_slice_to(self.len() - 1); - * Some(tail) - * ``` - * - * Returns `None` if slice is empty. - */ + /// Returns a mutable reference to the last element in this slice + /// and adjusts the slice in place so that it no longer contains + /// that element. O(1). + /// + /// Equivalent to: + /// + /// ```ignore + /// if self.len() == 0 { return None; } + /// let tail = &mut self[self.len() - 1]; + /// *self = self.mut_slice_to(self.len() - 1); + /// Some(tail) + /// ``` + /// + /// Returns `None` if slice is empty. fn mut_pop_ref(&mut self) -> Option<&'a mut T>; /// Swaps two elements in a vector. @@ -788,11 +758,9 @@ impl<'a,T:PartialEq> ImmutableEqVector for &'a [T] { /// Extension methods for vectors containing `Ord` elements. pub trait ImmutableOrdVector { - /** - * Binary search a sorted vector for a given element. - * - * Returns the index of the element or None if not found. - */ + /// Binary search a sorted vector for a given element. + /// + /// Returns the index of the element or None if not found. fn bsearch_elem(&self, x: &T) -> Option; } @@ -1293,18 +1261,14 @@ impl<'a, T> DoubleEndedIterator<&'a mut [T]> for MutChunks<'a, T> { // Free functions // -/** - * Converts a pointer to A into a slice of length 1 (without copying). - */ +/// Converts a pointer to A into a slice of length 1 (without copying). 
pub fn ref_slice<'a, A>(s: &'a A) -> &'a [A] { unsafe { transmute(Slice { data: s, len: 1 }) } } -/** - * Converts a pointer to A into a slice of length 1 (without copying). - */ +/// Converts a pointer to A into a slice of length 1 (without copying). pub fn mut_ref_slice<'a, A>(s: &'a mut A) -> &'a mut [A] { unsafe { let ptr: *const A = transmute(s); @@ -1326,10 +1290,8 @@ pub mod raw { use raw::Slice; use option::{None, Option, Some}; - /** - * Form a slice from a pointer and length (as a number of units, - * not bytes). - */ + /// Form a slice from a pointer and length (as a number of units, + /// not bytes). #[inline] pub unsafe fn buf_as_slice(p: *const T, len: uint, f: |v: &[T]| -> U) -> U { @@ -1339,10 +1301,8 @@ pub mod raw { })) } - /** - * Form a slice from a pointer and length (as a number of units, - * not bytes). - */ + /// Form a slice from a pointer and length (as a number of units, + /// not bytes). #[inline] pub unsafe fn mut_buf_as_slice( @@ -1356,12 +1316,10 @@ pub mod raw { })) } - /** - * Returns a pointer to first element in slice and adjusts - * slice so it no longer contains that element. Returns None - * if the slice is empty. O(1). - */ - #[inline] + /// Returns a pointer to first element in slice and adjusts + /// slice so it no longer contains that element. Returns None + /// if the slice is empty. O(1). + #[inline] pub unsafe fn shift_ptr(slice: &mut Slice) -> Option<*const T> { if slice.len == 0 { return None; } let head: *const T = slice.data; @@ -1370,12 +1328,10 @@ pub mod raw { Some(head) } - /** - * Returns a pointer to last element in slice and adjusts - * slice so it no longer contains that element. Returns None - * if the slice is empty. O(1). - */ - #[inline] + /// Returns a pointer to last element in slice and adjusts + /// slice so it no longer contains that element. Returns None + /// if the slice is empty. O(1). 
+ #[inline] pub unsafe fn pop_ptr(slice: &mut Slice) -> Option<*const T> { if slice.len == 0 { return None; } let tail: *const T = slice.data.offset((slice.len - 1) as int); diff --git a/src/libcore/str.rs b/src/libcore/str.rs index 94df7a5a6c2d9..642c59c9bd15f 100644 --- a/src/libcore/str.rs +++ b/src/libcore/str.rs @@ -32,9 +32,7 @@ use slice::ImmutableVector; use slice; use uint; -/* -Section: Creating a string -*/ +// Section: Creating a string /// Converts a vector to a string slice without performing any allocations. /// @@ -93,9 +91,7 @@ impl<'a> CharEq for &'a [char] { } } -/* -Section: Iterators -*/ +// Section: Iterators /// External iterator for a string's characters. /// Use with the `std::iter` module. @@ -577,7 +573,7 @@ impl<'a> Iterator for Utf16CodeUnits<'a> { let mut buf = [0u16, ..2]; self.chars.next().map(|ch| { - let n = ch.encode_utf16(buf /* as mut slice! */); + let n = ch.encode_utf16(buf); // as mut slice! if n == 2 { self.extra = buf[1]; } buf[0] }) @@ -593,9 +589,7 @@ impl<'a> Iterator for Utf16CodeUnits<'a> { } } -/* -Section: Comparing strings -*/ +// Section: Comparing strings // share the implementation of the lang-item vs. non-lang-item // eq_slice. @@ -621,9 +615,7 @@ pub fn eq_slice(a: &str, b: &str) -> bool { eq_slice_(a, b) } -/* -Section: Misc -*/ +// Section: Misc /// Walk through `iter` checking that it's a valid UTF-8 sequence, /// returning `true` in that case, or, if it is invalid, `false` with @@ -958,9 +950,7 @@ pub mod raw { } } -/* -Section: Trait implementations -*/ +// Section: Trait implementations #[allow(missing_doc)] pub mod traits { diff --git a/src/libcoretest/char.rs b/src/libcoretest/char.rs index 852edd90b0f3f..bbd31375dd2cf 100644 --- a/src/libcoretest/char.rs +++ b/src/libcoretest/char.rs @@ -177,7 +177,7 @@ fn test_to_str() { fn test_encode_utf8() { fn check(input: char, expect: &[u8]) { let mut buf = [0u8, ..4]; - let n = input.encode_utf8(buf /* as mut slice! 
*/); + let n = input.encode_utf8(buf); // as mut slice! assert_eq!(buf.slice_to(n), expect); } @@ -191,7 +191,7 @@ fn test_encode_utf8() { fn test_encode_utf16() { fn check(input: char, expect: &[u16]) { let mut buf = [0u16, ..2]; - let n = input.encode_utf16(buf /* as mut slice! */); + let n = input.encode_utf16(buf); // as mut slice! assert_eq!(buf.slice_to(n), expect); } diff --git a/src/libdebug/reflect.rs b/src/libdebug/reflect.rs index 0cbae6ee2d3a3..0f195a59d4040 100644 --- a/src/libdebug/reflect.rs +++ b/src/libdebug/reflect.rs @@ -8,11 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Runtime type reflection - -*/ +//! Runtime type reflection #![allow(missing_doc)] @@ -20,13 +16,11 @@ use std::intrinsics::{Disr, Opaque, TyDesc, TyVisitor}; use std::mem; use std::gc::Gc; -/** - * Trait for visitor that wishes to reflect on data. - * - * To use this, create a struct that encapsulates the set of pointers you wish - * to walk through a data structure, and implement both `MovePtr` for it as well - * as `TyVisitor`; then build a MovePtrAdaptor wrapped around your struct. - */ +/// Trait for visitor that wishes to reflect on data. +/// +/// To use this, create a struct that encapsulates the set of pointers you wish +/// to walk through a data structure, and implement both `MovePtr` for it as well +/// as `TyVisitor`; then build a MovePtrAdaptor wrapped around your struct. pub trait MovePtr { fn move_ptr(&mut self, adjustment: |*const u8| -> *const u8); fn push_ptr(&mut self); diff --git a/src/libdebug/repr.rs b/src/libdebug/repr.rs index 133353ec3d717..087ec1c63a39c 100644 --- a/src/libdebug/repr.rs +++ b/src/libdebug/repr.rs @@ -8,11 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -More runtime type reflection - -*/ +//! 
More runtime type reflection use std::char; use std::intrinsics::{Disr, Opaque, TyDesc, TyVisitor, get_tydesc, visit_tydesc}; diff --git a/src/libflate/lib.rs b/src/libflate/lib.rs index 923aab5e03245..1df1ff7fe95e6 100644 --- a/src/libflate/lib.rs +++ b/src/libflate/lib.rs @@ -8,15 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Simple [DEFLATE][def]-based compression. This is a wrapper around the -[`miniz`][mz] library, which is a one-file pure-C implementation of zlib. - -[def]: https://en.wikipedia.org/wiki/DEFLATE -[mz]: https://code.google.com/p/miniz/ - -*/ +//! Simple [DEFLATE][def]-based compression. This is a wrapper around the +//! [`miniz`][mz] library, which is a one-file pure-C implementation of zlib. +//! +//! [def]: https://en.wikipedia.org/wiki/DEFLATE +//! [mz]: https://code.google.com/p/miniz/ #![crate_id = "flate#0.11.0"] // NOTE: remove after stage0 #![crate_name = "flate"] diff --git a/src/libfourcc/lib.rs b/src/libfourcc/lib.rs index 55e55ba7e51bc..5a37b6e1190ca 100644 --- a/src/libfourcc/lib.rs +++ b/src/libfourcc/lib.rs @@ -8,36 +8,33 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -Syntax extension to generate FourCCs. - -Once loaded, fourcc!() is called with a single 4-character string, -and an optional ident that is either `big`, `little`, or `target`. -The ident represents endianness, and specifies in which direction -the characters should be read. If the ident is omitted, it is assumed -to be `big`, i.e. left-to-right order. It returns a u32. 
- -# Examples - -To load the extension and use it: - -```rust,ignore -#[phase(plugin)] -extern crate fourcc; - -fn main() { - let val = fourcc!("\xC0\xFF\xEE!"); - assert_eq!(val, 0xC0FFEE21u32); - let little_val = fourcc!("foo ", little); - assert_eq!(little_val, 0x21EEFFC0u32); -} -``` - -# References - -* [Wikipedia: FourCC](http://en.wikipedia.org/wiki/FourCC) - -*/ +//! Syntax extension to generate FourCCs. +//! +//! Once loaded, fourcc!() is called with a single 4-character string, +//! and an optional ident that is either `big`, `little`, or `target`. +//! The ident represents endianness, and specifies in which direction +//! the characters should be read. If the ident is omitted, it is assumed +//! to be `big`, i.e. left-to-right order. It returns a u32. +//! +//! # Examples +//! +//! To load the extension and use it: +//! +//! ```rust,ignore +//! #[phase(plugin)] +//! extern crate fourcc; +//! +//! fn main() { +//! let val = fourcc!("\xC0\xFF\xEE!"); +//! assert_eq!(val, 0xC0FFEE21u32); +//! let little_val = fourcc!("foo ", little); +//! assert_eq!(little_val, 0x21EEFFC0u32); +//! } +//! ``` +//! +//! # References +//! +//! * [Wikipedia: FourCC](http://en.wikipedia.org/wiki/FourCC) #![crate_id = "fourcc#0.11.0"] // NOTE: remove after stage0 #![crate_name = "fourcc"] diff --git a/src/libgetopts/lib.rs b/src/libgetopts/lib.rs index 790df13c1ffbf..9d0ec732ab88f 100644 --- a/src/libgetopts/lib.rs +++ b/src/libgetopts/lib.rs @@ -10,24 +10,26 @@ //! Simple getopt alternative. //! -//! Construct a vector of options, either by using `reqopt`, `optopt`, and `optflag` -//! or by building them from components yourself, and pass them to `getopts`, -//! along with a vector of actual arguments (not including `argv[0]`). You'll -//! either get a failure code back, or a match. You'll have to verify whether -//! the amount of 'free' arguments in the match is what you expect. Use `opt_*` -//! accessors to get argument values out of the matches object. +//! 
Construct a vector of options, either by using `reqopt`, `optopt`, and +//! `optflag` or by building them from components yourself, and pass them to +//! `getopts`, along with a vector of actual arguments (not including +//! `argv[0]`). You'll either get a failure code back, or a match. You'll +//! have to verify whether the amount of 'free' arguments in the match is +//! what you expect. Use `opt_*` accessors to get argument values out of the +//! matches object. //! -//! Single-character options are expected to appear on the command line with a -//! single preceding dash; multiple-character options are expected to be +//! Single-character options are expected to appear on the command line with +//! a single preceding dash; multiple-character options are expected to be //! proceeded by two dashes. Options that expect an argument accept their //! argument following either a space or an equals sign. Single-character //! options don't require the space. //! //! # Example //! -//! The following example shows simple command line parsing for an application -//! that requires an input file to be specified, accepts an optional output -//! file name following `-o`, and accepts both `-h` and `--help` as optional flags. +//! The following example shows simple command line parsing for an +//! application that requires an input file to be specified, accepts an +//! optional output file name following `-o`, and accepts both `-h` and +//! `--help` as optional flags. //! //! ~~~{.rust} //! extern crate getopts; @@ -573,12 +575,11 @@ pub fn getopts(args: &[String], optgrps: &[OptGroup]) -> Result { let range = cur.as_slice().char_range_at(j); let opt = Short(range.ch); - /* In a series of potential options (eg. -aheJ), if we - see one which takes an argument, we assume all - subsequent characters make up the argument. This - allows options such as -L/usr/local/lib/foo to be - interpreted correctly - */ + // In a series of potential options (eg. 
-aheJ), if we + // see one which takes an argument, we assume all + // subsequent characters make up the argument. This + // allows options such as -L/usr/local/lib/foo to be + // interpreted correctly match find_opt(opts.as_slice(), opt.clone()) { Some(id) => last_valid_opt_id = Some(id), diff --git a/src/libglob/lib.rs b/src/libglob/lib.rs index 6aa48dc748ec0..1fe2bb27bb05a 100644 --- a/src/libglob/lib.rs +++ b/src/libglob/lib.rs @@ -8,20 +8,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Support for matching file paths against Unix shell style patterns. - * - * The `glob` and `glob_with` functions, in concert with the `Paths` - * type, allow querying the filesystem for all files that match a particular - * pattern - just like the libc `glob` function (for an example see the `glob` - * documentation). The methods on the `Pattern` type provide functionality - * for checking if individual paths match a particular pattern - in a similar - * manner to the libc `fnmatch` function - * - * For consistency across platforms, and for Windows support, this module - * is implemented entirely in Rust rather than deferring to the libc - * `glob`/`fnmatch` functions. - */ +//! Support for matching file paths against Unix shell style patterns. +//! +//! The `glob` and `glob_with` functions, in concert with the `Paths` +//! type, allow querying the filesystem for all files that match a particular +//! pattern - just like the libc `glob` function (for an example see the `glob` +//! documentation). The methods on the `Pattern` type provide functionality +//! for checking if individual paths match a particular pattern - in a similar +//! manner to the libc `fnmatch` function +//! +//! For consistency across platforms, and for Windows support, this module +//! is implemented entirely in Rust rather than deferring to the libc +//! `glob`/`fnmatch` functions. 
#![crate_id = "glob#0.11.0"] // NOTE: remove after stage0 #![crate_name = "glob"] @@ -41,10 +39,8 @@ use std::io::fs; use std::path::is_sep; use std::string::String; -/** - * An iterator that yields Paths from the filesystem that match a particular - * pattern - see the `glob` function for more details. - */ +/// An iterator that yields Paths from the filesystem that match a particular +/// pattern - see the `glob` function for more details. pub struct Paths { dir_patterns: Vec, require_dir: bool, @@ -84,17 +80,15 @@ pub fn glob(pattern: &str) -> Paths { glob_with(pattern, MatchOptions::new()) } -/** - * Return an iterator that produces all the Paths that match the given pattern, - * which may be absolute or relative to the current working directory. - * - * This function accepts Unix shell style patterns as described by `Pattern::new(..)`. - * The options given are passed through unchanged to `Pattern::matches_with(..)` with - * the exception that `require_literal_separator` is always set to `true` regardless of the - * value passed to this function. - * - * Paths are yielded in alphabetical order, as absolute paths. - */ +/// Return an iterator that produces all the Paths that match the given pattern, +/// which may be absolute or relative to the current working directory. +/// +/// This function accepts Unix shell style patterns as described by `Pattern::new(..)`. +/// The options given are passed through unchanged to `Pattern::matches_with(..)` with +/// the exception that `require_literal_separator` is always set to `true` regardless of the +/// value passed to this function. +/// +/// Paths are yielded in alphabetical order, as absolute paths. pub fn glob_with(pattern: &str, options: MatchOptions) -> Paths { #[cfg(windows)] fn check_windows_verbatim(p: &Path) -> bool { path::windows::is_verbatim(p) } @@ -194,9 +188,7 @@ fn list_dir_sorted(path: &Path) -> Option> { } } -/** - * A compiled Unix shell style pattern. - */ +/// A compiled Unix shell style pattern. 
#[deriving(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] pub struct Pattern { tokens: Vec, @@ -226,25 +218,23 @@ enum MatchResult { impl Pattern { - /** - * This function compiles Unix shell style patterns: `?` matches any single - * character, `*` matches any (possibly empty) sequence of characters and - * `[...]` matches any character inside the brackets, unless the first - * character is `!` in which case it matches any character except those - * between the `!` and the `]`. Character sequences can also specify ranges - * of characters, as ordered by Unicode, so e.g. `[0-9]` specifies any - * character between 0 and 9 inclusive. - * - * The metacharacters `?`, `*`, `[`, `]` can be matched by using brackets - * (e.g. `[?]`). When a `]` occurs immediately following `[` or `[!` then - * it is interpreted as being part of, rather then ending, the character - * set, so `]` and NOT `]` can be matched by `[]]` and `[!]]` respectively. - * The `-` character can be specified inside a character sequence pattern by - * placing it at the start or the end, e.g. `[abc-]`. - * - * When a `[` does not have a closing `]` before the end of the string then - * the `[` will be treated literally. - */ + /// This function compiles Unix shell style patterns: `?` matches any single + /// character, `*` matches any (possibly empty) sequence of characters and + /// `[...]` matches any character inside the brackets, unless the first + /// character is `!` in which case it matches any character except those + /// between the `!` and the `]`. Character sequences can also specify ranges + /// of characters, as ordered by Unicode, so e.g. `[0-9]` specifies any + /// character between 0 and 9 inclusive. + /// + /// The metacharacters `?`, `*`, `[`, `]` can be matched by using brackets + /// (e.g. `[?]`). 
When a `]` occurs immediately following `[` or `[!` then + /// it is interpreted as being part of, rather than ending, the character + /// set, so `]` and NOT `]` can be matched by `[]]` and `[!]]` respectively. + /// The `-` character can be specified inside a character sequence pattern by + /// placing it at the start or the end, e.g. `[abc-]`. + /// + /// When a `[` does not have a closing `]` before the end of the string then + /// the `[` will be treated literally. pub fn new(pattern: &str) -> Pattern { let chars = pattern.chars().collect::>(); @@ -304,11 +294,9 @@ impl Pattern { Pattern { tokens: tokens } } - /** - * Escape metacharacters within the given string by surrounding them in - * brackets. The resulting string will, when compiled into a `Pattern`, - * match the input string and nothing else. - */ + /// Escape metacharacters within the given string by surrounding them in + /// brackets. The resulting string will, when compiled into a `Pattern`, + /// match the input string and nothing else. pub fn escape(s: &str) -> String { let mut escaped = String::new(); for c in s.chars() { @@ -327,28 +315,24 @@ impl Pattern { escaped } - /** - * Return if the given `str` matches this `Pattern` using the default - * match options (i.e. `MatchOptions::new()`). - * - * # Example - * - * ```rust - * use glob::Pattern; - * - * assert!(Pattern::new("c?t").matches("cat")); - * assert!(Pattern::new("k[!e]tteh").matches("kitteh")); - * assert!(Pattern::new("d*g").matches("doog")); - * ``` - */ + /// Return if the given `str` matches this `Pattern` using the default + /// match options (i.e. `MatchOptions::new()`).
+ /// + /// # Example + /// + /// ```rust + /// use glob::Pattern; + /// + /// assert!(Pattern::new("c?t").matches("cat")); + /// assert!(Pattern::new("k[!e]tteh").matches("kitteh")); + /// assert!(Pattern::new("d*g").matches("doog")); + /// ``` pub fn matches(&self, str: &str) -> bool { self.matches_with(str, MatchOptions::new()) } - /** - * Return if the given `Path`, when converted to a `str`, matches this `Pattern` - * using the default match options (i.e. `MatchOptions::new()`). - */ + /// Return if the given `Path`, when converted to a `str`, matches this `Pattern` + /// using the default match options (i.e. `MatchOptions::new()`). pub fn matches_path(&self, path: &Path) -> bool { // FIXME (#9639): This needs to handle non-utf8 paths path.as_str().map_or(false, |s| { @@ -356,17 +340,13 @@ impl Pattern { }) } - /** - * Return if the given `str` matches this `Pattern` using the specified match options. - */ + /// Return if the given `str` matches this `Pattern` using the specified match options. pub fn matches_with(&self, str: &str, options: MatchOptions) -> bool { self.matches_from(None, str, 0, options) == Match } - /** - * Return if the given `Path`, when converted to a `str`, matches this `Pattern` - * using the specified match options. - */ + /// Return if the given `Path`, when converted to a `str`, matches this `Pattern` + /// using the specified match options. pub fn matches_path_with(&self, path: &Path, options: MatchOptions) -> bool { // FIXME (#9639): This needs to handle non-utf8 paths path.as_str().map_or(false, |s| { @@ -592,50 +572,40 @@ fn chars_eq(a: char, b: char, case_sensitive: bool) -> bool { } } -/** - * Configuration options to modify the behaviour of `Pattern::matches_with(..)` - */ +/// Configuration options to modify the behaviour of `Pattern::matches_with(..)` #[deriving(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] pub struct MatchOptions { - /** - * Whether or not patterns should be matched in a case-sensitive manner. 
This - * currently only considers upper/lower case relationships between ASCII characters, - * but in future this might be extended to work with Unicode. - */ + /// Whether or not patterns should be matched in a case-sensitive manner. This + /// currently only considers upper/lower case relationships between ASCII characters, + /// but in future this might be extended to work with Unicode. case_sensitive: bool, - /** - * If this is true then path-component separator characters (e.g. `/` on Posix) - * must be matched by a literal `/`, rather than by `*` or `?` or `[...]` - */ + /// If this is true then path-component separator characters (e.g. `/` on Posix) + /// must be matched by a literal `/`, rather than by `*` or `?` or `[...]` require_literal_separator: bool, - /** - * If this is true then paths that contain components that start with a `.` will - * not match unless the `.` appears literally in the pattern: `*`, `?` or `[...]` - * will not match. This is useful because such files are conventionally considered - * hidden on Unix systems and it might be desirable to skip them when listing files. - */ + /// If this is true then paths that contain components that start with a `.` will + /// not match unless the `.` appears literally in the pattern: `*`, `?` or `[...]` + /// will not match. This is useful because such files are conventionally considered + /// hidden on Unix systems and it might be desirable to skip them when listing files. require_literal_leading_dot: bool } impl MatchOptions { - /** - * Constructs a new `MatchOptions` with default field values. This is used - * when calling functions that do not take an explicit `MatchOptions` parameter. - * - * This function always returns this value: - * - * ```rust,ignore - * MatchOptions { - * case_sensitive: true, - * require_literal_separator: false. - * require_literal_leading_dot: false - * } - * ``` - */ + /// Constructs a new `MatchOptions` with default field values. 
This is used + /// when calling functions that do not take an explicit `MatchOptions` parameter. + /// + /// This function always returns this value: + /// + /// ```rust,ignore + /// MatchOptions { + /// case_sensitive: true, + /// require_literal_separator: false, + /// require_literal_leading_dot: false + /// } + /// ``` pub fn new() -> MatchOptions { MatchOptions { case_sensitive: true, diff --git a/src/libgraphviz/lib.rs b/src/libgraphviz/lib.rs index 52990bae55433..d7ac8f4e66caf 100644 --- a/src/libgraphviz/lib.rs +++ b/src/libgraphviz/lib.rs @@ -8,263 +8,261 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! Generate files suitable for use with [Graphviz](http://www.graphviz.org/) - -The `render` function generates output (e.g. an `output.dot` file) for -use with [Graphviz](http://www.graphviz.org/) by walking a labelled -graph. (Graphviz can then automatically lay out the nodes and edges -of the graph, and also optionally render the graph as an image or -other [output formats]( -http://www.graphviz.org/content/output-formats), such as SVG.) - -Rather than impose some particular graph data structure on clients, -this library exposes two traits that clients can implement on their -own structs before handing them over to the rendering function. - -Note: This library does not yet provide access to the full -expressiveness of the [DOT language]( -http://www.graphviz.org/doc/info/lang.html). For example, there are -many [attributes](http://www.graphviz.org/content/attrs) related to -providing layout hints (e.g. left-to-right versus top-down, which -algorithm to use, etc). The current intention of this library is to -emit a human-readable .dot file with very regular structure suitable -for easy post-processing. - -# Examples - -The first example uses a very simple graph representation: a list of -pairs of ints, representing the edges (the node set is implicit).
-Each node label is derived directly from the int representing the node, -while the edge labels are all empty strings. - -This example also illustrates how to use `MaybeOwnedVector` to return -an owned vector or a borrowed slice as appropriate: we construct the -node vector from scratch, but borrow the edge list (rather than -constructing a copy of all the edges from scratch). - -The output from this example renders five nodes, with the first four -forming a diamond-shaped acyclic graph and then pointing to the fifth -which is cyclic. - -```rust -use dot = graphviz; -use graphviz::maybe_owned_vec::IntoMaybeOwnedVector; - -type Nd = int; -type Ed = (int,int); -struct Edges(Vec); - -pub fn render_to(output: &mut W) { - let edges = Edges(vec!((0,1), (0,2), (1,3), (2,3), (3,4), (4,4))); - dot::render(&edges, output).unwrap() -} - -impl<'a> dot::Labeller<'a, Nd, Ed> for Edges { - fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example1") } - - fn node_id(&'a self, n: &Nd) -> dot::Id<'a> { - dot::Id::new(format!("N{}", *n)) - } -} - -impl<'a> dot::GraphWalk<'a, Nd, Ed> for Edges { - fn nodes(&self) -> dot::Nodes<'a,Nd> { - // (assumes that |N| \approxeq |E|) - let &Edges(ref v) = self; - let mut nodes = Vec::with_capacity(v.len()); - for &(s,t) in v.iter() { - nodes.push(s); nodes.push(t); - } - nodes.sort(); - nodes.dedup(); - nodes.into_maybe_owned() - } - - fn edges(&'a self) -> dot::Edges<'a,Ed> { - let &Edges(ref edges) = self; - edges.as_slice().into_maybe_owned() - } - - fn source(&self, e: &Ed) -> Nd { let &(s,_) = e; s } - - fn target(&self, e: &Ed) -> Nd { let &(_,t) = e; t } -} - -# pub fn main() { use std::io::MemWriter; render_to(&mut MemWriter::new()) } -``` - -```no_run -# pub fn render_to(output: &mut W) { unimplemented!() } -pub fn main() { - use std::io::File; - let mut f = File::create(&Path::new("example1.dot")); - render_to(&mut f) -} -``` - -Output from first example (in `example1.dot`): - -```ignore -digraph example1 { - N0[label="N0"]; - 
N1[label="N1"]; - N2[label="N2"]; - N3[label="N3"]; - N4[label="N4"]; - N0 -> N1[label=""]; - N0 -> N2[label=""]; - N1 -> N3[label=""]; - N2 -> N3[label=""]; - N3 -> N4[label=""]; - N4 -> N4[label=""]; -} -``` - -The second example illustrates using `node_label` and `edge_label` to -add labels to the nodes and edges in the rendered graph. The graph -here carries both `nodes` (the label text to use for rendering a -particular node), and `edges` (again a list of `(source,target)` -indices). - -This example also illustrates how to use a type (in this case the edge -type) that shares substructure with the graph: the edge type here is a -direct reference to the `(source,target)` pair stored in the graph's -internal vector (rather than passing around a copy of the pair -itself). Note that this implies that `fn edges(&'a self)` must -construct a fresh `Vec<&'a (uint,uint)>` from the `Vec<(uint,uint)>` -edges stored in `self`. - -Since both the set of nodes and the set of edges are always -constructed from scratch via iterators, we use the `collect()` method -from the `Iterator` trait to collect the nodes and edges into freshly -constructed growable `Vec` values (rather use the `into_maybe_owned` -from the `IntoMaybeOwnedVector` trait as was used in the first example -above). - -The output from this example renders four nodes that make up the -Hasse-diagram for the subsets of the set `{x, y}`. Each edge is -labelled with the ⊆ character (specified using the HTML character -entity `&sube`). 
- -```rust -use dot = graphviz; -use std::str; - -type Nd = uint; -type Ed<'a> = &'a (uint, uint); -struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> } - -pub fn render_to(output: &mut W) { - let nodes = vec!("{x,y}","{x}","{y}","{}"); - let edges = vec!((0,1), (0,2), (1,3), (2,3)); - let graph = Graph { nodes: nodes, edges: edges }; - - dot::render(&graph, output).unwrap() -} - -impl<'a> dot::Labeller<'a, Nd, Ed<'a>> for Graph { - fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example2") } - fn node_id(&'a self, n: &Nd) -> dot::Id<'a> { - dot::Id::new(format!("N{}", n)) - } - fn node_label<'a>(&'a self, n: &Nd) -> dot::LabelText<'a> { - dot::LabelStr(str::Slice(self.nodes.get(*n).as_slice())) - } - fn edge_label<'a>(&'a self, _: &Ed) -> dot::LabelText<'a> { - dot::LabelStr(str::Slice("⊆")) - } -} - -impl<'a> dot::GraphWalk<'a, Nd, Ed<'a>> for Graph { - fn nodes(&self) -> dot::Nodes<'a,Nd> { range(0,self.nodes.len()).collect() } - fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { self.edges.iter().collect() } - fn source(&self, e: &Ed) -> Nd { let & &(s,_) = e; s } - fn target(&self, e: &Ed) -> Nd { let & &(_,t) = e; t } -} - -# pub fn main() { use std::io::MemWriter; render_to(&mut MemWriter::new()) } -``` - -```no_run -# pub fn render_to(output: &mut W) { unimplemented!() } -pub fn main() { - use std::io::File; - let mut f = File::create(&Path::new("example2.dot")); - render_to(&mut f) -} -``` - -The third example is similar to the second, except now each node and -edge now carries a reference to the string label for each node as well -as that node's index. (This is another illustration of how to share -structure with the graph itself, and why one might want to do so.) - -The output from this example is the same as the second example: the -Hasse-diagram for the subsets of the set `{x, y}`. 
- -```rust -use dot = graphviz; -use std::str; - -type Nd<'a> = (uint, &'a str); -type Ed<'a> = (Nd<'a>, Nd<'a>); -struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> } - -pub fn render_to(output: &mut W) { - let nodes = vec!("{x,y}","{x}","{y}","{}"); - let edges = vec!((0,1), (0,2), (1,3), (2,3)); - let graph = Graph { nodes: nodes, edges: edges }; - - dot::render(&graph, output).unwrap() -} - -impl<'a> dot::Labeller<'a, Nd<'a>, Ed<'a>> for Graph { - fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example3") } - fn node_id(&'a self, n: &Nd<'a>) -> dot::Id<'a> { - dot::Id::new(format!("N{:u}", n.val0())) - } - fn node_label<'a>(&'a self, n: &Nd<'a>) -> dot::LabelText<'a> { - let &(i, _) = n; - dot::LabelStr(str::Slice(self.nodes.get(i).as_slice())) - } - fn edge_label<'a>(&'a self, _: &Ed<'a>) -> dot::LabelText<'a> { - dot::LabelStr(str::Slice("⊆")) - } -} - -impl<'a> dot::GraphWalk<'a, Nd<'a>, Ed<'a>> for Graph { - fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> { - self.nodes.iter().map(|s|s.as_slice()).enumerate().collect() - } - fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { - self.edges.iter() - .map(|&(i,j)|((i, self.nodes.get(i).as_slice()), - (j, self.nodes.get(j).as_slice()))) - .collect() - } - fn source(&self, e: &Ed<'a>) -> Nd<'a> { let &(s,_) = e; s } - fn target(&self, e: &Ed<'a>) -> Nd<'a> { let &(_,t) = e; t } -} - -# pub fn main() { use std::io::MemWriter; render_to(&mut MemWriter::new()) } -``` - -```no_run -# pub fn render_to(output: &mut W) { unimplemented!() } -pub fn main() { - use std::io::File; - let mut f = File::create(&Path::new("example3.dot")); - render_to(&mut f) -} -``` - -# References - -* [Graphviz](http://www.graphviz.org/) - -* [DOT language](http://www.graphviz.org/doc/info/lang.html) - -*/ +//! Generate files suitable for use with [Graphviz](http://www.graphviz.org/) +//! +//! The `render` function generates output (e.g. an `output.dot` file) for +//! 
use with [Graphviz](http://www.graphviz.org/) by walking a labelled +//! graph. (Graphviz can then automatically lay out the nodes and edges +//! of the graph, and also optionally render the graph as an image or +//! other [output formats]( +//! http://www.graphviz.org/content/output-formats), such as SVG.) +//! +//! Rather than impose some particular graph data structure on clients, +//! this library exposes two traits that clients can implement on their +//! own structs before handing them over to the rendering function. +//! +//! Note: This library does not yet provide access to the full +//! expressiveness of the [DOT language]( +//! http://www.graphviz.org/doc/info/lang.html). For example, there are +//! many [attributes](http://www.graphviz.org/content/attrs) related to +//! providing layout hints (e.g. left-to-right versus top-down, which +//! algorithm to use, etc). The current intention of this library is to +//! emit a human-readable .dot file with very regular structure suitable +//! for easy post-processing. +//! +//! # Examples +//! +//! The first example uses a very simple graph representation: a list of +//! pairs of ints, representing the edges (the node set is implicit). +//! Each node label is derived directly from the int representing the node, +//! while the edge labels are all empty strings. +//! +//! This example also illustrates how to use `MaybeOwnedVector` to return +//! an owned vector or a borrowed slice as appropriate: we construct the +//! node vector from scratch, but borrow the edge list (rather than +//! constructing a copy of all the edges from scratch). +//! +//! The output from this example renders five nodes, with the first four +//! forming a diamond-shaped acyclic graph and then pointing to the fifth +//! which is cyclic. +//! +//! ```rust +//! use dot = graphviz; +//! use graphviz::maybe_owned_vec::IntoMaybeOwnedVector; +//! +//! type Nd = int; +//! type Ed = (int,int); +//! struct Edges(Vec); +//! +//! 
pub fn render_to(output: &mut W) { +//! let edges = Edges(vec!((0,1), (0,2), (1,3), (2,3), (3,4), (4,4))); +//! dot::render(&edges, output).unwrap() +//! } +//! +//! impl<'a> dot::Labeller<'a, Nd, Ed> for Edges { +//! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example1") } +//! +//! fn node_id(&'a self, n: &Nd) -> dot::Id<'a> { +//! dot::Id::new(format!("N{}", *n)) +//! } +//! } +//! +//! impl<'a> dot::GraphWalk<'a, Nd, Ed> for Edges { +//! fn nodes(&self) -> dot::Nodes<'a,Nd> { +//! // (assumes that |N| \approxeq |E|) +//! let &Edges(ref v) = self; +//! let mut nodes = Vec::with_capacity(v.len()); +//! for &(s,t) in v.iter() { +//! nodes.push(s); nodes.push(t); +//! } +//! nodes.sort(); +//! nodes.dedup(); +//! nodes.into_maybe_owned() +//! } +//! +//! fn edges(&'a self) -> dot::Edges<'a,Ed> { +//! let &Edges(ref edges) = self; +//! edges.as_slice().into_maybe_owned() +//! } +//! +//! fn source(&self, e: &Ed) -> Nd { let &(s,_) = e; s } +//! +//! fn target(&self, e: &Ed) -> Nd { let &(_,t) = e; t } +//! } +//! +//! # pub fn main() { use std::io::MemWriter; render_to(&mut MemWriter::new()) } +//! ``` +//! +//! ```no_run +//! # pub fn render_to(output: &mut W) { unimplemented!() } +//! pub fn main() { +//! use std::io::File; +//! let mut f = File::create(&Path::new("example1.dot")); +//! render_to(&mut f) +//! } +//! ``` +//! +//! Output from first example (in `example1.dot`): +//! +//! ```ignore +//! digraph example1 { +//! N0[label="N0"]; +//! N1[label="N1"]; +//! N2[label="N2"]; +//! N3[label="N3"]; +//! N4[label="N4"]; +//! N0 -> N1[label=""]; +//! N0 -> N2[label=""]; +//! N1 -> N3[label=""]; +//! N2 -> N3[label=""]; +//! N3 -> N4[label=""]; +//! N4 -> N4[label=""]; +//! } +//! ``` +//! +//! The second example illustrates using `node_label` and `edge_label` to +//! add labels to the nodes and edges in the rendered graph. The graph +//! here carries both `nodes` (the label text to use for rendering a +//! 
particular node), and `edges` (again a list of `(source,target)` +//! indices). +//! +//! This example also illustrates how to use a type (in this case the edge +//! type) that shares substructure with the graph: the edge type here is a +//! direct reference to the `(source,target)` pair stored in the graph's +//! internal vector (rather than passing around a copy of the pair +//! itself). Note that this implies that `fn edges(&'a self)` must +//! construct a fresh `Vec<&'a (uint,uint)>` from the `Vec<(uint,uint)>` +//! edges stored in `self`. +//! +//! Since both the set of nodes and the set of edges are always +//! constructed from scratch via iterators, we use the `collect()` method +//! from the `Iterator` trait to collect the nodes and edges into freshly +//! constructed growable `Vec` values (rather use the `into_maybe_owned` +//! from the `IntoMaybeOwnedVector` trait as was used in the first example +//! above). +//! +//! The output from this example renders four nodes that make up the +//! Hasse-diagram for the subsets of the set `{x, y}`. Each edge is +//! labelled with the ⊆ character (specified using the HTML character +//! entity `&sube`). +//! +//! ```rust +//! use dot = graphviz; +//! use std::str; +//! +//! type Nd = uint; +//! type Ed<'a> = &'a (uint, uint); +//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> } +//! +//! pub fn render_to(output: &mut W) { +//! let nodes = vec!("{x,y}","{x}","{y}","{}"); +//! let edges = vec!((0,1), (0,2), (1,3), (2,3)); +//! let graph = Graph { nodes: nodes, edges: edges }; +//! +//! dot::render(&graph, output).unwrap() +//! } +//! +//! impl<'a> dot::Labeller<'a, Nd, Ed<'a>> for Graph { +//! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example2") } +//! fn node_id(&'a self, n: &Nd) -> dot::Id<'a> { +//! dot::Id::new(format!("N{}", n)) +//! } +//! fn node_label<'a>(&'a self, n: &Nd) -> dot::LabelText<'a> { +//! dot::LabelStr(str::Slice(self.nodes.get(*n).as_slice())) +//! } +//! 
fn edge_label<'a>(&'a self, _: &Ed) -> dot::LabelText<'a> { +//! dot::LabelStr(str::Slice("⊆")) +//! } +//! } +//! +//! impl<'a> dot::GraphWalk<'a, Nd, Ed<'a>> for Graph { +//! fn nodes(&self) -> dot::Nodes<'a,Nd> { range(0,self.nodes.len()).collect() } +//! fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { self.edges.iter().collect() } +//! fn source(&self, e: &Ed) -> Nd { let & &(s,_) = e; s } +//! fn target(&self, e: &Ed) -> Nd { let & &(_,t) = e; t } +//! } +//! +//! # pub fn main() { use std::io::MemWriter; render_to(&mut MemWriter::new()) } +//! ``` +//! +//! ```no_run +//! # pub fn render_to(output: &mut W) { unimplemented!() } +//! pub fn main() { +//! use std::io::File; +//! let mut f = File::create(&Path::new("example2.dot")); +//! render_to(&mut f) +//! } +//! ``` +//! +//! The third example is similar to the second, except now each node and +//! edge now carries a reference to the string label for each node as well +//! as that node's index. (This is another illustration of how to share +//! structure with the graph itself, and why one might want to do so.) +//! +//! The output from this example is the same as the second example: the +//! Hasse-diagram for the subsets of the set `{x, y}`. +//! +//! ```rust +//! use dot = graphviz; +//! use std::str; +//! +//! type Nd<'a> = (uint, &'a str); +//! type Ed<'a> = (Nd<'a>, Nd<'a>); +//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> } +//! +//! pub fn render_to(output: &mut W) { +//! let nodes = vec!("{x,y}","{x}","{y}","{}"); +//! let edges = vec!((0,1), (0,2), (1,3), (2,3)); +//! let graph = Graph { nodes: nodes, edges: edges }; +//! +//! dot::render(&graph, output).unwrap() +//! } +//! +//! impl<'a> dot::Labeller<'a, Nd<'a>, Ed<'a>> for Graph { +//! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example3") } +//! fn node_id(&'a self, n: &Nd<'a>) -> dot::Id<'a> { +//! dot::Id::new(format!("N{:u}", n.val0())) +//! } +//! fn node_label<'a>(&'a self, n: &Nd<'a>) -> dot::LabelText<'a> { +//! 
let &(i, _) = n; +//! dot::LabelStr(str::Slice(self.nodes.get(i).as_slice())) +//! } +//! fn edge_label<'a>(&'a self, _: &Ed<'a>) -> dot::LabelText<'a> { +//! dot::LabelStr(str::Slice("⊆")) +//! } +//! } +//! +//! impl<'a> dot::GraphWalk<'a, Nd<'a>, Ed<'a>> for Graph { +//! fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> { +//! self.nodes.iter().map(|s|s.as_slice()).enumerate().collect() +//! } +//! fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { +//! self.edges.iter() +//! .map(|&(i,j)|((i, self.nodes.get(i).as_slice()), +//! (j, self.nodes.get(j).as_slice()))) +//! .collect() +//! } +//! fn source(&self, e: &Ed<'a>) -> Nd<'a> { let &(s,_) = e; s } +//! fn target(&self, e: &Ed<'a>) -> Nd<'a> { let &(_,t) = e; t } +//! } +//! +//! # pub fn main() { use std::io::MemWriter; render_to(&mut MemWriter::new()) } +//! ``` +//! +//! ```no_run +//! # pub fn render_to(output: &mut W) { unimplemented!() } +//! pub fn main() { +//! use std::io::File; +//! let mut f = File::create(&Path::new("example3.dot")); +//! render_to(&mut f) +//! } +//! ``` +//! +//! # References +//! +//! * [Graphviz](http://www.graphviz.org/) +//! +//! * [DOT language](http://www.graphviz.org/doc/info/lang.html) #![crate_id = "graphviz#0.11.0"] // NOTE: remove after stage0 #![crate_name = "graphviz"] diff --git a/src/libgreen/context.rs b/src/libgreen/context.rs index 8c60f3d9fe1dc..3ab74c34591b2 100644 --- a/src/libgreen/context.rs +++ b/src/libgreen/context.rs @@ -78,12 +78,11 @@ impl Context { } } - /* Switch contexts - - Suspend the current execution context and resume another by - saving the registers values of the executing thread to a Context - then loading the registers from a previously saved Context. - */ + // Switch contexts + // + // Suspend the current execution context and resume another by + // saving the registers values of the executing thread to a Context + // then loading the registers from a previously saved Context. 
pub fn swap(out_context: &mut Context, in_context: &Context) { rtdebug!("swapping contexts"); let out_regs: &mut Registers = match out_context { diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs index 357644aed03f9..02a712d1b03d6 100644 --- a/src/libgreen/lib.rs +++ b/src/libgreen/lib.rs @@ -185,7 +185,7 @@ //! //! // Pin a task to the spawned scheduler //! TaskBuilder::new().green_pinned(&mut pool, &mut handle).spawn(proc() { -//! /* ... */ +//! // ... //! }); //! //! // Handles keep schedulers alive, so be sure to drop all handles before diff --git a/src/libhexfloat/lib.rs b/src/libhexfloat/lib.rs index 17c71c6365e1b..dd671ab03541b 100644 --- a/src/libhexfloat/lib.rs +++ b/src/libhexfloat/lib.rs @@ -8,33 +8,30 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -Syntax extension to create floating point literals from hexadecimal strings - -Once loaded, hexfloat!() is called with a string containing the hexadecimal -floating-point literal, and an optional type (f32 or f64). -If the type is omitted, the literal is treated the same as a normal unsuffixed -literal. - -# Examples - -To load the extension and use it: - -```rust,ignore -#[phase(plugin)] -extern crate hexfloat; - -fn main() { - let val = hexfloat!("0x1.ffffb4", f32); -} -``` - -# References - -* [ExploringBinary: hexadecimal floating point constants] - (http://www.exploringbinary.com/hexadecimal-floating-point-constants/) - -*/ +//! Syntax extension to create floating point literals from hexadecimal strings +//! +//! Once loaded, hexfloat!() is called with a string containing the hexadecimal +//! floating-point literal, and an optional type (f32 or f64). +//! If the type is omitted, the literal is treated the same as a normal unsuffixed +//! literal. +//! +//! # Examples +//! +//! To load the extension and use it: +//! +//! ```rust,ignore +//! #[phase(plugin)] +//! extern crate hexfloat; +//! +//! fn main() { +//! 
let val = hexfloat!("0x1.ffffb4", f32); +//! } +//! ``` +//! +//! # References +//! +//! * [ExploringBinary: hexadecimal floating point constants] +//! (http://www.exploringbinary.com/hexadecimal-floating-point-constants/) #![crate_id = "hexfloat#0.11.0"] // NOTE: remove after stage0 #![crate_name = "hexfloat"] diff --git a/src/liblibc/lib.rs b/src/liblibc/lib.rs index e0dd57f6ae92f..b5f19e85d6efc 100644 --- a/src/liblibc/lib.rs +++ b/src/liblibc/lib.rs @@ -8,6 +8,58 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +//! Bindings for the C standard library and other platform libraries +//! +//! **NOTE:** These are *architecture and libc* specific. On Linux, these +//! bindings are only correct for glibc. +//! +//! This module contains bindings to the C standard library, organized into +//! modules by their defining standard. Additionally, it contains some assorted +//! platform-specific definitions. For convenience, most functions and types +//! are reexported, so `use libc::*` will import the available C bindings as +//! appropriate for the target platform. The exact set of functions available +//! are platform specific. +//! +//! *Note:* Because these definitions are platform-specific, some may not appear +//! in the generated documentation. +//! +//! We consider the following specs reasonably normative with respect to +//! interoperating with the C standard library (libc/msvcrt): +//! +//! * ISO 9899:1990 ('C95', 'ANSI C', 'Standard C'), NA1, 1995. +//! * ISO 9899:1999 ('C99' or 'C9x'). +//! * ISO 9945:1988 / IEEE 1003.1-1988 ('POSIX.1'). +//! * ISO 9945:2001 / IEEE 1003.1-2001 ('POSIX:2001', 'SUSv3'). +//! * ISO 9945:2008 / IEEE 1003.1-2008 ('POSIX:2008', 'SUSv4'). +//! +//! Note that any reference to the 1996 revision of POSIX, or any revs between +//! 1990 (when '88 was approved at ISO) and 2001 (when the next actual +//! revision-revision happened), are merely additions of other chapters (1b and +//! 
1c) outside the core interfaces. +//! +//! Despite having several names each, these are *reasonably* coherent +//! point-in-time, list-of-definition sorts of specs. You can get each under a +//! variety of names but will wind up with the same definition in each case. +//! +//! See standards(7) in linux-manpages for more details. +//! +//! Our interface to these libraries is complicated by the non-universality of +//! conformance to any of them. About the only thing universally supported is +//! the first (C95), beyond that definitions quickly become absent on various +//! platforms. +//! +//! We therefore wind up dividing our module-space up (mostly for the sake of +//! sanity while editing, filling-in-details and eliminating duplication) into +//! definitions common-to-all (held in modules named c95, c99, posix88, posix01 +//! and posix08) and definitions that appear only on *some* platforms (named +//! 'extra'). This would be things like significant OSX foundation kit, or win32 +//! library kernel32.dll, or various fancy glibc, linux or BSD extensions. +//! +//! In addition to the per-platform 'extra' modules, we define a module of +//! 'common BSD' libc routines that never quite made it into POSIX but show up +//! in multiple derived systems. This is the 4.4BSD r2 / 1995 release, the final +//! one from Berkeley after the lawsuits died down and the CSRG dissolved. + #![feature(globs)] #![crate_id = "libc#0.11.0"] // NOTE: remove after a stage0 snap #![crate_name = "libc"] @@ -21,60 +73,6 @@ html_playground_url = "http://play.rust-lang.org/")] #![allow(unused_attribute)] // NOTE: remove after stage0 -/*! -* Bindings for the C standard library and other platform libraries -* -* **NOTE:** These are *architecture and libc* specific. On Linux, these -* bindings are only correct for glibc. -* -* This module contains bindings to the C standard library, organized into -* modules by their defining standard. 
Additionally, it contains some assorted -* platform-specific definitions. For convenience, most functions and types -* are reexported, so `use libc::*` will import the available C bindings as -* appropriate for the target platform. The exact set of functions available -* are platform specific. -* -* *Note:* Because these definitions are platform-specific, some may not appear -* in the generated documentation. -* -* We consider the following specs reasonably normative with respect to -* interoperating with the C standard library (libc/msvcrt): -* -* * ISO 9899:1990 ('C95', 'ANSI C', 'Standard C'), NA1, 1995. -* * ISO 9899:1999 ('C99' or 'C9x'). -* * ISO 9945:1988 / IEEE 1003.1-1988 ('POSIX.1'). -* * ISO 9945:2001 / IEEE 1003.1-2001 ('POSIX:2001', 'SUSv3'). -* * ISO 9945:2008 / IEEE 1003.1-2008 ('POSIX:2008', 'SUSv4'). -* -* Note that any reference to the 1996 revision of POSIX, or any revs between -* 1990 (when '88 was approved at ISO) and 2001 (when the next actual -* revision-revision happened), are merely additions of other chapters (1b and -* 1c) outside the core interfaces. -* -* Despite having several names each, these are *reasonably* coherent -* point-in-time, list-of-definition sorts of specs. You can get each under a -* variety of names but will wind up with the same definition in each case. -* -* See standards(7) in linux-manpages for more details. -* -* Our interface to these libraries is complicated by the non-universality of -* conformance to any of them. About the only thing universally supported is -* the first (C95), beyond that definitions quickly become absent on various -* platforms. -* -* We therefore wind up dividing our module-space up (mostly for the sake of -* sanity while editing, filling-in-details and eliminating duplication) into -* definitions common-to-all (held in modules named c95, c99, posix88, posix01 -* and posix08) and definitions that appear only on *some* platforms (named -* 'extra'). 
This would be things like significant OSX foundation kit, or win32 -* library kernel32.dll, or various fancy glibc, linux or BSD extensions. -* -* In addition to the per-platform 'extra' modules, we define a module of -* 'common BSD' libc routines that never quite made it into POSIX but show up -* in multiple derived systems. This is the 4.4BSD r2 / 1995 release, the final -* one from Berkeley after the lawsuits died down and the CSRG dissolved. -*/ - #![allow(non_camel_case_types)] #![allow(non_snake_case_functions)] #![allow(non_uppercase_statics)] @@ -315,20 +313,19 @@ pub mod types { // Standard types that are opaque or common, so are not per-target. pub mod common { pub mod c95 { - /** - Type used to construct void pointers for use with C. - - This type is only useful as a pointer target. Do not use it as a - return type for FFI functions which have the `void` return type in - C. Use the unit type `()` or omit the return type instead. - - For LLVM to recognize the void pointer type and by extension - functions like malloc(), we need to have it represented as i8* in - LLVM bitcode. The enum used here ensures this and prevents misuse - of the "raw" type by only having private variants.. We need two - variants, because the compiler complains about the repr attribute - otherwise. - */ + /// Type used to construct void pointers for use with C. + /// + /// This type is only useful as a pointer target. Do not use it + /// as a return type for FFI functions which have the `void` + /// return type in C. Use the unit type `()` or omit the return + /// type instead. + /// + /// For LLVM to recognize the void pointer type and by extension + /// functions like malloc(), we need to have it represented + /// as i8* in LLVM bitcode. The enum used here ensures this and + /// prevents misuse of the "raw" type by only having private + /// variants.. We need two variants, because the compiler + /// complains about the repr attribute otherwise. 
#[repr(u8)] pub enum c_void { __variant1, diff --git a/src/liblog/lib.rs b/src/liblog/lib.rs index 33d1cc87b7312..df761919b5290 100644 --- a/src/liblog/lib.rs +++ b/src/liblog/lib.rs @@ -8,102 +8,101 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Utilities for program-wide and customizable logging - -## Example - -``` -#![feature(phase)] -#[phase(plugin, link)] extern crate log; - -fn main() { - debug!("this is a debug {}", "message"); - error!("this is printed by default"); - - if log_enabled!(log::INFO) { - let x = 3i * 4i; // expensive computation - info!("the answer was: {}", x); - } -} -``` - -## Logging Macros - -There are five macros that the logging subsystem uses: - -* `log!(level, ...)` - the generic logging macro, takes a level as a u32 and any - related `format!` arguments -* `debug!(...)` - a macro hard-wired to the log level of `DEBUG` -* `info!(...)` - a macro hard-wired to the log level of `INFO` -* `warn!(...)` - a macro hard-wired to the log level of `WARN` -* `error!(...)` - a macro hard-wired to the log level of `ERROR` - -All of these macros use the same style of syntax as the `format!` syntax -extension. Details about the syntax can be found in the documentation of -`std::fmt` along with the Rust tutorial/manual. - -If you want to check at runtime if a given logging level is enabled (e.g. if the -information you would want to log is expensive to produce), you can use the -following macro: - -* `log_enabled!(level)` - returns true if logging of the given level is enabled - -## Enabling logging - -Log levels are controlled on a per-module basis, and by default all logging is -disabled except for `error!` (a log level of 1). Logging is controlled via the -`RUST_LOG` environment variable. The value of this environment variable is a -comma-separated list of logging directives. 
A logging directive is of the form: - -```text -path::to::module=log_level -``` - -The path to the module is rooted in the name of the crate it was compiled for, -so if your program is contained in a file `hello.rs`, for example, to turn on -logging for this file you would use a value of `RUST_LOG=hello`. -Furthermore, this path is a prefix-search, so all modules nested in the -specified module will also have logging enabled. - -The actual `log_level` is optional to specify. If omitted, all logging will be -enabled. If specified, the it must be either a numeric in the range of 1-255, or -it must be one of the strings `debug`, `error`, `info`, or `warn`. If a numeric -is specified, then all logging less than or equal to that numeral is enabled. -For example, if logging level 3 is active, error, warn, and info logs will be -printed, but debug will be omitted. - -As the log level for a module is optional, the module to enable logging for is -also optional. If only a `log_level` is provided, then the global log level for -all modules is set to this value. - -Some examples of valid values of `RUST_LOG` are: - -```text -hello // turns on all logging for the 'hello' module -info // turns on all info logging -hello=debug // turns on debug logging for 'hello' -hello=3 // turns on info logging for 'hello' -hello,std::option // turns on hello, and std's option logging -error,hello=warn // turn on global error logging and also warn for hello -``` - -## Performance and Side Effects - -Each of these macros will expand to code similar to: - -```rust,ignore -if log_level <= my_module_log_level() { - ::log::log(log_level, format!(...)); -} -``` - -What this means is that each of these macros are very cheap at runtime if -they're turned off (just a load and an integer comparison). This also means that -if logging is disabled, none of the components of the log will be executed. - -*/ +//! Utilities for program-wide and customizable logging +//! +//! ## Example +//! +//! ``` +//! 
#![feature(phase)] +//! #[phase(plugin, link)] extern crate log; +//! +//! fn main() { +//! debug!("this is a debug {}", "message"); +//! error!("this is printed by default"); +//! +//! if log_enabled!(log::INFO) { +//! let x = 3i * 4i; // expensive computation +//! info!("the answer was: {}", x); +//! } +//! } +//! ``` +//! +//! ## Logging Macros +//! +//! There are five macros that the logging subsystem uses: +//! +//! * `log!(level, ...)` - the generic logging macro, takes a level as a u32 +//! and any related `format!` arguments +//! * `debug!(...)` - a macro hard-wired to the log level of `DEBUG` +//! * `info!(...)` - a macro hard-wired to the log level of `INFO` +//! * `warn!(...)` - a macro hard-wired to the log level of `WARN` +//! * `error!(...)` - a macro hard-wired to the log level of `ERROR` +//! +//! All of these macros use the same style of syntax as the `format!` syntax +//! extension. Details about the syntax can be found in the documentation of +//! `std::fmt` along with the Rust tutorial/manual. +//! +//! If you want to check at runtime if a given logging level is enabled (e.g. +//! if the information you would want to log is expensive to produce), you +//! can use the following macro: +//! +//! * `log_enabled!(level)` - returns true if logging of the given level +//! is enabled +//! +//! ## Enabling logging +//! +//! Log levels are controlled on a per-module basis, and by default all +//! logging is disabled except for `error!` (a log level of 1). Logging is +//! controlled via the `RUST_LOG` environment variable. The value of this +//! environment variable is a comma-separated list of logging directives. +//! A logging directive is of the form: +//! +//! ```text +//! path::to::module=log_level +//! ``` +//! +//! The path to the module is rooted in the name of the crate it was compiled +//! for, so if your program is contained in a file `hello.rs`, for example, to +//! turn on logging for this file you would use a value of `RUST_LOG=hello`. +//! 
Furthermore, this path is a prefix-search, so all modules nested in the +//! specified module will also have logging enabled. +//! +//! The actual `log_level` is optional to specify. If omitted, all logging +//! will be enabled. If specified, the it must be either a numeric in the range +//! of 1-255, or it must be one of the strings `debug`, `error`, `info`, or +//! `warn`. If a numeric is specified, then all logging less than or equal to +//! that numeral is enabled. For example, if logging level 3 is active, error, +//! warn, and info logs will be printed, but debug will be omitted. +//! +//! As the log level for a module is optional, the module to enable logging for +//! is also optional. If only a `log_level` is provided, then the global log +//! level for all modules is set to this value. +//! +//! Some examples of valid values of `RUST_LOG` are: +//! +//! ```text +//! hello // turns on all logging for the 'hello' module +//! info // turns on all info logging +//! hello=debug // turns on debug logging for 'hello' +//! hello=3 // turns on info logging for 'hello' +//! hello,std::option // turns on hello, and std's option logging +//! error,hello=warn // turn on global error logging and also warn for hello +//! ``` +//! +//! ## Performance and Side Effects +//! +//! Each of these macros will expand to code similar to: +//! +//! ```rust,ignore +//! if log_level <= my_module_log_level() { +//! ::log::log(log_level, format!(...)); +//! } +//! ``` +//! +//! What this means is that each of these macros are very cheap at runtime if +//! they're turned off (just a load and an integer comparison). This also +//! means that if logging is disabled, none of the components of the log will +//! be executed. 
#![crate_id = "log#0.11.0"] // NOTE: Remove after stage0 #![crate_name = "log"] diff --git a/src/libnative/io/process.rs b/src/libnative/io/process.rs index 6fab73115cf9a..7f46d2b672fb4 100644 --- a/src/libnative/io/process.rs +++ b/src/libnative/io/process.rs @@ -29,13 +29,11 @@ use super::util; #[cfg(unix)] helper_init!(static mut HELPER: Helper) -/** - * A value representing a child process. - * - * The lifetime of this value is linked to the lifetime of the actual - * process - the Process destructor calls self.finish() which waits - * for the process to terminate. - */ +/// A value representing a child process. +/// +/// The lifetime of this value is linked to the lifetime of the actual +/// process - the Process destructor calls self.finish() which waits +/// for the process to terminate. pub struct Process { /// The unique id of the process (this should never be negative). pid: pid_t, @@ -840,16 +838,14 @@ fn translate_status(status: c_int) -> rtio::ProcessExit { } } -/** - * Waits for a process to exit and returns the exit code, failing - * if there is no process with the specified id. - * - * Note that this is private to avoid race conditions on unix where if - * a user calls waitpid(some_process.get_id()) then some_process.finish() - * and some_process.destroy() and some_process.finalize() will then either - * operate on a none-existent process or, even worse, on a newer process - * with the same id. - */ +/// Waits for a process to exit and returns the exit code, failing +/// if there is no process with the specified id. +/// +/// Note that this is private to avoid race conditions on unix where if +/// a user calls waitpid(some_process.get_id()) then some_process.finish() +/// and some_process.destroy() and some_process.finalize() will then either +/// operate on a none-existent process or, even worse, on a newer process +/// with the same id. 
#[cfg(windows)] fn waitpid(pid: pid_t, deadline: u64) -> IoResult { use libc::types::os::arch::extra::DWORD; diff --git a/src/libnum/bigint.rs b/src/libnum/bigint.rs index cc3753def59f4..a9eec941dc5b9 100644 --- a/src/libnum/bigint.rs +++ b/src/libnum/bigint.rs @@ -8,13 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -A Big integer (signed version: `BigInt`, unsigned version: `BigUint`). - -A `BigUint` is represented as an array of `BigDigit`s. -A `BigInt` is a combination of `BigUint` and `Sign`. -*/ +//! A Big integer (signed version: `BigInt`, unsigned version: `BigUint`). +//! +//! A `BigUint` is represented as an array of `BigDigit`s. +//! A `BigInt` is a combination of `BigUint` and `Sign`. use Integer; use rand::Rng; @@ -28,15 +25,11 @@ use std::num::{Zero, One, ToStrRadix, FromStrRadix}; use std::string::String; use std::{uint, i64, u64}; -/** -A `BigDigit` is a `BigUint`'s composing element. -*/ +/// A `BigDigit` is a `BigUint`'s composing element. pub type BigDigit = u32; -/** -A `DoubleBigDigit` is the internal type used to do the computations. Its -size is the double of the size of `BigDigit`. -*/ +/// A `DoubleBigDigit` is the internal type used to do the computations. +/// Its size is the double of the size of `BigDigit`. pub type DoubleBigDigit = u64; pub static ZERO_BIG_DIGIT: BigDigit = 0; @@ -70,12 +63,10 @@ pub mod BigDigit { } } -/** -A big unsigned integer type. - -A `BigUint`-typed value `BigUint { data: vec!(a, b, c) }` represents a number -`(a + b * BigDigit::base + c * BigDigit::base^2)`. -*/ +/// A big unsigned integer type. +/// +/// A `BigUint`-typed value `BigUint { data: vec!(a, b, c) }` represents +/// a number/ `(a + b * BigDigit::base + c * BigDigit::base^2)`. 
#[deriving(Clone)] pub struct BigUint { data: Vec @@ -223,10 +214,8 @@ impl Sub for BigUint { - (*bi as DoubleBigDigit) - (borrow as DoubleBigDigit) ); - /* - hi * (base) + lo == 1*(base) + ai - bi - borrow - => ai - bi - borrow < 0 <=> hi == 0 - */ + // hi * (base) + lo == 1*(base) + ai - bi - borrow + // => ai - bi - borrow < 0 <=> hi == 0 borrow = if hi == 0 { 1 } else { 0 }; lo }).collect(); @@ -460,11 +449,9 @@ impl Integer for BigUint { } } - /** - * Calculates the Greatest Common Divisor (GCD) of the number and `other` - * - * The result is always positive - */ + /// Calculates the Greatest Common Divisor (GCD) of the number and `other` + /// + /// The result is always positive #[inline] fn gcd(&self, other: &BigUint) -> BigUint { // Use Euclid's algorithm @@ -478,9 +465,7 @@ impl Integer for BigUint { return n; } - /** - * Calculates the Lowest Common Multiple (LCM) of the number and `other` - */ + /// Calculates the Lowest Common Multiple (LCM) of the number and `other` #[inline] fn lcm(&self, other: &BigUint) -> BigUint { ((*self * *other) / self.gcd(other)) } @@ -1068,19 +1053,15 @@ impl Integer for BigInt { } } - /** - * Calculates the Greatest Common Divisor (GCD) of the number and `other` - * - * The result is always positive - */ + /// Calculates the Greatest Common Divisor (GCD) of the number and `other` + /// + /// The result is always positive #[inline] fn gcd(&self, other: &BigInt) -> BigInt { BigInt::from_biguint(Plus, self.data.gcd(&other.data)) } - /** - * Calculates the Lowest Common Multiple (LCM) of the number and `other` - */ + /// Calculates the Lowest Common Multiple (LCM) of the number and `other` #[inline] fn lcm(&self, other: &BigInt) -> BigInt { BigInt::from_biguint(Plus, self.data.lcm(&other.data)) diff --git a/src/libnum/complex.rs b/src/libnum/complex.rs index 9ee80d283cf92..e4b6a2ad7ce22 100644 --- a/src/libnum/complex.rs +++ b/src/libnum/complex.rs @@ -36,10 +36,8 @@ impl Complex { Complex { re: re, im: im } } - /** - Returns 
the square of the norm (since `T` doesn't necessarily - have a sqrt function), i.e. `re^2 + im^2`. - */ + /// Returns the square of the norm (since `T` doesn't necessarily + /// have a sqrt function), i.e. `re^2 + im^2`. #[inline] pub fn norm_sqr(&self) -> T { self.re * self.re + self.im * self.im @@ -101,7 +99,7 @@ impl Complex { } } -/* arithmetic */ +// arithmetic // (a + i b) + (c + i d) == (a + c) + i (b + d) impl Add, Complex> for Complex { #[inline] @@ -143,7 +141,7 @@ impl Neg> for Complex { } } -/* constants */ +// constants impl Zero for Complex { #[inline] fn zero() -> Complex { @@ -163,7 +161,7 @@ impl One for Complex { } } -/* string conversions */ +// string conversions impl fmt::Show for Complex { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.im < Zero::zero() { diff --git a/src/libnum/rational.rs b/src/libnum/rational.rs index 1792f282eca70..a0dfc3eb7965e 100644 --- a/src/libnum/rational.rs +++ b/src/libnum/rational.rs @@ -171,7 +171,7 @@ impl Ratio { } } -/* Comparisons */ +// Comparisons // comparing a/b and c/d is the same as comparing a*d and b*c, so we // abstract that pattern. The following macro takes a trait and either @@ -199,7 +199,7 @@ cmp_impl!(impl PartialOrd, lt -> bool, gt -> bool, le -> bool, ge -> bool, cmp_impl!(impl Eq, ) cmp_impl!(impl Ord, cmp -> cmp::Ordering) -/* Arithmetic */ +// Arithmetic // a/b * c/d = (a*c)/(b*d) impl Mul,Ratio> for Ratio { @@ -249,7 +249,7 @@ impl } } -/* Constants */ +// Constants impl Zero for Ratio { #[inline] @@ -304,7 +304,7 @@ impl fn is_negative(&self) -> bool { *self < Zero::zero() } } -/* String conversions */ +// String conversions impl fmt::Show for Ratio { /// Renders as `numer/denom`. If denom=1, renders as numer. 
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { diff --git a/src/librand/distributions/mod.rs b/src/librand/distributions/mod.rs index faafbc4421e24..58e47a7c4d04c 100644 --- a/src/librand/distributions/mod.rs +++ b/src/librand/distributions/mod.rs @@ -8,17 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -Sampling from random distributions. - -This is a generalization of `Rand` to allow parameters to control the -exact properties of the generated values, e.g. the mean and standard -deviation of a normal distribution. The `Sample` trait is the most -general, and allows for generating values that change some state -internally. The `IndependentSample` trait is for generating values -that do not need to record state. - -*/ +//! Sampling from random distributions. +//! +//! This is a generalization of `Rand` to allow parameters to control the +//! exact properties of the generated values, e.g. the mean and standard +//! deviation of a normal distribution. The `Sample` trait is the most +//! general, and allows for generating values that change some state +//! internally. The `IndependentSample` trait is for generating values +//! that do not need to record state. #![experimental] diff --git a/src/librustc/back/link.rs b/src/librustc/back/link.rs index f1856850701b2..2c7bda5f2ee75 100644 --- a/src/librustc/back/link.rs +++ b/src/librustc/back/link.rs @@ -198,7 +198,7 @@ pub mod write { lib::llvm::CodeModelDefault, reloc_model, opt_level, - true /* EnableSegstk */, + true, // EnableSegstk use_softfp, no_fp_elim, ffunction_sections, @@ -493,57 +493,55 @@ pub mod write { } -/* - * Name mangling and its relationship to metadata. This is complex. Read - * carefully. - * - * The semantic model of Rust linkage is, broadly, that "there's no global - * namespace" between crates. 
Our aim is to preserve the illusion of this - * model despite the fact that it's not *quite* possible to implement on - * modern linkers. We initially didn't use system linkers at all, but have - * been convinced of their utility. - * - * There are a few issues to handle: - * - * - Linkers operate on a flat namespace, so we have to flatten names. - * We do this using the C++ namespace-mangling technique. Foo::bar - * symbols and such. - * - * - Symbols with the same name but different types need to get different - * linkage-names. We do this by hashing a string-encoding of the type into - * a fixed-size (currently 16-byte hex) cryptographic hash function (CHF: - * we use SHA256) to "prevent collisions". This is not airtight but 16 hex - * digits on uniform probability means you're going to need 2**32 same-name - * symbols in the same process before you're even hitting birthday-paradox - * collision probability. - * - * - Symbols in different crates but with same names "within" the crate need - * to get different linkage-names. - * - * - The hash shown in the filename needs to be predictable and stable for - * build tooling integration. It also needs to be using a hash function - * which is easy to use from Python, make, etc. - * - * So here is what we do: - * - * - Consider the package id; every crate has one (specified with crate_id - * attribute). If a package id isn't provided explicitly, we infer a - * versionless one from the output name. The version will end up being 0.0 - * in this case. CNAME and CVERS are taken from this package id. For - * example, github.com/mozilla/CNAME#CVERS. - * - * - Define CMH as SHA256(crateid). - * - * - Define CMH8 as the first 8 characters of CMH. 
- * - * - Compile our crate to lib CNAME-CMH8-CVERS.so - * - * - Define STH(sym) as SHA256(CMH, type_str(sym)) - * - * - Suffix a mangled sym with ::STH@CVERS, so that it is unique in the - * name, non-name metadata, and type sense, and versioned in the way - * system linkers understand. - */ +// Name mangling and its relationship to metadata. This is complex. Read +// carefully. +// +// The semantic model of Rust linkage is, broadly, that "there's no global +// namespace" between crates. Our aim is to preserve the illusion of this +// model despite the fact that it's not *quite* possible to implement on +// modern linkers. We initially didn't use system linkers at all, but have +// been convinced of their utility. +// +// There are a few issues to handle: +// +// - Linkers operate on a flat namespace, so we have to flatten names. +// We do this using the C++ namespace-mangling technique. Foo::bar +// symbols and such. +// +// - Symbols with the same name but different types need to get different +// linkage-names. We do this by hashing a string-encoding of the type into +// a fixed-size (currently 16-byte hex) cryptographic hash function (CHF: +// we use SHA256) to "prevent collisions". This is not airtight but 16 hex +// digits on uniform probability means you're going to need 2**32 same-name +// symbols in the same process before you're even hitting birthday-paradox +// collision probability. +// +// - Symbols in different crates but with same names "within" the crate need +// to get different linkage-names. +// +// - The hash shown in the filename needs to be predictable and stable for +// build tooling integration. It also needs to be using a hash function +// which is easy to use from Python, make, etc. +// +// So here is what we do: +// +// - Consider the package id; every crate has one (specified with crate_id +// attribute). If a package id isn't provided explicitly, we infer a +// versionless one from the output name. 
The version will end up being 0.0 +// in this case. CNAME and CVERS are taken from this package id. For +// example, github.com/mozilla/CNAME#CVERS. +// +// - Define CMH as SHA256(crateid). +// +// - Define CMH8 as the first 8 characters of CMH. +// +// - Compile our crate to lib CNAME-CMH8-CVERS.so +// +// - Define STH(sym) as SHA256(CMH, type_str(sym)) +// +// - Suffix a mangled sym with ::STH@CVERS, so that it is unique in the +// name, non-name metadata, and type sense, and versioned in the way +// system linkers understand. pub fn find_crate_name(sess: Option<&Session>, attrs: &[ast::Attribute], diff --git a/src/librustc/driver/driver.rs b/src/librustc/driver/driver.rs index 16605c060170f..ca6897fe29df2 100644 --- a/src/librustc/driver/driver.rs +++ b/src/librustc/driver/driver.rs @@ -106,10 +106,8 @@ pub fn compile_input(sess: Session, phase_6_link_output(&sess, &trans, &outputs); } -/** - * The name used for source code that doesn't originate in a file - * (e.g. source from stdin or a string) - */ +/// The name used for source code that doesn't originate in a file +/// (e.g. source from stdin or a string) pub fn anon_src() -> String { "".to_string() } diff --git a/src/librustc/front/test.rs b/src/librustc/front/test.rs index b2c6840ad2272..36cec8da99fb1 100644 --- a/src/librustc/front/test.rs +++ b/src/librustc/front/test.rs @@ -270,23 +270,19 @@ fn add_test_module(cx: &TestCtxt, m: &ast::Mod) -> ast::Mod { } } -/* - -We're going to be building a module that looks more or less like: - -mod __test { - #![!resolve_unexported] - extern crate test (name = "test", vers = "..."); - fn main() { - test::test_main_static(::os::args().as_slice(), tests) - } - - static tests : &'static [test::TestDescAndFn] = &[ - ... the list of tests in the crate ... 
- ]; -} - -*/ +// We're going to be building a module that looks more or less like: +// +// mod __test { +// #![!resolve_unexported] +// extern crate test (name = "test", vers = "..."); +// fn main() { +// test::test_main_static(::os::args().as_slice(), tests) +// } +// +// static tests : &'static [test::TestDescAndFn] = &[ +// ... the list of tests in the crate ... +// ]; +// } fn mk_std(cx: &TestCtxt) -> ast::ViewItem { let id_test = token::str_to_ident("test"); diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index eee909f59e304..6595d0c4faa43 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -8,15 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -The Rust compiler. - -# Note - -This API is completely unstable and subject to change. - -*/ +//! The Rust compiler. +//! +//! # Note +//! +//! This API is completely unstable and subject to change. #![crate_id = "rustc#0.11.0"] // NOTE: remove after stage0 #![crate_name = "rustc"] diff --git a/src/librustc/lib/llvm.rs b/src/librustc/lib/llvm.rs index d07e74493be96..445538a13188f 100644 --- a/src/librustc/lib/llvm.rs +++ b/src/librustc/lib/llvm.rs @@ -350,7 +350,7 @@ pub mod llvm { // set of the libraries we need to link to LLVM for. #[link(name = "rustllvm", kind = "static")] extern { - /* Create and destroy contexts. */ + // Create and destroy contexts. pub fn LLVMContextCreate() -> ContextRef; pub fn LLVMContextDispose(C: ContextRef); pub fn LLVMGetMDKindIDInContext(C: ContextRef, @@ -358,34 +358,34 @@ pub mod llvm { SLen: c_uint) -> c_uint; - /* Create and destroy modules. */ + // Create and destroy modules. pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: ContextRef) -> ModuleRef; pub fn LLVMGetModuleContext(M: ModuleRef) -> ContextRef; pub fn LLVMDisposeModule(M: ModuleRef); - /** Data layout. See Module::getDataLayout. */ + /// Data layout. See Module::getDataLayout. 
pub fn LLVMGetDataLayout(M: ModuleRef) -> *const c_char; pub fn LLVMSetDataLayout(M: ModuleRef, Triple: *const c_char); - /** Target triple. See Module::getTargetTriple. */ + /// Target triple. See Module::getTargetTriple. pub fn LLVMGetTarget(M: ModuleRef) -> *const c_char; pub fn LLVMSetTarget(M: ModuleRef, Triple: *const c_char); - /** See Module::dump. */ + /// See Module::dump. pub fn LLVMDumpModule(M: ModuleRef); - /** See Module::setModuleInlineAsm. */ + /// See Module::setModuleInlineAsm. pub fn LLVMSetModuleInlineAsm(M: ModuleRef, Asm: *const c_char); - /** See llvm::LLVMTypeKind::getTypeID. */ + /// See llvm::LLVMTypeKind::getTypeID. pub fn LLVMGetTypeKind(Ty: TypeRef) -> TypeKind; - /** See llvm::LLVMType::getContext. */ + /// See llvm::LLVMType::getContext. pub fn LLVMGetTypeContext(Ty: TypeRef) -> ContextRef; - /* Operations on integer types */ + // Operations on integer types pub fn LLVMInt1TypeInContext(C: ContextRef) -> TypeRef; pub fn LLVMInt8TypeInContext(C: ContextRef) -> TypeRef; pub fn LLVMInt16TypeInContext(C: ContextRef) -> TypeRef; @@ -396,14 +396,14 @@ pub mod llvm { pub fn LLVMGetIntTypeWidth(IntegerTy: TypeRef) -> c_uint; - /* Operations on real types */ + // Operations on real types pub fn LLVMFloatTypeInContext(C: ContextRef) -> TypeRef; pub fn LLVMDoubleTypeInContext(C: ContextRef) -> TypeRef; pub fn LLVMX86FP80TypeInContext(C: ContextRef) -> TypeRef; pub fn LLVMFP128TypeInContext(C: ContextRef) -> TypeRef; pub fn LLVMPPCFP128TypeInContext(C: ContextRef) -> TypeRef; - /* Operations on function types */ + // Operations on function types pub fn LLVMFunctionType(ReturnType: TypeRef, ParamTypes: *const TypeRef, ParamCount: c_uint, @@ -414,7 +414,7 @@ pub mod llvm { pub fn LLVMCountParamTypes(FunctionTy: TypeRef) -> c_uint; pub fn LLVMGetParamTypes(FunctionTy: TypeRef, Dest: *const TypeRef); - /* Operations on struct types */ + // Operations on struct types pub fn LLVMStructTypeInContext(C: ContextRef, ElementTypes: *const TypeRef, 
ElementCount: c_uint, @@ -425,7 +425,7 @@ pub mod llvm { Dest: *mut TypeRef); pub fn LLVMIsPackedStruct(StructTy: TypeRef) -> Bool; - /* Operations on array, pointer, and vector types (sequence types) */ + // Operations on array, pointer, and vector types (sequence types) pub fn LLVMRustArrayType(ElementType: TypeRef, ElementCount: u64) -> TypeRef; pub fn LLVMPointerType(ElementType: TypeRef, AddressSpace: c_uint) -> TypeRef; @@ -439,12 +439,12 @@ pub mod llvm { -> *const (); pub fn LLVMGetVectorSize(VectorTy: TypeRef) -> c_uint; - /* Operations on other types */ + // Operations on other types pub fn LLVMVoidTypeInContext(C: ContextRef) -> TypeRef; pub fn LLVMLabelTypeInContext(C: ContextRef) -> TypeRef; pub fn LLVMMetadataTypeInContext(C: ContextRef) -> TypeRef; - /* Operations on all values */ + // Operations on all values pub fn LLVMTypeOf(Val: ValueRef) -> TypeRef; pub fn LLVMGetValueName(Val: ValueRef) -> *const c_char; pub fn LLVMSetValueName(Val: ValueRef, Name: *const c_char); @@ -454,33 +454,33 @@ pub mod llvm { pub fn LLVMGetMetadata(Val: ValueRef, KindID: c_uint) -> ValueRef; pub fn LLVMSetMetadata(Val: ValueRef, KindID: c_uint, Node: ValueRef); - /* Operations on Uses */ + // Operations on Uses pub fn LLVMGetFirstUse(Val: ValueRef) -> UseRef; pub fn LLVMGetNextUse(U: UseRef) -> UseRef; pub fn LLVMGetUser(U: UseRef) -> ValueRef; pub fn LLVMGetUsedValue(U: UseRef) -> ValueRef; - /* Operations on Users */ + // Operations on Users pub fn LLVMGetNumOperands(Val: ValueRef) -> c_int; pub fn LLVMGetOperand(Val: ValueRef, Index: c_uint) -> ValueRef; pub fn LLVMSetOperand(Val: ValueRef, Index: c_uint, Op: ValueRef); - /* Operations on constants of any type */ + // Operations on constants of any type pub fn LLVMConstNull(Ty: TypeRef) -> ValueRef; - /* all zeroes */ + // all zeroes pub fn LLVMConstAllOnes(Ty: TypeRef) -> ValueRef; pub fn LLVMConstICmp(Pred: c_ushort, V1: ValueRef, V2: ValueRef) -> ValueRef; pub fn LLVMConstFCmp(Pred: c_ushort, V1: ValueRef, V2: 
ValueRef) -> ValueRef; - /* only for int/vector */ + // only for int/vector pub fn LLVMGetUndef(Ty: TypeRef) -> ValueRef; pub fn LLVMIsConstant(Val: ValueRef) -> Bool; pub fn LLVMIsNull(Val: ValueRef) -> Bool; pub fn LLVMIsUndef(Val: ValueRef) -> Bool; pub fn LLVMConstPointerNull(Ty: TypeRef) -> ValueRef; - /* Operations on metadata */ + // Operations on metadata pub fn LLVMMDStringInContext(C: ContextRef, Str: *const c_char, SLen: c_uint) @@ -493,7 +493,7 @@ pub mod llvm { Str: *const c_char, Val: ValueRef); - /* Operations on scalar constants */ + // Operations on scalar constants pub fn LLVMConstInt(IntTy: TypeRef, N: c_ulonglong, SignExtend: Bool) -> ValueRef; pub fn LLVMConstIntOfString(IntTy: TypeRef, Text: *const c_char, Radix: u8) @@ -514,7 +514,7 @@ pub mod llvm { pub fn LLVMConstIntGetSExtValue(ConstantVal: ValueRef) -> c_longlong; - /* Operations on composite constants */ + // Operations on composite constants pub fn LLVMConstStringInContext(C: ContextRef, Str: *const c_char, Length: c_uint, @@ -533,7 +533,7 @@ pub mod llvm { pub fn LLVMConstVector(ScalarConstantVals: *const ValueRef, Size: c_uint) -> ValueRef; - /* Constant expressions */ + // Constant expressions pub fn LLVMAlignOf(Ty: TypeRef) -> ValueRef; pub fn LLVMSizeOf(Ty: TypeRef) -> ValueRef; pub fn LLVMConstNeg(ConstantVal: ValueRef) -> ValueRef; @@ -672,7 +672,7 @@ pub mod llvm { - /* Operations on global variables, functions, and aliases (globals) */ + // Operations on global variables, functions, and aliases (globals) pub fn LLVMGetGlobalParent(Global: ValueRef) -> ModuleRef; pub fn LLVMIsDeclaration(Global: ValueRef) -> Bool; pub fn LLVMGetLinkage(Global: ValueRef) -> c_uint; @@ -685,7 +685,7 @@ pub mod llvm { pub fn LLVMSetAlignment(Global: ValueRef, Bytes: c_uint); - /* Operations on global variables */ + // Operations on global variables pub fn LLVMAddGlobal(M: ModuleRef, Ty: TypeRef, Name: *const c_char) -> ValueRef; pub fn LLVMAddGlobalInAddressSpace(M: ModuleRef, @@ -707,14 +707,14 
@@ pub mod llvm { pub fn LLVMIsGlobalConstant(GlobalVar: ValueRef) -> Bool; pub fn LLVMSetGlobalConstant(GlobalVar: ValueRef, IsConstant: Bool); - /* Operations on aliases */ + // Operations on aliases pub fn LLVMAddAlias(M: ModuleRef, Ty: TypeRef, Aliasee: ValueRef, Name: *const c_char) -> ValueRef; - /* Operations on functions */ + // Operations on functions pub fn LLVMAddFunction(M: ModuleRef, Name: *const c_char, FunctionTy: TypeRef) @@ -739,7 +739,7 @@ pub mod llvm { pub fn LLVMRemoveFunctionAttrString(Fn: ValueRef, index: c_uint, Name: *const c_char); pub fn LLVMGetFunctionAttr(Fn: ValueRef) -> c_ulonglong; - /* Operations on parameters */ + // Operations on parameters pub fn LLVMCountParams(Fn: ValueRef) -> c_uint; pub fn LLVMGetParams(Fn: ValueRef, Params: *const ValueRef); pub fn LLVMGetParam(Fn: ValueRef, Index: c_uint) -> ValueRef; @@ -753,7 +753,7 @@ pub mod llvm { pub fn LLVMGetAttribute(Arg: ValueRef) -> c_uint; pub fn LLVMSetParamAlignment(Arg: ValueRef, align: c_uint); - /* Operations on basic blocks */ + // Operations on basic blocks pub fn LLVMBasicBlockAsValue(BB: BasicBlockRef) -> ValueRef; pub fn LLVMValueIsBasicBlock(Val: ValueRef) -> Bool; pub fn LLVMValueAsBasicBlock(Val: ValueRef) -> BasicBlockRef; @@ -782,7 +782,7 @@ pub mod llvm { pub fn LLVMMoveBasicBlockBefore(BB: BasicBlockRef, MoveBefore: BasicBlockRef); - /* Operations on instructions */ + // Operations on instructions pub fn LLVMGetInstructionParent(Inst: ValueRef) -> BasicBlockRef; pub fn LLVMGetFirstInstruction(BB: BasicBlockRef) -> ValueRef; pub fn LLVMGetLastInstruction(BB: BasicBlockRef) -> ValueRef; @@ -790,7 +790,7 @@ pub mod llvm { pub fn LLVMGetPreviousInstruction(Inst: ValueRef) -> ValueRef; pub fn LLVMInstructionEraseFromParent(Inst: ValueRef); - /* Operations on call sites */ + // Operations on call sites pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint); pub fn LLVMGetInstructionCallConv(Instr: ValueRef) -> c_uint; pub fn LLVMAddInstrAttribute(Instr: 
ValueRef, @@ -806,15 +806,15 @@ pub mod llvm { index: c_uint, Val: uint64_t); - /* Operations on call instructions (only) */ + // Operations on call instructions (only) pub fn LLVMIsTailCall(CallInst: ValueRef) -> Bool; pub fn LLVMSetTailCall(CallInst: ValueRef, IsTailCall: Bool); - /* Operations on load/store instructions (only) */ + // Operations on load/store instructions (only) pub fn LLVMGetVolatile(MemoryAccessInst: ValueRef) -> Bool; pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool); - /* Operations on phi nodes */ + // Operations on phi nodes pub fn LLVMAddIncoming(PhiNode: ValueRef, IncomingValues: *const ValueRef, IncomingBlocks: *const BasicBlockRef, @@ -825,7 +825,7 @@ pub mod llvm { pub fn LLVMGetIncomingBlock(PhiNode: ValueRef, Index: c_uint) -> BasicBlockRef; - /* Instruction builders */ + // Instruction builders pub fn LLVMCreateBuilderInContext(C: ContextRef) -> BuilderRef; pub fn LLVMPositionBuilder(Builder: BuilderRef, Block: BasicBlockRef, @@ -843,12 +843,12 @@ pub mod llvm { pub fn LLVMDisposeBuilder(Builder: BuilderRef); pub fn LLVMDisposeExecutionEngine(EE: ExecutionEngineRef); - /* Metadata */ + // Metadata pub fn LLVMSetCurrentDebugLocation(Builder: BuilderRef, L: ValueRef); pub fn LLVMGetCurrentDebugLocation(Builder: BuilderRef) -> ValueRef; pub fn LLVMSetInstDebugLocation(Builder: BuilderRef, Inst: ValueRef); - /* Terminators */ + // Terminators pub fn LLVMBuildRetVoid(B: BuilderRef) -> ValueRef; pub fn LLVMBuildRet(B: BuilderRef, V: ValueRef) -> ValueRef; pub fn LLVMBuildAggregateRet(B: BuilderRef, @@ -887,21 +887,21 @@ pub mod llvm { pub fn LLVMBuildResume(B: BuilderRef, Exn: ValueRef) -> ValueRef; pub fn LLVMBuildUnreachable(B: BuilderRef) -> ValueRef; - /* Add a case to the switch instruction */ + // Add a case to the switch instruction pub fn LLVMAddCase(Switch: ValueRef, OnVal: ValueRef, Dest: BasicBlockRef); - /* Add a destination to the indirectbr instruction */ + // Add a destination to the indirectbr instruction 
pub fn LLVMAddDestination(IndirectBr: ValueRef, Dest: BasicBlockRef); - /* Add a clause to the landing pad instruction */ + // Add a clause to the landing pad instruction pub fn LLVMAddClause(LandingPad: ValueRef, ClauseVal: ValueRef); - /* Set the cleanup on a landing pad instruction */ + // Set the cleanup on a landing pad instruction pub fn LLVMSetCleanup(LandingPad: ValueRef, Val: Bool); - /* Arithmetic */ + // Arithmetic pub fn LLVMBuildAdd(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, @@ -1044,7 +1044,7 @@ pub mod llvm { pub fn LLVMBuildNot(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef; - /* Memory */ + // Memory pub fn LLVMBuildMalloc(B: BuilderRef, Ty: TypeRef, Name: *const c_char) -> ValueRef; pub fn LLVMBuildArrayMalloc(B: BuilderRef, @@ -1094,7 +1094,7 @@ pub mod llvm { Name: *const c_char) -> ValueRef; - /* Casts */ + // Casts pub fn LLVMBuildTrunc(B: BuilderRef, Val: ValueRef, DestTy: TypeRef, @@ -1191,7 +1191,7 @@ pub mod llvm { Name: *const c_char) -> ValueRef; - /* Comparisons */ + // Comparisons pub fn LLVMBuildICmp(B: BuilderRef, Op: c_uint, LHS: ValueRef, @@ -1205,7 +1205,7 @@ pub mod llvm { Name: *const c_char) -> ValueRef; - /* Miscellaneous instructions */ + // Miscellaneous instructions pub fn LLVMBuildPhi(B: BuilderRef, Ty: TypeRef, Name: *const c_char) -> ValueRef; pub fn LLVMBuildCall(B: BuilderRef, @@ -1264,7 +1264,7 @@ pub mod llvm { Name: *const c_char) -> ValueRef; - /* Atomic Operations */ + // Atomic Operations pub fn LLVMBuildAtomicLoad(B: BuilderRef, PointerVal: ValueRef, Name: *const c_char, @@ -1297,34 +1297,34 @@ pub mod llvm { pub fn LLVMBuildAtomicFence(B: BuilderRef, Order: AtomicOrdering); - /* Selected entries from the downcasts. */ + // Selected entries from the downcasts. pub fn LLVMIsATerminatorInst(Inst: ValueRef) -> ValueRef; pub fn LLVMIsAStoreInst(Inst: ValueRef) -> ValueRef; - /** Writes a module to the specified path. Returns 0 on success. */ + /// Writes a module to the specified path. 
Returns 0 on success. pub fn LLVMWriteBitcodeToFile(M: ModuleRef, Path: *const c_char) -> c_int; - /** Creates target data from a target layout string. */ + /// Creates target data from a target layout string. pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef; /// Adds the target data to the given pass manager. The pass manager /// references the target data only weakly. pub fn LLVMAddTargetData(TD: TargetDataRef, PM: PassManagerRef); - /** Number of bytes clobbered when doing a Store to *T. */ + /// Number of bytes clobbered when doing a Store to *T. pub fn LLVMStoreSizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; - /** Number of bytes clobbered when doing a Store to *T. */ + /// Number of bytes clobbered when doing a Store to *T. pub fn LLVMSizeOfTypeInBits(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; - /** Distance between successive elements in an array of T. - Includes ABI padding. */ + /// Distance between successive elements in an array of T. + /// Includes ABI padding. pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; - /** Returns the preferred alignment of a type. */ + /// Returns the preferred alignment of a type. pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; - /** Returns the minimum alignment of a type. */ + /// Returns the minimum alignment of a type. pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; @@ -1335,41 +1335,39 @@ pub mod llvm { Element: c_uint) -> c_ulonglong; - /** - * Returns the minimum alignment of a type when part of a call frame. - */ + /// Returns the minimum alignment of a type when part of a call frame. pub fn LLVMCallFrameAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; - /** Disposes target data. */ + /// Disposes target data. pub fn LLVMDisposeTargetData(TD: TargetDataRef); - /** Creates a pass manager. */ + /// Creates a pass manager. 
pub fn LLVMCreatePassManager() -> PassManagerRef; - /** Creates a function-by-function pass manager */ + /// Creates a function-by-function pass manager pub fn LLVMCreateFunctionPassManagerForModule(M: ModuleRef) -> PassManagerRef; - /** Disposes a pass manager. */ + /// Disposes a pass manager. pub fn LLVMDisposePassManager(PM: PassManagerRef); - /** Runs a pass manager on a module. */ + /// Runs a pass manager on a module. pub fn LLVMRunPassManager(PM: PassManagerRef, M: ModuleRef) -> Bool; - /** Runs the function passes on the provided function. */ + /// Runs the function passes on the provided function. pub fn LLVMRunFunctionPassManager(FPM: PassManagerRef, F: ValueRef) -> Bool; - /** Initializes all the function passes scheduled in the manager */ + /// Initializes all the function passes scheduled in the manager pub fn LLVMInitializeFunctionPassManager(FPM: PassManagerRef) -> Bool; - /** Finalizes all the function passes scheduled in the manager */ + /// Finalizes all the function passes scheduled in the manager pub fn LLVMFinalizeFunctionPassManager(FPM: PassManagerRef) -> Bool; pub fn LLVMInitializePasses(); - /** Adds a verification pass. */ + /// Adds a verification pass. pub fn LLVMAddVerifierPass(PM: PassManagerRef); pub fn LLVMAddGlobalOptimizerPass(PM: PassManagerRef); @@ -1439,38 +1437,38 @@ pub mod llvm { Internalize: Bool, RunInliner: Bool); - /** Destroys a memory buffer. */ + /// Destroys a memory buffer. pub fn LLVMDisposeMemoryBuffer(MemBuf: MemoryBufferRef); - /* Stuff that's in rustllvm/ because it's not upstream yet. */ + // Stuff that's in rustllvm/ because it's not upstream yet. - /** Opens an object file. */ + /// Opens an object file. pub fn LLVMCreateObjectFile(MemBuf: MemoryBufferRef) -> ObjectFileRef; - /** Closes an object file. */ + /// Closes an object file. pub fn LLVMDisposeObjectFile(ObjFile: ObjectFileRef); - /** Enumerates the sections in an object file. */ + /// Enumerates the sections in an object file. 
pub fn LLVMGetSections(ObjFile: ObjectFileRef) -> SectionIteratorRef; - /** Destroys a section iterator. */ + /// Destroys a section iterator. pub fn LLVMDisposeSectionIterator(SI: SectionIteratorRef); - /** Returns true if the section iterator is at the end of the section - list: */ + /// Returns true if the section iterator is at the end of the section + /// list: pub fn LLVMIsSectionIteratorAtEnd(ObjFile: ObjectFileRef, SI: SectionIteratorRef) -> Bool; - /** Moves the section iterator to point to the next section. */ + /// Moves the section iterator to point to the next section. pub fn LLVMMoveToNextSection(SI: SectionIteratorRef); - /** Returns the current section size. */ + /// Returns the current section size. pub fn LLVMGetSectionSize(SI: SectionIteratorRef) -> c_ulonglong; - /** Returns the current section contents as a string buffer. */ + /// Returns the current section contents as a string buffer. pub fn LLVMGetSectionContents(SI: SectionIteratorRef) -> *const c_char; - /** Reads the given file and returns it as a memory buffer. Use - LLVMDisposeMemoryBuffer() to get rid of it. */ + /// Reads the given file and returns it as a memory buffer. Use + /// LLVMDisposeMemoryBuffer() to get rid of it. pub fn LLVMRustCreateMemoryBufferWithContentsOfFile(Path: *const c_char) -> MemoryBufferRef; - /** Borrows the contents of the memory buffer (doesn't copy it) */ + /// Borrows the contents of the memory buffer (doesn't copy it) pub fn LLVMCreateMemoryBufferWithMemoryRange(InputData: *const c_char, InputDataLength: size_t, BufferName: *const c_char, @@ -1484,8 +1482,8 @@ pub mod llvm { pub fn LLVMIsMultithreaded() -> Bool; pub fn LLVMStartMultithreaded() -> Bool; - /** Returns a string describing the last error caused by an LLVMRust* - call. */ + /// Returns a string describing the last error caused by an LLVMRust* + /// call. pub fn LLVMRustGetLastError() -> *const c_char; /// Print the pass timings since static dtors aren't picking them up. 
@@ -1503,10 +1501,10 @@ pub mod llvm { Count: c_uint) -> ValueRef; - /** Enables LLVM debug output. */ + /// Enables LLVM debug output. pub fn LLVMSetDebug(Enabled: c_int); - /** Prepares inline assembly. */ + /// Prepares inline assembly. pub fn LLVMInlineAsm(Ty: TypeRef, AsmString: *const c_char, Constraints: *const c_char, @@ -1861,7 +1859,7 @@ pub fn SetFunctionAttribute(fn_: ValueRef, attr: Attribute) { llvm::LLVMAddFunctionAttribute(fn_, FunctionIndex as c_uint, attr as uint64_t) } } -/* Memory-managed object interface to type handles. */ +// Memory-managed object interface to type handles. pub struct TypeNames { named_types: RefCell>, @@ -1907,7 +1905,7 @@ impl TypeNames { } } -/* Memory-managed interface to target data. */ +// Memory-managed interface to target data. pub struct TargetData { pub lltd: TargetDataRef @@ -1929,7 +1927,7 @@ pub fn mk_target_data(string_rep: &str) -> TargetData { } } -/* Memory-managed interface to object files. */ +// Memory-managed interface to object files. pub struct ObjectFile { pub llof: ObjectFileRef, @@ -1960,7 +1958,7 @@ impl Drop for ObjectFile { } } -/* Memory-managed interface to section iterators. */ +// Memory-managed interface to section iterators. 
pub struct SectionIter { pub llsi: SectionIteratorRef diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs index 0ab3d50cfbc3d..5514e302daa4e 100644 --- a/src/librustc/lint/builtin.rs +++ b/src/librustc/lint/builtin.rs @@ -373,7 +373,7 @@ impl LintPass for CTypes { } } } - _ => {/* nothing to do */ } + _ => { /* nothing to do */ } } } } diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs index 79fbd73c23d3c..5f411a35b194d 100644 --- a/src/librustc/lint/context.rs +++ b/src/librustc/lint/context.rs @@ -340,11 +340,9 @@ impl<'a> Context<'a> { self.lookup_and_emit(lint, Some(span), msg); } - /** - * Merge the lints specified by any lint attributes into the - * current lint context, call the provided function, then reset the - * lints in effect to their previous state. - */ + /// Merge the lints specified by any lint attributes into the + /// current lint context, call the provided function, then reset the + /// lints in effect to their previous state. fn with_lint_attrs(&mut self, attrs: &[ast::Attribute], f: |&mut Context|) { diff --git a/src/librustc/metadata/common.rs b/src/librustc/metadata/common.rs index cdeecf3a080fe..cd0923e640fe4 100644 --- a/src/librustc/metadata/common.rs +++ b/src/librustc/metadata/common.rs @@ -94,14 +94,12 @@ pub static tag_item_field: uint = 0x28; pub static tag_item_field_origin: uint = 0x29; pub static tag_item_variances: uint = 0x2a; -/* - trait items contain tag_item_trait_method elements, - impl items contain tag_item_impl_method elements, and classes - have both. That's because some code treats classes like traits, - and other code treats them like impls. Because classes can contain - both, tag_item_trait_method and tag_item_impl_method have to be two - different tags. - */ +// trait items contain tag_item_trait_method elements, +// impl items contain tag_item_impl_method elements, and classes +// have both. 
That's because some code treats classes like traits, +// and other code treats them like impls. Because classes can contain +// both, tag_item_trait_method and tag_item_impl_method have to be two +// different tags. pub static tag_item_impl_method: uint = 0x30; pub static tag_item_trait_method_explicit_self: uint = 0x31; diff --git a/src/librustc/metadata/encoder.rs b/src/librustc/metadata/encoder.rs index 21713672f8149..c2678a468eb6c 100644 --- a/src/librustc/metadata/encoder.rs +++ b/src/librustc/metadata/encoder.rs @@ -665,18 +665,18 @@ fn encode_provided_source(ebml_w: &mut Encoder, } } -/* Returns an index of items in this class */ +// Returns an index of items in this class fn encode_info_for_struct(ecx: &EncodeContext, ebml_w: &mut Encoder, fields: &[ty::field_ty], global_index: &mut Vec>) -> Vec> { - /* Each class has its own index, since different classes - may have fields with the same name */ + // Each class has its own index, since different classes + // may have fields with the same name let mut index = Vec::new(); let tcx = ecx.tcx; - /* We encode both private and public fields -- need to include - private fields to get the offsets right */ + // We encode both private and public fields -- need to include + // private fields to get the offsets right for field in fields.iter() { let nm = field.name; let id = field.id.node; @@ -1039,19 +1039,19 @@ fn encode_info_for_item(ecx: &EncodeContext, ItemStruct(struct_def, _) => { let fields = ty::lookup_struct_fields(tcx, def_id); - /* First, encode the fields - These come first because we need to write them to make - the index, and the index needs to be in the item for the - class itself */ + // First, encode the fields + // These come first because we need to write them to make + // the index, and the index needs to be in the item for the + // class itself let idx = encode_info_for_struct(ecx, ebml_w, fields.as_slice(), index); - /* Index the class*/ + // Index the class add_to_index(item, ebml_w, index); - /* 
Now, make an item for the class itself */ + // Now, make an item for the class itself ebml_w.start_tag(tag_items_data_item); encode_def_id(ebml_w, def_id); encode_family(ebml_w, 'S'); @@ -1064,9 +1064,9 @@ fn encode_info_for_item(ecx: &EncodeContext, encode_stability(ebml_w, stab); encode_visibility(ebml_w, vis); - /* Encode def_ids for each field and method - for methods, write all the stuff get_trait_method - needs to know*/ + // Encode def_ids for each field and method + // for methods, write all the stuff get_trait_method + // needs to know encode_struct_fields(ebml_w, fields.as_slice(), def_id); encode_inlined_item(ecx, ebml_w, IIItemRef(item)); @@ -1074,7 +1074,7 @@ fn encode_info_for_item(ecx: &EncodeContext, // Encode inherent implementations for this structure. encode_inherent_implementations(ecx, ebml_w, def_id); - /* Each class has its own index -- encode it */ + // Each class has its own index -- encode it encode_index(ebml_w, idx, write_i64); ebml_w.end_tag(); diff --git a/src/librustc/middle/astencode.rs b/src/librustc/middle/astencode.rs index d7a7d2902b491..d89ce78ce2057 100644 --- a/src/librustc/middle/astencode.rs +++ b/src/librustc/middle/astencode.rs @@ -170,53 +170,44 @@ fn reserve_id_range(sess: &Session, } impl<'a> ExtendedDecodeContext<'a> { + /// Translates an internal id, meaning a node id that is known + /// to refer to some part of the item currently being inlined, + /// such as a local variable or argument. All naked node-ids + /// that appear in types have this property, since if something + /// might refer to an external item we would use a def-id to + /// allow for the possibility that the item resides in another + /// crate. pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId { - /*! - * Translates an internal id, meaning a node id that is known - * to refer to some part of the item currently being inlined, - * such as a local variable or argument. 
All naked node-ids - * that appear in types have this property, since if something - * might refer to an external item we would use a def-id to - * allow for the possibility that the item resides in another - * crate. - */ - // from_id_range should be non-empty assert!(!self.from_id_range.empty()); (id - self.from_id_range.min + self.to_id_range.min) } + /// Translates an EXTERNAL def-id, converting the crate number + /// from the one used in the encoded data to the current crate + /// numbers.. By external, I mean that it be translated to a + /// reference to the item in its original crate, as opposed to + /// being translated to a reference to the inlined version of + /// the item. This is typically, but not always, what you + /// want, because most def-ids refer to external things like + /// types or other fns that may or may not be inlined. Note + /// that even when the inlined function is referencing itself + /// recursively, we would want `tr_def_id` for that + /// reference--- conceptually the function calls the original, + /// non-inlined version, and trans deals with linking that + /// recursive call to the inlined copy. + /// + /// However, there are a *few* cases where def-ids are used but + /// we know that the thing being referenced is in fact *internal* + /// to the item being inlined. In those cases, you should use + /// `tr_intern_def_id()` below. pub fn tr_def_id(&self, did: ast::DefId) -> ast::DefId { - /*! - * Translates an EXTERNAL def-id, converting the crate number - * from the one used in the encoded data to the current crate - * numbers.. By external, I mean that it be translated to a - * reference to the item in its original crate, as opposed to - * being translated to a reference to the inlined version of - * the item. This is typically, but not always, what you - * want, because most def-ids refer to external things like - * types or other fns that may or may not be inlined. 
Note - * that even when the inlined function is referencing itself - * recursively, we would want `tr_def_id` for that - * reference--- conceptually the function calls the original, - * non-inlined version, and trans deals with linking that - * recursive call to the inlined copy. - * - * However, there are a *few* cases where def-ids are used but - * we know that the thing being referenced is in fact *internal* - * to the item being inlined. In those cases, you should use - * `tr_intern_def_id()` below. - */ - decoder::translate_def_id(self.dcx.cdata, did) } + /// Translates an INTERNAL def-id, meaning a def-id that is + /// known to refer to some part of the item currently being + /// inlined. In that case, we want to convert the def-id to + /// refer to the current crate and to the new, inlined node-id. pub fn tr_intern_def_id(&self, did: ast::DefId) -> ast::DefId { - /*! - * Translates an INTERNAL def-id, meaning a def-id that is - * known to refer to some part of the item currently being - * inlined. In that case, we want to convert the def-id to - * refer to the current crate and to the new, inlined node-id. - */ - assert_eq!(did.krate, ast::LOCAL_CRATE); ast::DefId { krate: ast::LOCAL_CRATE, node: self.tr_id(did.node) } } @@ -1318,39 +1309,36 @@ impl<'a> ebml_decoder_decoder_helpers for reader::Decoder<'a> { }).unwrap() } + /// Converts a def-id that appears in a type. The correct + /// translation will depend on what kind of def-id this is. + /// This is a subtle point: type definitions are not + /// inlined into the current crate, so if the def-id names + /// a nominal type or type alias, then it should be + /// translated to refer to the source crate. + /// + /// However, *type parameters* are cloned along with the function + /// they are attached to. So we should translate those def-ids + /// to refer to the new, cloned copy of the type parameter. + /// We only see references to free type parameters in the body of + /// an inlined function. 
In such cases, we need the def-id to + /// be a local id so that the TypeContents code is able to lookup + /// the relevant info in the ty_param_defs table. + /// + /// *Region parameters*, unfortunately, are another kettle of fish. + /// In such cases, def_id's can appear in types to distinguish + /// shadowed bound regions and so forth. It doesn't actually + /// matter so much what we do to these, since regions are erased + /// at trans time, but it's good to keep them consistent just in + /// case. We translate them with `tr_def_id()` which will map + /// the crate numbers back to the original source crate. + /// + /// It'd be really nice to refactor the type repr to not include + /// def-ids so that all these distinctions were unnecessary. fn convert_def_id(&mut self, xcx: &ExtendedDecodeContext, source: tydecode::DefIdSource, did: ast::DefId) -> ast::DefId { - /*! - * Converts a def-id that appears in a type. The correct - * translation will depend on what kind of def-id this is. - * This is a subtle point: type definitions are not - * inlined into the current crate, so if the def-id names - * a nominal type or type alias, then it should be - * translated to refer to the source crate. - * - * However, *type parameters* are cloned along with the function - * they are attached to. So we should translate those def-ids - * to refer to the new, cloned copy of the type parameter. - * We only see references to free type parameters in the body of - * an inlined function. In such cases, we need the def-id to - * be a local id so that the TypeContents code is able to lookup - * the relevant info in the ty_param_defs table. - * - * *Region parameters*, unfortunately, are another kettle of fish. - * In such cases, def_id's can appear in types to distinguish - * shadowed bound regions and so forth. It doesn't actually - * matter so much what we do to these, since regions are erased - * at trans time, but it's good to keep them consistent just in - * case. 
We translate them with `tr_def_id()` which will map - * the crate numbers back to the original source crate. - * - * It'd be really nice to refactor the type repr to not include - * def-ids so that all these distinctions were unnecessary. - */ - let r = match source { NominalType | TypeWithId | RegionParameter => xcx.tr_def_id(did), TypeParameter => xcx.tr_intern_def_id(did) diff --git a/src/librustc/middle/borrowck/check_loans.rs b/src/librustc/middle/borrowck/check_loans.rs index df208b9cdc133..9f1f93487a9cd 100644 --- a/src/librustc/middle/borrowck/check_loans.rs +++ b/src/librustc/middle/borrowck/check_loans.rs @@ -584,16 +584,13 @@ impl<'a> CheckLoanCtxt<'a> { return ret; } + /// Reports an error if `expr` (which should be a path) + /// is using a moved/uninitialized value fn check_if_path_is_moved(&self, id: ast::NodeId, span: Span, use_kind: MovedValueUseKind, lp: &Rc) { - /*! - * Reports an error if `expr` (which should be a path) - * is using a moved/uninitialized value - */ - debug!("check_if_path_is_moved(id={:?}, use_kind={:?}, lp={})", id, use_kind, lp.repr(self.bccx.tcx)); self.move_data.each_move_of(id, lp, |move, moved_lp| { @@ -607,30 +604,26 @@ impl<'a> CheckLoanCtxt<'a> { }); } + /// Reports an error if assigning to `lp` will use a + /// moved/uninitialized value. Mainly this is concerned with + /// detecting derefs of uninitialized pointers. + /// + /// For example: + /// + /// let a: int; + /// a = 10; // ok, even though a is uninitialized + /// + /// struct Point { x: uint, y: uint } + /// let p: Point; + /// p.x = 22; // ok, even though `p` is uninitialized + /// + /// let p: ~Point; + /// (*p).x = 22; // not ok, p is uninitialized, can't deref fn check_if_assigned_path_is_moved(&self, id: ast::NodeId, span: Span, use_kind: MovedValueUseKind, - lp: &Rc) - { - /*! - * Reports an error if assigning to `lp` will use a - * moved/uninitialized value. Mainly this is concerned with - * detecting derefs of uninitialized pointers. 
- * - * For example: - * - * let a: int; - * a = 10; // ok, even though a is uninitialized - * - * struct Point { x: uint, y: uint } - * let p: Point; - * p.x = 22; // ok, even though `p` is uninitialized - * - * let p: ~Point; - * (*p).x = 22; // not ok, p is uninitialized, can't deref - */ - + lp: &Rc) { match **lp { LpVar(_) | LpUpvar(_) => { // assigning to `x` does not require that `x` is initialized diff --git a/src/librustc/middle/borrowck/gather_loans/gather_moves.rs b/src/librustc/middle/borrowck/gather_loans/gather_moves.rs index de77fa602c9b4..2c807aaa92476 100644 --- a/src/librustc/middle/borrowck/gather_loans/gather_moves.rs +++ b/src/librustc/middle/borrowck/gather_loans/gather_moves.rs @@ -8,9 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Computes moves. - */ +//! Computes moves. use mc = middle::mem_categorization; use middle::borrowck::*; diff --git a/src/librustc/middle/borrowck/gather_loans/lifetime.rs b/src/librustc/middle/borrowck/gather_loans/lifetime.rs index dc8567af9edad..8d1ae6fed93ec 100644 --- a/src/librustc/middle/borrowck/gather_loans/lifetime.rs +++ b/src/librustc/middle/borrowck/gather_loans/lifetime.rs @@ -8,10 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * This module implements the check that the lifetime of a borrow - * does not exceed the lifetime of the value being borrowed. - */ +//! This module implements the check that the lifetime of a borrow +//! does not exceed the lifetime of the value being borrowed. 
use middle::borrowck::*; use euv = middle::expr_use_visitor; diff --git a/src/librustc/middle/borrowck/gather_loans/mod.rs b/src/librustc/middle/borrowck/gather_loans/mod.rs index 454c3dcd5d3ca..4dc8ac082e2b3 100644 --- a/src/librustc/middle/borrowck/gather_loans/mod.rs +++ b/src/librustc/middle/borrowck/gather_loans/mod.rs @@ -162,7 +162,7 @@ fn check_aliasability(bccx: &BorrowckCtxt, match (cmt.freely_aliasable(bccx.tcx), req_kind) { (None, _) => { - /* Uniquely accessible path -- OK for `&` and `&mut` */ + // Uniquely accessible path -- OK for `&` and `&mut` Ok(()) } (Some(mc::AliasableStatic(safety)), ty::ImmBorrow) => { @@ -207,6 +207,11 @@ fn check_aliasability(bccx: &BorrowckCtxt, impl<'a> GatherLoanCtxt<'a> { pub fn tcx(&self) -> &'a ty::ctxt { self.bccx.tcx } + /// Guarantees that `addr_of(cmt)` will be valid for the duration of + /// `static_scope_r`, or reports an error. This may entail taking + /// out loans, which will be added to the `req_loan_map`. This can + /// also entail "rooting" GC'd pointers, which means ensuring + /// dynamically that they are not freed. fn guarantee_valid(&mut self, borrow_id: ast::NodeId, borrow_span: Span, @@ -214,14 +219,6 @@ impl<'a> GatherLoanCtxt<'a> { req_kind: ty::BorrowKind, loan_region: ty::Region, cause: euv::LoanCause) { - /*! - * Guarantees that `addr_of(cmt)` will be valid for the duration of - * `static_scope_r`, or reports an error. This may entail taking - * out loans, which will be added to the `req_loan_map`. This can - * also entail "rooting" GC'd pointers, which means ensuring - * dynamically that they are not freed. 
- */ - debug!("guarantee_valid(borrow_id={:?}, cmt={}, \ req_mutbl={:?}, loan_region={:?})", borrow_id, diff --git a/src/librustc/middle/borrowck/gather_loans/restrictions.rs b/src/librustc/middle/borrowck/gather_loans/restrictions.rs index d131b6f7eda29..b823fd570dd52 100644 --- a/src/librustc/middle/borrowck/gather_loans/restrictions.rs +++ b/src/librustc/middle/borrowck/gather_loans/restrictions.rs @@ -8,9 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Computes the restrictions that result from a borrow. - */ +//! Computes the restrictions that result from a borrow. use middle::borrowck::*; use euv = middle::expr_use_visitor; diff --git a/src/librustc/middle/borrowck/mod.rs b/src/librustc/middle/borrowck/mod.rs index 9ab3202b9096e..303b890e04894 100644 --- a/src/librustc/middle/borrowck/mod.rs +++ b/src/librustc/middle/borrowck/mod.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! See doc.rs for a thorough explanation of the borrow checker */ +//! See doc.rs for a thorough explanation of the borrow checker #![allow(non_camel_case_types)] diff --git a/src/librustc/middle/borrowck/move_data.rs b/src/librustc/middle/borrowck/move_data.rs index b61596908e60a..e3c0bb67aaa6e 100644 --- a/src/librustc/middle/borrowck/move_data.rs +++ b/src/librustc/middle/borrowck/move_data.rs @@ -8,12 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Data structures used for tracking moves. Please see the extensive -comments in the section "Moves and initialization" and in `doc.rs`. - -*/ +//! Data structures used for tracking moves. Please see the extensive +//! comments in the section "Moves and initialization" and in `doc.rs`. 
use std::cell::RefCell; use std::rc::Rc; @@ -231,15 +227,12 @@ impl MoveData { self.path_parent(index) == InvalidMovePathIndex } + /// Returns the existing move path index for `lp`, if any, + /// and otherwise adds a new index for `lp` and any of its + /// base paths that do not yet have an index. pub fn move_path(&self, tcx: &ty::ctxt, lp: Rc) -> MovePathIndex { - /*! - * Returns the existing move path index for `lp`, if any, - * and otherwise adds a new index for `lp` and any of its - * base paths that do not yet have an index. - */ - match self.path_map.borrow().find(&lp) { Some(&index) => { return index; @@ -303,13 +296,10 @@ impl MoveData { result } + /// Adds any existing move path indices for `lp` and any base + /// paths of `lp` to `result`, but does not add new move paths fn add_existing_base_paths(&self, lp: &Rc, result: &mut Vec) { - /*! - * Adds any existing move path indices for `lp` and any base - * paths of `lp` to `result`, but does not add new move paths - */ - match self.path_map.borrow().find_copy(lp) { Some(index) => { self.each_base_path(index, |p| { @@ -329,16 +319,13 @@ impl MoveData { } + /// Adds a new move entry for a move of `lp` that occurs at + /// location `id` with kind `kind`. pub fn add_move(&self, tcx: &ty::ctxt, lp: Rc, id: ast::NodeId, kind: MoveKind) { - /*! - * Adds a new move entry for a move of `lp` that occurs at - * location `id` with kind `kind`. - */ - debug!("add_move(lp={}, id={:?}, kind={:?})", lp.repr(tcx), id, @@ -358,6 +345,8 @@ impl MoveData { }); } + /// Adds a new record for an assignment to `lp` that occurs at + /// location `id` with the given `span`. pub fn add_assignment(&self, tcx: &ty::ctxt, lp: Rc, @@ -365,11 +354,6 @@ impl MoveData { span: Span, assignee_id: ast::NodeId, mode: euv::MutateMode) { - /*! - * Adds a new record for an assignment to `lp` that occurs at - * location `id` with the given `span`. 
- */ - debug!("add_assignment(lp={}, assign_id={:?}, assignee_id={:?}", lp.repr(tcx), assign_id, assignee_id); @@ -401,18 +385,15 @@ impl MoveData { } } + /// Adds the gen/kills for the various moves and + /// assignments into the provided data flow contexts. + /// Moves are generated by moves and killed by assignments and + /// scoping. Assignments are generated by assignment to variables and + /// killed by scoping. See `doc.rs` for more details. fn add_gen_kills(&self, tcx: &ty::ctxt, dfcx_moves: &mut MoveDataFlow, dfcx_assign: &mut AssignDataFlow) { - /*! - * Adds the gen/kills for the various moves and - * assignments into the provided data flow contexts. - * Moves are generated by moves and killed by assignments and - * scoping. Assignments are generated by assignment to variables and - * killed by scoping. See `doc.rs` for more details. - */ - for (i, move) in self.moves.borrow().iter().enumerate() { dfcx_moves.add_gen(move.id, i); } @@ -568,14 +549,11 @@ impl<'a> FlowedMoveData<'a> { } } + /// Iterates through each path moved by `id` pub fn each_path_moved_by(&self, id: ast::NodeId, f: |&Move, &LoanPath| -> bool) -> bool { - /*! - * Iterates through each path moved by `id` - */ - self.dfcx_moves.each_gen_bit_frozen(id, |index| { let move = self.move_data.moves.borrow(); let move = move.get(index); @@ -584,12 +562,11 @@ impl<'a> FlowedMoveData<'a> { }) } + /// Returns the kind of a move of `loan_path` by `id`, if one exists. pub fn kind_of_move_of_path(&self, id: ast::NodeId, loan_path: &Rc) -> Option { - //! Returns the kind of a move of `loan_path` by `id`, if one exists. - let mut ret = None; for loan_path_index in self.move_data.path_map.borrow().find(&*loan_path).iter() { self.dfcx_moves.each_gen_bit_frozen(id, |move_index| { @@ -606,18 +583,15 @@ impl<'a> FlowedMoveData<'a> { ret } + /// Iterates through each move of `loan_path` (or some base path + /// of `loan_path`) that *may* have occurred on entry to `id` without + /// an intervening assignment. 
In other words, any moves that + /// would invalidate a reference to `loan_path` at location `id`. pub fn each_move_of(&self, id: ast::NodeId, loan_path: &Rc, f: |&Move, &LoanPath| -> bool) -> bool { - /*! - * Iterates through each move of `loan_path` (or some base path - * of `loan_path`) that *may* have occurred on entry to `id` without - * an intervening assignment. In other words, any moves that - * would invalidate a reference to `loan_path` at location `id`. - */ - // Bad scenarios: // // 1. Move of `a.b.c`, use of `a.b.c` @@ -665,24 +639,21 @@ impl<'a> FlowedMoveData<'a> { }) } + /// True if `id` is the id of the LHS of an assignment pub fn is_assignee(&self, id: ast::NodeId) -> bool { - //! True if `id` is the id of the LHS of an assignment self.move_data.assignee_ids.borrow().iter().any(|x| x == &id) } + /// Iterates through every assignment to `loan_path` that + /// may have occurred on entry to `id`. `loan_path` must be + /// a single variable. pub fn each_assignment_of(&self, id: ast::NodeId, loan_path: &Rc, f: |&Assignment| -> bool) -> bool { - /*! - * Iterates through every assignment to `loan_path` that - * may have occurred on entry to `id`. `loan_path` must be - * a single variable. - */ - let loan_path_index = { match self.move_data.existing_move_path(loan_path) { Some(i) => i, diff --git a/src/librustc/middle/cfg/mod.rs b/src/librustc/middle/cfg/mod.rs index bb758ec7c38b7..a2e8ba8d65c33 100644 --- a/src/librustc/middle/cfg/mod.rs +++ b/src/librustc/middle/cfg/mod.rs @@ -8,12 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Module that constructs a control-flow graph representing an item. -Uses `Graph` as the underlying representation. - -*/ +//! Module that constructs a control-flow graph representing an item. +//! Uses `Graph` as the underlying representation. 
use middle::graph; use middle::ty; diff --git a/src/librustc/middle/dataflow.rs b/src/librustc/middle/dataflow.rs index 5ac85833e221e..4d99eb3bb88eb 100644 --- a/src/librustc/middle/dataflow.rs +++ b/src/librustc/middle/dataflow.rs @@ -8,14 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - -/*! - * A module for propagating forward dataflow information. The analysis - * assumes that the items to be propagated can be represented as bits - * and thus uses bitvectors. Your job is simply to specify the so-called - * GEN and KILL bits for each expression. - */ - +//! A module for propagating forward dataflow information. The analysis +//! assumes that the items to be propagated can be represented as bits +//! and thus uses bitvectors. Your job is simply to specify the so-called +//! GEN and KILL bits for each expression. use middle::cfg; use middle::cfg::CFGIndex; diff --git a/src/librustc/middle/def.rs b/src/librustc/middle/def.rs index 7ee8b33b1fa67..9b25000044222 100644 --- a/src/librustc/middle/def.rs +++ b/src/librustc/middle/def.rs @@ -45,7 +45,7 @@ pub enum Def { /// - If it's an ExprPath referring to some tuple struct, then DefMap maps /// it to a def whose id is the StructDef.ctor_id. DefStruct(ast::DefId), - DefTyParamBinder(ast::NodeId), /* struct, impl or trait with ty params */ + DefTyParamBinder(ast::NodeId), // struct, impl or trait with ty params DefRegion(ast::NodeId), DefLabel(ast::NodeId), DefMethod(ast::DefId /* method */, Option /* trait */), diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs index 1e06b3b1fd463..aea2a622430cf 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -8,11 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * A different sort of visitor for walking fn bodies. 
Unlike the - * normal visitor, which just walks the entire body in one shot, the - * `ExprUseVisitor` determines how expressions are being used. - */ +//! A different sort of visitor for walking fn bodies. Unlike the +//! normal visitor, which just walks the entire body in one shot, the +//! `ExprUseVisitor` determines how expressions are being used. use mc = middle::mem_categorization; use middle::def; @@ -566,12 +564,9 @@ impl<'d,'t,TYPER:mc::Typer> ExprUseVisitor<'d,'t,TYPER> { } } + /// Indicates that the value of `blk` will be consumed, + /// meaning either copied or moved depending on its type. fn walk_block(&mut self, blk: &ast::Block) { - /*! - * Indicates that the value of `blk` will be consumed, - * meaning either copied or moved depending on its type. - */ - debug!("walk_block(blk.id={:?})", blk.id); for stmt in blk.stmts.iter() { @@ -668,16 +663,14 @@ impl<'d,'t,TYPER:mc::Typer> ExprUseVisitor<'d,'t,TYPER> { } } + /// Autoderefs for overloaded Deref calls in fact reference + /// their receiver. That is, if we have `(*x)` where `x` is of + /// type `Rc`, then this in fact is equivalent to + /// `x.deref()`. Since `deref()` is declared with `&self`, this + /// is an autoref of `x`. fn walk_autoderefs(&mut self, expr: &ast::Expr, autoderefs: uint) { - /*! - * Autoderefs for overloaded Deref calls in fact reference - * their receiver. That is, if we have `(*x)` where `x` is of - * type `Rc`, then this in fact is equivalent to - * `x.deref()`. Since `deref()` is declared with `&self`, this - * is an autoref of `x`. - */ debug!("walk_autoderefs expr={} autoderefs={}", expr.repr(self.tcx()), autoderefs); for i in range(0, autoderefs) { diff --git a/src/librustc/middle/graph.rs b/src/librustc/middle/graph.rs index b1f9b0bff9fd2..1fa09ab4b58d6 100644 --- a/src/librustc/middle/graph.rs +++ b/src/librustc/middle/graph.rs @@ -8,31 +8,27 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! 
- -A graph module for use in dataflow, region resolution, and elsewhere. - -# Interface details - -You customize the graph by specifying a "node data" type `N` and an -"edge data" type `E`. You can then later gain access (mutable or -immutable) to these "user-data" bits. Currently, you can only add -nodes or edges to the graph. You cannot remove or modify them once -added. This could be changed if we have a need. - -# Implementation details - -The main tricky thing about this code is the way that edges are -stored. The edges are stored in a central array, but they are also -threaded onto two linked lists for each node, one for incoming edges -and one for outgoing edges. Note that every edge is a member of some -incoming list and some outgoing list. Basically you can load the -first index of the linked list from the node data structures (the -field `first_edge`) and then, for each edge, load the next index from -the field `next_edge`). Each of those fields is an array that should -be indexed by the direction (see the type `Direction`). - -*/ +//! A graph module for use in dataflow, region resolution, and elsewhere. +//! +//! # Interface details +//! +//! You customize the graph by specifying a "node data" type `N` and an +//! "edge data" type `E`. You can then later gain access (mutable or +//! immutable) to these "user-data" bits. Currently, you can only add +//! nodes or edges to the graph. You cannot remove or modify them once +//! added. This could be changed if we have a need. +//! +//! # Implementation details +//! +//! The main tricky thing about this code is the way that edges are +//! stored. The edges are stored in a central array, but they are also +//! threaded onto two linked lists for each node, one for incoming edges +//! and one for outgoing edges. Note that every edge is a member of some +//! incoming list and some outgoing list. Basically you can load the +//! first index of the linked list from the node data structures (the +//! 
field `first_edge`) and then, for each edge, load the next index from +//! the field `next_edge`). Each of those fields is an array that should +//! be indexed by the direction (see the type `Direction`). #![allow(dead_code)] // still WIP diff --git a/src/librustc/middle/kind.rs b/src/librustc/middle/kind.rs index a7154e78bc586..37abe485d1ef5 100644 --- a/src/librustc/middle/kind.rs +++ b/src/librustc/middle/kind.rs @@ -625,7 +625,7 @@ pub fn check_cast_for_escaping_regions( // possibly escape the enclosing fn item (note that all type parameters // must have been declared on the enclosing fn item). if target_regions.iter().any(|r| is_ReScope(*r)) { - return; /* case (1) */ + return; // case (1) } // Assuming the trait instance can escape, then ensure that each parameter @@ -658,9 +658,9 @@ pub fn check_cast_for_escaping_regions( // except for historical accident. Bottom // line, we need proper region bounding. } else if target_params.iter().any(|x| x == &source_param) { - /* case (2) */ + // case (2) } else { - check_static(cx.tcx, ty, source_span); /* case (3) */ + check_static(cx.tcx, ty, source_span); // case (3) } } _ => {} diff --git a/src/librustc/middle/liveness.rs b/src/librustc/middle/liveness.rs index a33062457629b..a3681f8735717 100644 --- a/src/librustc/middle/liveness.rs +++ b/src/librustc/middle/liveness.rs @@ -8,99 +8,97 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * A classic liveness analysis based on dataflow over the AST. Computes, - * for each local variable in a function, whether that variable is live - * at a given point. Program execution points are identified by their - * id. - * - * # Basic idea - * - * The basic model is that each local variable is assigned an index. We - * represent sets of local variables using a vector indexed by this - * index. The value in the vector is either 0, indicating the variable - * is dead, or the id of an expression that uses the variable. 
- * - * We conceptually walk over the AST in reverse execution order. If we - * find a use of a variable, we add it to the set of live variables. If - * we find an assignment to a variable, we remove it from the set of live - * variables. When we have to merge two flows, we take the union of - * those two flows---if the variable is live on both paths, we simply - * pick one id. In the event of loops, we continue doing this until a - * fixed point is reached. - * - * ## Checking initialization - * - * At the function entry point, all variables must be dead. If this is - * not the case, we can report an error using the id found in the set of - * live variables, which identifies a use of the variable which is not - * dominated by an assignment. - * - * ## Checking moves - * - * After each explicit move, the variable must be dead. - * - * ## Computing last uses - * - * Any use of the variable where the variable is dead afterwards is a - * last use. - * - * # Implementation details - * - * The actual implementation contains two (nested) walks over the AST. - * The outer walk has the job of building up the ir_maps instance for the - * enclosing function. On the way down the tree, it identifies those AST - * nodes and variable IDs that will be needed for the liveness analysis - * and assigns them contiguous IDs. The liveness id for an AST node is - * called a `live_node` (it's a newtype'd uint) and the id for a variable - * is called a `variable` (another newtype'd uint). - * - * On the way back up the tree, as we are about to exit from a function - * declaration we allocate a `liveness` instance. Now that we know - * precisely how many nodes and variables we need, we can allocate all - * the various arrays that we will need to precisely the right size. We then - * perform the actual propagation on the `liveness` instance. - * - * This propagation is encoded in the various `propagate_through_*()` - * methods. 
It effectively does a reverse walk of the AST; whenever we - * reach a loop node, we iterate until a fixed point is reached. - * - * ## The `Users` struct - * - * At each live node `N`, we track three pieces of information for each - * variable `V` (these are encapsulated in the `Users` struct): - * - * - `reader`: the `LiveNode` ID of some node which will read the value - * that `V` holds on entry to `N`. Formally: a node `M` such - * that there exists a path `P` from `N` to `M` where `P` does not - * write `V`. If the `reader` is `invalid_node()`, then the current - * value will never be read (the variable is dead, essentially). - * - * - `writer`: the `LiveNode` ID of some node which will write the - * variable `V` and which is reachable from `N`. Formally: a node `M` - * such that there exists a path `P` from `N` to `M` and `M` writes - * `V`. If the `writer` is `invalid_node()`, then there is no writer - * of `V` that follows `N`. - * - * - `used`: a boolean value indicating whether `V` is *used*. We - * distinguish a *read* from a *use* in that a *use* is some read that - * is not just used to generate a new value. For example, `x += 1` is - * a read but not a use. This is used to generate better warnings. - * - * ## Special Variables - * - * We generate various special variables for various, well, special purposes. - * These are described in the `specials` struct: - * - * - `exit_ln`: a live node that is generated to represent every 'exit' from - * the function, whether it be by explicit return, fail, or other means. - * - * - `fallthrough_ln`: a live node that represents a fallthrough - * - * - `no_ret_var`: a synthetic variable that is only 'read' from, the - * fallthrough node. This allows us to detect functions where we fail - * to return explicitly. - */ +//! A classic liveness analysis based on dataflow over the AST. Computes, +//! for each local variable in a function, whether that variable is live +//! at a given point. 
Program execution points are identified by their +//! id. +//! +//! # Basic idea +//! +//! The basic model is that each local variable is assigned an index. We +//! represent sets of local variables using a vector indexed by this +//! index. The value in the vector is either 0, indicating the variable +//! is dead, or the id of an expression that uses the variable. +//! +//! We conceptually walk over the AST in reverse execution order. If we +//! find a use of a variable, we add it to the set of live variables. If +//! we find an assignment to a variable, we remove it from the set of live +//! variables. When we have to merge two flows, we take the union of +//! those two flows---if the variable is live on both paths, we simply +//! pick one id. In the event of loops, we continue doing this until a +//! fixed point is reached. +//! +//! ## Checking initialization +//! +//! At the function entry point, all variables must be dead. If this is +//! not the case, we can report an error using the id found in the set of +//! live variables, which identifies a use of the variable which is not +//! dominated by an assignment. +//! +//! ## Checking moves +//! +//! After each explicit move, the variable must be dead. +//! +//! ## Computing last uses +//! +//! Any use of the variable where the variable is dead afterwards is a +//! last use. +//! +//! # Implementation details +//! +//! The actual implementation contains two (nested) walks over the AST. +//! The outer walk has the job of building up the ir_maps instance for the +//! enclosing function. On the way down the tree, it identifies those AST +//! nodes and variable IDs that will be needed for the liveness analysis +//! and assigns them contiguous IDs. The liveness id for an AST node is +//! called a `live_node` (it's a newtype'd uint) and the id for a variable +//! is called a `variable` (another newtype'd uint). +//! +//! On the way back up the tree, as we are about to exit from a function +//! 
declaration we allocate a `liveness` instance. Now that we know +//! precisely how many nodes and variables we need, we can allocate all +//! the various arrays that we will need to precisely the right size. We then +//! perform the actual propagation on the `liveness` instance. +//! +//! This propagation is encoded in the various `propagate_through_*()` +//! methods. It effectively does a reverse walk of the AST; whenever we +//! reach a loop node, we iterate until a fixed point is reached. +//! +//! ## The `Users` struct +//! +//! At each live node `N`, we track three pieces of information for each +//! variable `V` (these are encapsulated in the `Users` struct): +//! +//! - `reader`: the `LiveNode` ID of some node which will read the value +//! that `V` holds on entry to `N`. Formally: a node `M` such +//! that there exists a path `P` from `N` to `M` where `P` does not +//! write `V`. If the `reader` is `invalid_node()`, then the current +//! value will never be read (the variable is dead, essentially). +//! +//! - `writer`: the `LiveNode` ID of some node which will write the +//! variable `V` and which is reachable from `N`. Formally: a node `M` +//! such that there exists a path `P` from `N` to `M` and `M` writes +//! `V`. If the `writer` is `invalid_node()`, then there is no writer +//! of `V` that follows `N`. +//! +//! - `used`: a boolean value indicating whether `V` is *used*. We +//! distinguish a *read* from a *use* in that a *use* is some read that +//! is not just used to generate a new value. For example, `x += 1` is +//! a read but not a use. This is used to generate better warnings. +//! +//! ## Special Variables +//! +//! We generate various special variables for various, well, special purposes. +//! These are described in the `specials` struct: +//! +//! - `exit_ln`: a live node that is generated to represent every 'exit' from +//! the function, whether it be by explicit return, fail, or other means. +//! +//! 
- `fallthrough_ln`: a live node that represents a fallthrough +//! +//! - `no_ret_var`: a synthetic variable that is only 'read' from, the +//! fallthrough node. This allows us to detect functions where we fail +//! to return explicitly. use middle::def::*; use middle::freevars; @@ -629,9 +627,7 @@ impl<'a> Liveness<'a> { if reader.is_valid() {Some(self.ir.lnk(reader))} else {None} } - /* - Is this variable live on entry to any of its successor nodes? - */ + // Is this variable live on entry to any of its successor nodes? fn live_on_exit(&self, ln: LiveNode, var: Variable) -> Option { let successor = *self.successors.get(ln.get()); @@ -944,10 +940,8 @@ impl<'a> Liveness<'a> { ExprFnBlock(_, ref blk) | ExprProc(_, ref blk) => { debug!("{} is an ExprFnBlock or ExprProc", expr_to_str(expr)); - /* - The next-node for a break is the successor of the entire - loop. The next-node for a continue is the top of this loop. - */ + // The next-node for a break is the successor of the entire + // loop. The next-node for a continue is the top of this loop. let node = self.live_node(expr.id, expr.span); self.with_loop_nodes(blk.id, succ, node, |this| { @@ -1282,25 +1276,19 @@ impl<'a> Liveness<'a> { body: &Block, succ: LiveNode) -> LiveNode { - - /* - - We model control flow like this: - - (cond) <--+ - | | - v | - +-- (expr) | - | | | - | v | - | (body) ---+ - | - | - v - (succ) - - */ - + // We model control flow like this: + // + // (cond) <--+ + // | | + // v | + // +-- (expr) | + // | | | + // | v | + // | (body) ---+ + // | + // | + // v + // (succ) // first iteration: let mut first_merge = true; diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index 54cca082e0de8..649c39e2d26d3 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -8,57 +8,55 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! 
- * # Categorization - * - * The job of the categorization module is to analyze an expression to - * determine what kind of memory is used in evaluating it (for example, - * where dereferences occur and what kind of pointer is dereferenced; - * whether the memory is mutable; etc) - * - * Categorization effectively transforms all of our expressions into - * expressions of the following forms (the actual enum has many more - * possibilities, naturally, but they are all variants of these base - * forms): - * - * E = rvalue // some computed rvalue - * | x // address of a local variable or argument - * | *E // deref of a ptr - * | E.comp // access to an interior component - * - * Imagine a routine ToAddr(Expr) that evaluates an expression and returns an - * address where the result is to be found. If Expr is an lvalue, then this - * is the address of the lvalue. If Expr is an rvalue, this is the address of - * some temporary spot in memory where the result is stored. - * - * Now, cat_expr() classifies the expression Expr and the address A=ToAddr(Expr) - * as follows: - * - * - cat: what kind of expression was this? This is a subset of the - * full expression forms which only includes those that we care about - * for the purpose of the analysis. - * - mutbl: mutability of the address A - * - ty: the type of data found at the address A - * - * The resulting categorization tree differs somewhat from the expressions - * themselves. For example, auto-derefs are explicit. Also, an index a[b] is - * decomposed into two operations: a dereference to reach the array data and - * then an index to jump forward to the relevant item. - * - * ## By-reference upvars - * - * One part of the translation which may be non-obvious is that we translate - * closure upvars into the dereference of a borrowed pointer; this more closely - * resembles the runtime translation. 
So, for example, if we had: - * - * let mut x = 3; - * let y = 5; - * let inc = || x += y; - * - * Then when we categorize `x` (*within* the closure) we would yield a - * result of `*x'`, effectively, where `x'` is a `cat_upvar` reference - * tied to `x`. The type of `x'` will be a borrowed pointer. - */ +//! # Categorization +//! +//! The job of the categorization module is to analyze an expression to +//! determine what kind of memory is used in evaluating it (for example, +//! where dereferences occur and what kind of pointer is dereferenced; +//! whether the memory is mutable; etc) +//! +//! Categorization effectively transforms all of our expressions into +//! expressions of the following forms (the actual enum has many more +//! possibilities, naturally, but they are all variants of these base +//! forms): +//! +//! E = rvalue // some computed rvalue +//! | x // address of a local variable or argument +//! | *E // deref of a ptr +//! | E.comp // access to an interior component +//! +//! Imagine a routine ToAddr(Expr) that evaluates an expression and returns an +//! address where the result is to be found. If Expr is an lvalue, then this +//! is the address of the lvalue. If Expr is an rvalue, this is the address of +//! some temporary spot in memory where the result is stored. +//! +//! Now, cat_expr() classifies the expression Expr and the address A=ToAddr(Expr) +//! as follows: +//! +//! - cat: what kind of expression was this? This is a subset of the +//! full expression forms which only includes those that we care about +//! for the purpose of the analysis. +//! - mutbl: mutability of the address A +//! - ty: the type of data found at the address A +//! +//! The resulting categorization tree differs somewhat from the expressions +//! themselves. For example, auto-derefs are explicit. Also, an index a[b] is +//! decomposed into two operations: a dereference to reach the array data and +//! then an index to jump forward to the relevant item. +//! +//! 
## By-reference upvars +//! +//! One part of the translation which may be non-obvious is that we translate +//! closure upvars into the dereference of a borrowed pointer; this more closely +//! resembles the runtime translation. So, for example, if we had: +//! +//! let mut x = 3; +//! let y = 5; +//! let inc = || x += y; +//! +//! Then when we categorize `x` (*within* the closure) we would yield a +//! result of `*x'`, effectively, where `x'` is a `cat_upvar` reference +//! tied to `x`. The type of `x'` will be a borrowed pointer. #![allow(non_camel_case_types)] @@ -243,24 +241,22 @@ pub struct MemCategorizationContext<'t,TYPER> { pub type McResult = Result; -/** - * The `Typer` trait provides the interface for the mem-categorization - * module to the results of the type check. It can be used to query - * the type assigned to an expression node, to inquire after adjustments, - * and so on. - * - * This interface is needed because mem-categorization is used from - * two places: `regionck` and `borrowck`. `regionck` executes before - * type inference is complete, and hence derives types and so on from - * intermediate tables. This also implies that type errors can occur, - * and hence `node_ty()` and friends return a `Result` type -- any - * error will propagate back up through the mem-categorization - * routines. - * - * In the borrow checker, in contrast, type checking is complete and we - * know that no errors have occurred, so we simply consult the tcx and we - * can be sure that only `Ok` results will occur. - */ +/// The `Typer` trait provides the interface for the mem-categorization +/// module to the results of the type check. It can be used to query +/// the type assigned to an expression node, to inquire after adjustments, +/// and so on. +/// +/// This interface is needed because mem-categorization is used from +/// two places: `regionck` and `borrowck`. 
`regionck` executes before +/// type inference is complete, and hence derives types and so on from +/// intermediate tables. This also implies that type errors can occur, +/// and hence `node_ty()` and friends return a `Result` type -- any +/// error will propagate back up through the mem-categorization +/// routines. +/// +/// In the borrow checker, in contrast, type checking is complete and we +/// know that no errors have occurred, so we simply consult the tcx and we +/// can be sure that only `Ok` results will occur. pub trait Typer { fn tcx<'a>(&'a self) -> &'a ty::ctxt; fn node_ty(&self, id: ast::NodeId) -> McResult; @@ -596,22 +592,19 @@ impl<'t,TYPER:Typer> MemCategorizationContext<'t,TYPER> { } } + /// Upvars through a closure are in fact indirect + /// references. That is, when a closure refers to a + /// variable from a parent stack frame like `x = 10`, + /// that is equivalent to `*x_ = 10` where `x_` is a + /// borrowed pointer (`&mut x`) created when the closure + /// was created and store in the environment. This + /// equivalence is expose in the mem-categorization. fn cat_upvar(&self, id: ast::NodeId, span: Span, var_id: ast::NodeId, fn_node_id: ast::NodeId) -> McResult { - /*! - * Upvars through a closure are in fact indirect - * references. That is, when a closure refers to a - * variable from a parent stack frame like `x = 10`, - * that is equivalent to `*x_ = 10` where `x_` is a - * borrowed pointer (`&mut x`) created when the closure - * was created and store in the environment. This - * equivalence is expose in the mem-categorization. 
- */ - let upvar_id = ty::UpvarId { var_id: var_id, closure_expr_id: fn_node_id }; @@ -844,20 +837,17 @@ impl<'t,TYPER:Typer> MemCategorizationContext<'t,TYPER> { } } + /// Given a pattern P like: `[_, ..Q, _]`, where `vec_cmt` is + /// the cmt for `P`, `slice_pat` is the pattern `Q`, returns: + /// - a cmt for `Q` + /// - the mutability and region of the slice `Q` + /// + /// These last two bits of info happen to be things that + /// borrowck needs. pub fn cat_slice_pattern(&self, vec_cmt: cmt, slice_pat: &ast::Pat) -> McResult<(cmt, ast::Mutability, ty::Region)> { - /*! - * Given a pattern P like: `[_, ..Q, _]`, where `vec_cmt` is - * the cmt for `P`, `slice_pat` is the pattern `Q`, returns: - * - a cmt for `Q` - * - the mutability and region of the slice `Q` - * - * These last two bits of info happen to be things that - * borrowck needs. - */ - let slice_ty = if_ok!(self.node_ty(slice_pat.id)); let (slice_mutbl, slice_r) = vec_slice_info(self.tcx(), slice_pat, @@ -865,17 +855,14 @@ impl<'t,TYPER:Typer> MemCategorizationContext<'t,TYPER> { let cmt_slice = self.cat_index(slice_pat, vec_cmt, 0); return Ok((cmt_slice, slice_mutbl, slice_r)); + /// In a pattern like [a, b, ..c], normally `c` has slice type, + /// but if you have [a, b, ..ref c], then the type of `ref c` + /// will be `&&[]`, so to extract the slice details we have + /// to recurse through rptrs. fn vec_slice_info(tcx: &ty::ctxt, pat: &ast::Pat, slice_ty: ty::t) -> (ast::Mutability, ty::Region) { - /*! - * In a pattern like [a, b, ..c], normally `c` has slice type, - * but if you have [a, b, ..ref c], then the type of `ref c` - * will be `&&[]`, so to extract the slice details we have - * to recurse through rptrs. 
- */ - match ty::get(slice_ty).sty { ty::ty_rptr(r, ref mt) => match ty::get(mt.ty).sty { ty::ty_vec(slice_mt, None) => (slice_mt.mutbl, r), @@ -1086,7 +1073,7 @@ impl<'t,TYPER:Typer> MemCategorizationContext<'t,TYPER> { } ast::PatLit(_) | ast::PatRange(_, _) => { - /*always ok*/ + // always ok } ast::PatMac(_) => { @@ -1192,12 +1179,9 @@ impl cmt_ { } } + /// Returns `Some(_)` if this lvalue represents a freely aliasable + /// pointer type. pub fn freely_aliasable(&self, ctxt: &ty::ctxt) -> Option { - /*! - * Returns `Some(_)` if this lvalue represents a freely aliasable - * pointer type. - */ - // Maybe non-obvious: copied upvars can only be considered // non-aliasable in once closures, since any other kind can be // aliased and eventually recused. diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index 3b59736e292b5..a57fc0ae0f938 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -8,18 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -This file actually contains two passes related to regions. The first -pass builds up the `scope_map`, which describes the parent links in -the region hierarchy. The second pass infers which types must be -region parameterized. - -Most of the documentation on regions can be found in -`middle/typeck/infer/region_inference.rs` - -*/ - +//! This file actually contains two passes related to regions. The first +//! pass builds up the `scope_map`, which describes the parent links in +//! the region hierarchy. The second pass infers which types must be +//! region parameterized. +//! +//! Most of the documentation on regions can be found in +//! 
`middle/typeck/infer/region_inference.rs` use driver::session::Session; use middle::ty::{FreeRegion}; @@ -35,46 +30,44 @@ use syntax::visit::{Visitor, FnKind}; use syntax::ast::{Block, Item, FnDecl, NodeId, Arm, Pat, Stmt, Expr, Local}; use syntax::ast_util::{stmt_id}; -/** -The region maps encode information about region relationships. - -- `scope_map` maps from a scope id to the enclosing scope id; this is - usually corresponding to the lexical nesting, though in the case of - closures the parent scope is the innermost conditional expression or repeating - block - -- `var_map` maps from a variable or binding id to the block in which - that variable is declared. - -- `free_region_map` maps from a free region `a` to a list of free - regions `bs` such that `a <= b for all b in bs` - - the free region map is populated during type check as we check - each function. See the function `relate_free_regions` for - more information. - -- `rvalue_scopes` includes entries for those expressions whose cleanup - scope is larger than the default. The map goes from the expression - id to the cleanup scope id. For rvalues not present in this table, - the appropriate cleanup scope is the innermost enclosing statement, - conditional expression, or repeating block (see `terminating_scopes`). - -- `terminating_scopes` is a set containing the ids of each statement, - or conditional/repeating expression. These scopes are calling "terminating - scopes" because, when attempting to find the scope of a temporary, by - default we search up the enclosing scopes until we encounter the - terminating scope. A conditional/repeating - expression is one which is not guaranteed to execute exactly once - upon entering the parent scope. This could be because the expression - only executes conditionally, such as the expression `b` in `a && b`, - or because the expression may execute many times, such as a loop - body. 
The reason that we distinguish such expressions is that, upon - exiting the parent scope, we cannot statically know how many times - the expression executed, and thus if the expression creates - temporaries we cannot know statically how many such temporaries we - would have to cleanup. Therefore we ensure that the temporaries never - outlast the conditional/repeating expression, preventing the need - for dynamic checks and/or arbitrary amounts of stack space. -*/ +/// The region maps encode information about region relationships. +/// +/// - `scope_map` maps from a scope id to the enclosing scope id; this is +/// usually corresponding to the lexical nesting, though in the case of +/// closures the parent scope is the innermost conditional expression or repeating +/// block +/// +/// - `var_map` maps from a variable or binding id to the block in which +/// that variable is declared. +/// +/// - `free_region_map` maps from a free region `a` to a list of free +/// regions `bs` such that `a <= b for all b in bs` +/// - the free region map is populated during type check as we check +/// each function. See the function `relate_free_regions` for +/// more information. +/// +/// - `rvalue_scopes` includes entries for those expressions whose cleanup +/// scope is larger than the default. The map goes from the expression +/// id to the cleanup scope id. For rvalues not present in this table, +/// the appropriate cleanup scope is the innermost enclosing statement, +/// conditional expression, or repeating block (see `terminating_scopes`). +/// +/// - `terminating_scopes` is a set containing the ids of each statement, +/// or conditional/repeating expression. These scopes are calling "terminating +/// scopes" because, when attempting to find the scope of a temporary, by +/// default we search up the enclosing scopes until we encounter the +/// terminating scope. 
A conditional/repeating +/// expression is one which is not guaranteed to execute exactly once +/// upon entering the parent scope. This could be because the expression +/// only executes conditionally, such as the expression `b` in `a && b`, +/// or because the expression may execute many times, such as a loop +/// body. The reason that we distinguish such expressions is that, upon +/// exiting the parent scope, we cannot statically know how many times +/// the expression executed, and thus if the expression creates +/// temporaries we cannot know statically how many such temporaries we +/// would have to cleanup. Therefore we ensure that the temporaries never +/// outlast the conditional/repeating expression, preventing the need +/// for dynamic checks and/or arbitrary amounts of stack space. pub struct RegionMaps { scope_map: RefCell>, var_map: RefCell>, @@ -133,14 +126,11 @@ impl RegionMaps { self.rvalue_scopes.borrow_mut().insert(var, lifetime); } + /// Records that a scope is a TERMINATING SCOPE. Whenever we + /// create automatic temporaries -- e.g. by an + /// expression like `a().f` -- they will be freed within + /// the innermost terminating scope. pub fn mark_as_terminating_scope(&self, scope_id: ast::NodeId) { - /*! - * Records that a scope is a TERMINATING SCOPE. Whenever we - * create automatic temporaries -- e.g. by an - * expression like `a().f` -- they will be freed within - * the innermost terminating scope. - */ - debug!("record_terminating_scope(scope_id={})", scope_id); self.terminating_scopes.borrow_mut().insert(scope_id); } @@ -159,10 +149,8 @@ impl RegionMaps { } } + /// Returns the lifetime of the local variable `var_id` pub fn var_scope(&self, var_id: ast::NodeId) -> ast::NodeId { - /*! 
- * Returns the lifetime of the local variable `var_id` - */ match self.var_map.borrow().find(&var_id) { Some(&r) => r, None => { fail!("no enclosing scope for id {}", var_id); } @@ -217,15 +205,12 @@ impl RegionMaps { self.is_subscope_of(scope2, scope1) } + /// Returns true if `subscope` is equal to or is lexically + /// nested inside `superscope` and false otherwise. pub fn is_subscope_of(&self, subscope: ast::NodeId, superscope: ast::NodeId) -> bool { - /*! - * Returns true if `subscope` is equal to or is lexically - * nested inside `superscope` and false otherwise. - */ - let mut s = subscope; while superscope != s { match self.scope_map.borrow().find(&s) { @@ -245,14 +230,11 @@ impl RegionMaps { return true; } + /// Determines whether two free regions have a subregion relationship + /// by walking the graph encoded in `free_region_map`. Note that + /// it is possible that `sub != sup` and `sub <= sup` and `sup <= sub` + /// (that is, the user can give two different names to the same lifetime). pub fn sub_free_region(&self, sub: FreeRegion, sup: FreeRegion) -> bool { - /*! - * Determines whether two free regions have a subregion relationship - * by walking the graph encoded in `free_region_map`. Note that - * it is possible that `sub != sup` and `sub <= sup` and `sup <= sub` - * (that is, the user can give two different names to the same lifetime). - */ - if sub == sup { return true; } @@ -283,16 +265,13 @@ impl RegionMaps { return false; } + /// Determines whether one region is a subregion of another. This is + /// intended to run *after inference* and sadly the logic is somewhat + /// duplicated with the code in infer.rs. pub fn is_subregion_of(&self, sub_region: ty::Region, super_region: ty::Region) -> bool { - /*! - * Determines whether one region is a subregion of another. This is - * intended to run *after inference* and sadly the logic is somewhat - * duplicated with the code in infer.rs. 
- */ - debug!("is_subregion_of(sub_region={:?}, super_region={:?})", sub_region, super_region); @@ -321,16 +300,13 @@ impl RegionMaps { } } + /// Finds the nearest common ancestor (if any) of two scopes. That + /// is, finds the smallest scope which is greater than or equal to + /// both `scope_a` and `scope_b`. pub fn nearest_common_ancestor(&self, scope_a: ast::NodeId, scope_b: ast::NodeId) -> Option { - /*! - * Finds the nearest common ancestor (if any) of two scopes. That - * is, finds the smallest scope which is greater than or equal to - * both `scope_a` and `scope_b`. - */ - if scope_a == scope_b { return Some(scope_a); } let a_ancestors = ancestors_of(self, scope_a); @@ -642,18 +618,15 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, visit::walk_local(visitor, local, cx); + /// True if `pat` match the `P&` nonterminal: + /// + /// P& = ref X + /// | StructName { ..., P&, ... } + /// | VariantName(..., P&, ...) + /// | [ ..., P&, ... ] + /// | ( ..., P&, ... ) + /// | box P& fn is_binding_pat(pat: &ast::Pat) -> bool { - /*! - * True if `pat` match the `P&` nonterminal: - * - * P& = ref X - * | StructName { ..., P&, ... } - * | VariantName(..., P&, ...) - * | [ ..., P&, ... ] - * | ( ..., P&, ... ) - * | box P& - */ - match pat.node { ast::PatIdent(ast::BindByRef(_), _, _) => true, @@ -680,35 +653,29 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, } } + /// True if `ty` is a borrowed pointer type + /// like `&int` or `&[...]`. fn is_borrowed_ty(ty: &ast::Ty) -> bool { - /*! - * True if `ty` is a borrowed pointer type - * like `&int` or `&[...]`. - */ - match ty.node { ast::TyRptr(..) => true, _ => false } } + /// If `expr` matches the `E&` grammar, then records an extended + /// rvalue scope as appropriate: + /// + /// E& = & ET + /// | StructName { ..., f: E&, ... } + /// | [ ..., E&, ... ] + /// | ( ..., E&, ... ) + /// | {...; E&} + /// | box E& + /// | E& as ... 
+ /// | ( E& ) fn record_rvalue_scope_if_borrow_expr(visitor: &mut RegionResolutionVisitor, expr: &ast::Expr, blk_id: ast::NodeId) { - /*! - * If `expr` matches the `E&` grammar, then records an extended - * rvalue scope as appropriate: - * - * E& = & ET - * | StructName { ..., f: E&, ... } - * | [ ..., E&, ... ] - * | ( ..., E&, ... ) - * | {...; E&} - * | box E& - * | E& as ... - * | ( E& ) - */ - match expr.node { ast::ExprAddrOf(_, ref subexpr) => { record_rvalue_scope_if_borrow_expr(visitor, &**subexpr, blk_id); @@ -752,29 +719,26 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, } } + /// Applied to an expression `expr` if `expr` -- or something + /// owned or partially owned by `expr` -- is going to be + /// indirectly referenced by a variable in a let statement. In + /// that case, the "temporary lifetime" or `expr` is extended + /// to be the block enclosing the `let` statement. + /// + /// More formally, if `expr` matches the grammar `ET`, record + /// the rvalue scope of the matching `` as `blk_id`: + /// + /// ET = *ET + /// | ET[...] + /// | ET.f + /// | (ET) + /// | + /// + /// Note: ET is intended to match "rvalues or + /// lvalues based on rvalues". fn record_rvalue_scope<'a>(visitor: &mut RegionResolutionVisitor, expr: &'a ast::Expr, blk_id: ast::NodeId) { - /*! - * Applied to an expression `expr` if `expr` -- or something - * owned or partially owned by `expr` -- is going to be - * indirectly referenced by a variable in a let statement. In - * that case, the "temporary lifetime" or `expr` is extended - * to be the block enclosing the `let` statement. - * - * More formally, if `expr` matches the grammar `ET`, record - * the rvalue scope of the matching `` as `blk_id`: - * - * ET = *ET - * | ET[...] - * | ET.f - * | (ET) - * | - * - * Note: ET is intended to match "rvalues or - * lvalues based on rvalues". 
- */ - let mut expr = expr; loop { // Note: give all the expressions matching `ET` with the diff --git a/src/librustc/middle/resolve.rs b/src/librustc/middle/resolve.rs index b7b4618a79046..fd35dc8c70f39 100644 --- a/src/librustc/middle/resolve.rs +++ b/src/librustc/middle/resolve.rs @@ -668,10 +668,8 @@ impl NameBindings { } } - /** - * Returns the module node. Fails if this node does not have a module - * definition. - */ + /// Returns the module node. Fails if this node does not have a module + /// definition. fn get_module(&self) -> Rc { match self.get_module_if_available() { None => { @@ -975,16 +973,14 @@ impl<'a> Resolver<'a> { visit::walk_crate(&mut visitor, krate, initial_parent); } - /** - * Adds a new child item to the module definition of the parent node and - * returns its corresponding name bindings as well as the current parent. - * Or, if we're inside a block, creates (or reuses) an anonymous module - * corresponding to the innermost block ID and returns the name bindings - * as well as the newly-created parent. - * - * If this node does not have a module definition and we are not inside - * a block, fails. - */ + /// Adds a new child item to the module definition of the parent node and + /// returns its corresponding name bindings as well as the current parent. + /// Or, if we're inside a block, creates (or reuses) an anonymous module + /// corresponding to the innermost block ID and returns the name bindings + /// as well as the newly-created parent. + /// + /// If this node does not have a module definition and we are not inside + /// a block, fails. fn add_child(&self, name: Ident, reduced_graph_parent: ReducedGraphParent, diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs index 8ff5331cec231..22d8b7f825164 100644 --- a/src/librustc/middle/resolve_lifetime.rs +++ b/src/librustc/middle/resolve_lifetime.rs @@ -8,14 +8,12 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Name resolution for lifetimes. - * - * Name resolution for lifetimes follows MUCH simpler rules than the - * full resolve. For example, lifetime names are never exported or - * used between functions, and they operate in a purely top-down - * way. Therefore we break lifetime name resolution into a separate pass. - */ +//! Name resolution for lifetimes. +//! +//! Name resolution for lifetimes follows MUCH simpler rules than the +//! full resolve. For example, lifetime names are never exported or +//! used between functions, and they operate in a purely top-down +//! way. Therefore we break lifetime name resolution into a separate pass. use driver::session::Session; use middle::subst; @@ -175,35 +173,33 @@ impl<'a, 'b> Visitor> for LifetimeContext<'b> { impl<'a> LifetimeContext<'a> { /// Visits self by adding a scope and handling recursive walk over the contents with `walk`. + /// + /// Handles visiting fns and methods. These are a bit + /// complicated because we must distinguish early- vs late-bound + /// lifetime parameters. We do this by checking which lifetimes + /// appear within type bounds; those are early bound lifetimes, + /// and the rest are late bound. + /// + /// For example: + /// + /// fn foo<'a,'b,'c,T:Trait<'b>>(...) + /// + /// Here `'a` and `'c` are late bound but `'b` is early + /// bound. Note that early- and late-bound lifetimes may be + /// interspersed together. + /// + /// If early bound lifetimes are present, we separate them into + /// their own list (and likewise for late bound). They will be + /// numbered sequentially, starting from the lowest index that + /// is already in scope (for a fn item, that will be 0, but for + /// a method it might not be). Late bound lifetimes are + /// resolved by name and associated with a binder id (`n`), so + /// the ordering is not important there. 
fn visit_fn_decl(&mut self, n: ast::NodeId, generics: &ast::Generics, scope: Scope, walk: |&mut LifetimeContext, Scope|) { - /*! - * Handles visiting fns and methods. These are a bit - * complicated because we must distinguish early- vs late-bound - * lifetime parameters. We do this by checking which lifetimes - * appear within type bounds; those are early bound lifetimes, - * and the rest are late bound. - * - * For example: - * - * fn foo<'a,'b,'c,T:Trait<'b>>(...) - * - * Here `'a` and `'c` are late bound but `'b` is early - * bound. Note that early- and late-bound lifetimes may be - * interspersed together. - * - * If early bound lifetimes are present, we separate them into - * their own list (and likewise for late bound). They will be - * numbered sequentially, starting from the lowest index that - * is already in scope (for a fn item, that will be 0, but for - * a method it might not be). Late bound lifetimes are - * resolved by name and associated with a binder id (`n`), so - * the ordering is not important there. - */ - self.check_lifetime_names(&generics.lifetimes); let referenced_idents = free_lifetimes(&generics.ty_params); @@ -404,14 +400,11 @@ pub fn early_bound_lifetimes<'a>(generics: &'a ast::Generics) -> Vec) -> Vec { - /*! - * Gathers up and returns the names of any lifetimes that appear - * free in `ty_params`. Of course, right now, all lifetimes appear - * free, since we don't currently have any binders in type parameter - * declarations; just being forwards compatible with future extensions. 
- */ - let mut collector = FreeLifetimeCollector { names: vec!() }; for ty_param in ty_params.iter() { visit::walk_ty_param_bounds(&mut collector, &ty_param.bounds, ()); diff --git a/src/librustc/middle/subst.rs b/src/librustc/middle/subst.rs index 4684bd3532ec1..e24ac1d63dbf0 100644 --- a/src/librustc/middle/subst.rs +++ b/src/librustc/middle/subst.rs @@ -77,22 +77,19 @@ impl HomogeneousTuple3 for (T, T, T) { /////////////////////////////////////////////////////////////////////////// -/** - * A substitution mapping type/region parameters to new values. We - * identify each in-scope parameter by an *index* and a *parameter - * space* (which indices where the parameter is defined; see - * `ParamSpace`). - */ +/// A substitution mapping type/region parameters to new values. We +/// identify each in-scope parameter by an *index* and a *parameter +/// space* (which indices where the parameter is defined; see +/// `ParamSpace`). #[deriving(Clone, PartialEq, Eq, Hash)] pub struct Substs { pub types: VecPerParamSpace, pub regions: RegionSubsts, } -/** - * Represents the values to use when substituting lifetime parameters. - * If the value is `ErasedRegions`, then this subst is occurring during - * trans, and all region parameters will be replaced with `ty::ReStatic`. */ +/// Represents the values to use when substituting lifetime parameters. +/// If the value is `ErasedRegions`, then this subst is occurring during +/// trans, and all region parameters will be replaced with `ty::ReStatic`. #[deriving(Clone, PartialEq, Eq, Hash)] pub enum RegionSubsts { ErasedRegions, @@ -163,26 +160,20 @@ impl Substs { s } + /// Since ErasedRegions are only to be used in trans, most of + /// the compiler can use this method to easily access the set + /// of region substitutions. pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace { - /*! - * Since ErasedRegions are only to be used in trans, most of - * the compiler can use this method to easily access the set - * of region substitutions. 
- */ - match self.regions { ErasedRegions => fail!("Erased regions only expected in trans"), NonerasedRegions(ref r) => r } } + /// Since ErasedRegions are only to be used in trans, most of + /// the compiler can use this method to easily access the set + /// of region substitutions. pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace { - /*! - * Since ErasedRegions are only to be used in trans, most of - * the compiler can use this method to easily access the set - * of region substitutions. - */ - match self.regions { ErasedRegions => fail!("Erased regions only expected in trans"), NonerasedRegions(ref mut r) => r @@ -253,11 +244,9 @@ impl ParamSpace { } } -/** - * Vector of things sorted by param space. Used to keep - * the set of things declared on the type, self, or method - * distinct. - */ +/// Vector of things sorted by param space. Used to keep +/// the set of things declared on the type, self, or method +/// distinct. #[deriving(PartialEq, Eq, Clone, Hash, Encodable, Decodable)] pub struct VecPerParamSpace { // This was originally represented as a tuple with one Vec for @@ -442,16 +431,13 @@ impl VecPerParamSpace { self.get_slice(FnSpace).iter().map(|p| pred(p)).collect()) } + /// Executes the map but in reverse order. For hacky reasons, we rely + /// on this in table. + /// + /// FIXME(#5527) -- order of eval becomes irrelevant with newer + /// trait reform, which features an idempotent algorithm that + /// can be run to a fixed point pub fn map_rev(&self, pred: |&T| -> U) -> VecPerParamSpace { - /*! - * Executes the map but in reverse order. For hacky reasons, we rely - * on this in table. 
- * - * FIXME(#5527) -- order of eval becomes irrelevant with newer - * trait reform, which features an idempotent algorithm that - * can be run to a fixed point - */ - let mut fns: Vec = self.get_slice(FnSpace).iter().rev().map(|p| pred(p)).collect(); // NB: Calling foo.rev().map().rev() causes the calls to map diff --git a/src/librustc/middle/trans/_match.rs b/src/librustc/middle/trans/_match.rs index 0809079347b1f..ab44769e553f7 100644 --- a/src/librustc/middle/trans/_match.rs +++ b/src/librustc/middle/trans/_match.rs @@ -8,183 +8,179 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * - * # Compilation of match statements - * - * I will endeavor to explain the code as best I can. I have only a loose - * understanding of some parts of it. - * - * ## Matching - * - * The basic state of the code is maintained in an array `m` of `Match` - * objects. Each `Match` describes some list of patterns, all of which must - * match against the current list of values. If those patterns match, then - * the arm listed in the match is the correct arm. A given arm may have - * multiple corresponding match entries, one for each alternative that - * remains. As we proceed these sets of matches are adjusted by the various - * `enter_XXX()` functions, each of which adjusts the set of options given - * some information about the value which has been matched. - * - * So, initially, there is one value and N matches, each of which have one - * constituent pattern. N here is usually the number of arms but may be - * greater, if some arms have multiple alternatives. For example, here: - * - * enum Foo { A, B(int), C(uint, uint) } - * match foo { - * A => ..., - * B(x) => ..., - * C(1u, 2) => ..., - * C(_) => ... - * } - * - * The value would be `foo`. There would be four matches, each of which - * contains one pattern (and, in one case, a guard). 
We could collect the - * various options and then compile the code for the case where `foo` is an - * `A`, a `B`, and a `C`. When we generate the code for `C`, we would (1) - * drop the two matches that do not match a `C` and (2) expand the other two - * into two patterns each. In the first case, the two patterns would be `1u` - * and `2`, and the in the second case the _ pattern would be expanded into - * `_` and `_`. The two values are of course the arguments to `C`. - * - * Here is a quick guide to the various functions: - * - * - `compile_submatch()`: The main workhouse. It takes a list of values and - * a list of matches and finds the various possibilities that could occur. - * - * - `enter_XXX()`: modifies the list of matches based on some information - * about the value that has been matched. For example, - * `enter_rec_or_struct()` adjusts the values given that a record or struct - * has been matched. This is an infallible pattern, so *all* of the matches - * must be either wildcards or record/struct patterns. `enter_opt()` - * handles the fallible cases, and it is correspondingly more complex. - * - * ## Bindings - * - * We store information about the bound variables for each arm as part of the - * per-arm `ArmData` struct. There is a mapping from identifiers to - * `BindingInfo` structs. These structs contain the mode/id/type of the - * binding, but they also contain an LLVM value which points at an alloca - * called `llmatch`. For by value bindings that are Copy, we also create - * an extra alloca that we copy the matched value to so that any changes - * we do to our copy is not reflected in the original and vice-versa. - * We don't do this if it's a move since the original value can't be used - * and thus allowing us to cheat in not creating an extra alloca. - * - * The `llmatch` binding always stores a pointer into the value being matched - * which points at the data for the binding. 
If the value being matched has - * type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence - * `llmatch` has type `T**`). So, if you have a pattern like: - * - * let a: A = ...; - * let b: B = ...; - * match (a, b) { (ref c, d) => { ... } } - * - * For `c` and `d`, we would generate allocas of type `C*` and `D*` - * respectively. These are called the `llmatch`. As we match, when we come - * up against an identifier, we store the current pointer into the - * corresponding alloca. - * - * Once a pattern is completely matched, and assuming that there is no guard - * pattern, we will branch to a block that leads to the body itself. For any - * by-value bindings, this block will first load the ptr from `llmatch` (the - * one of type `D*`) and then load a second time to get the actual value (the - * one of type `D`). For by ref bindings, the value of the local variable is - * simply the first alloca. - * - * So, for the example above, we would generate a setup kind of like this: - * - * +-------+ - * | Entry | - * +-------+ - * | - * +--------------------------------------------+ - * | llmatch_c = (addr of first half of tuple) | - * | llmatch_d = (addr of second half of tuple) | - * +--------------------------------------------+ - * | - * +--------------------------------------+ - * | *llbinding_d = **llmatch_d | - * +--------------------------------------+ - * - * If there is a guard, the situation is slightly different, because we must - * execute the guard code. Moreover, we need to do so once for each of the - * alternatives that lead to the arm, because if the guard fails, they may - * have different points from which to continue the search. 
Therefore, in that - * case, we generate code that looks more like: - * - * +-------+ - * | Entry | - * +-------+ - * | - * +-------------------------------------------+ - * | llmatch_c = (addr of first half of tuple) | - * | llmatch_d = (addr of first half of tuple) | - * +-------------------------------------------+ - * | - * +-------------------------------------------------+ - * | *llbinding_d = **llmatch_d | - * | check condition | - * | if false { goto next case } | - * | if true { goto body } | - * +-------------------------------------------------+ - * - * The handling for the cleanups is a bit... sensitive. Basically, the body - * is the one that invokes `add_clean()` for each binding. During the guard - * evaluation, we add temporary cleanups and revoke them after the guard is - * evaluated (it could fail, after all). Note that guards and moves are - * just plain incompatible. - * - * Some relevant helper functions that manage bindings: - * - `create_bindings_map()` - * - `insert_lllocals()` - * - * - * ## Notes on vector pattern matching. - * - * Vector pattern matching is surprisingly tricky. The problem is that - * the structure of the vector isn't fully known, and slice matches - * can be done on subparts of it. - * - * The way that vector pattern matches are dealt with, then, is as - * follows. First, we make the actual condition associated with a - * vector pattern simply a vector length comparison. So the pattern - * [1, .. x] gets the condition "vec len >= 1", and the pattern - * [.. x] gets the condition "vec len >= 0". The problem here is that - * having the condition "vec len >= 1" hold clearly does not mean that - * only a pattern that has exactly that condition will match. This - * means that it may well be the case that a condition holds, but none - * of the patterns matching that condition match; to deal with this, - * when doing vector length matches, we have match failures proceed to - * the next condition to check. 
- * - * There are a couple more subtleties to deal with. While the "actual" - * condition associated with vector length tests is simply a test on - * the vector length, the actual vec_len Opt entry contains more - * information used to restrict which matches are associated with it. - * So that all matches in a submatch are matching against the same - * values from inside the vector, they are split up by how many - * elements they match at the front and at the back of the vector. In - * order to make sure that arms are properly checked in order, even - * with the overmatching conditions, each vec_len Opt entry is - * associated with a range of matches. - * Consider the following: - * - * match &[1, 2, 3] { - * [1, 1, .. _] => 0, - * [1, 2, 2, .. _] => 1, - * [1, 2, 3, .. _] => 2, - * [1, 2, .. _] => 3, - * _ => 4 - * } - * The proper arm to match is arm 2, but arms 0 and 3 both have the - * condition "len >= 2". If arm 3 was lumped in with arm 0, then the - * wrong branch would be taken. Instead, vec_len Opts are associated - * with a contiguous range of matches that have the same "shape". - * This is sort of ugly and requires a bunch of special handling of - * vec_len options. - * - */ +//! # Compilation of match statements +//! +//! I will endeavor to explain the code as best I can. I have only a loose +//! understanding of some parts of it. +//! +//! ## Matching +//! +//! The basic state of the code is maintained in an array `m` of `Match` +//! objects. Each `Match` describes some list of patterns, all of which must +//! match against the current list of values. If those patterns match, then +//! the arm listed in the match is the correct arm. A given arm may have +//! multiple corresponding match entries, one for each alternative that +//! remains. As we proceed these sets of matches are adjusted by the various +//! `enter_XXX()` functions, each of which adjusts the set of options given +//! some information about the value which has been matched. +//! +//! 
So, initially, there is one value and N matches, each of which have one +//! constituent pattern. N here is usually the number of arms but may be +//! greater, if some arms have multiple alternatives. For example, here: +//! +//! enum Foo { A, B(int), C(uint, uint) } +//! match foo { +//! A => ..., +//! B(x) => ..., +//! C(1u, 2) => ..., +//! C(_) => ... +//! } +//! +//! The value would be `foo`. There would be four matches, each of which +//! contains one pattern (and, in one case, a guard). We could collect the +//! various options and then compile the code for the case where `foo` is an +//! `A`, a `B`, and a `C`. When we generate the code for `C`, we would (1) +//! drop the two matches that do not match a `C` and (2) expand the other two +//! into two patterns each. In the first case, the two patterns would be `1u` +//! and `2`, and the in the second case the _ pattern would be expanded into +//! `_` and `_`. The two values are of course the arguments to `C`. +//! +//! Here is a quick guide to the various functions: +//! +//! - `compile_submatch()`: The main workhouse. It takes a list of values and +//! a list of matches and finds the various possibilities that could occur. +//! +//! - `enter_XXX()`: modifies the list of matches based on some information +//! about the value that has been matched. For example, +//! `enter_rec_or_struct()` adjusts the values given that a record or struct +//! has been matched. This is an infallible pattern, so *all* of the matches +//! must be either wildcards or record/struct patterns. `enter_opt()` +//! handles the fallible cases, and it is correspondingly more complex. +//! +//! ## Bindings +//! +//! We store information about the bound variables for each arm as part of the +//! per-arm `ArmData` struct. There is a mapping from identifiers to +//! `BindingInfo` structs. These structs contain the mode/id/type of the +//! binding, but they also contain an LLVM value which points at an alloca +//! called `llmatch`. 
For by value bindings that are Copy, we also create +//! an extra alloca that we copy the matched value to so that any changes +//! we do to our copy is not reflected in the original and vice-versa. +//! We don't do this if it's a move since the original value can't be used +//! and thus allowing us to cheat in not creating an extra alloca. +//! +//! The `llmatch` binding always stores a pointer into the value being matched +//! which points at the data for the binding. If the value being matched has +//! type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence +//! `llmatch` has type `T**`). So, if you have a pattern like: +//! +//! let a: A = ...; +//! let b: B = ...; +//! match (a, b) { (ref c, d) => { ... } } +//! +//! For `c` and `d`, we would generate allocas of type `C*` and `D*` +//! respectively. These are called the `llmatch`. As we match, when we come +//! up against an identifier, we store the current pointer into the +//! corresponding alloca. +//! +//! Once a pattern is completely matched, and assuming that there is no guard +//! pattern, we will branch to a block that leads to the body itself. For any +//! by-value bindings, this block will first load the ptr from `llmatch` (the +//! one of type `D*`) and then load a second time to get the actual value (the +//! one of type `D`). For by ref bindings, the value of the local variable is +//! simply the first alloca. +//! +//! So, for the example above, we would generate a setup kind of like this: +//! +//! +-------+ +//! | Entry | +//! +-------+ +//! | +//! +--------------------------------------------+ +//! | llmatch_c = (addr of first half of tuple) | +//! | llmatch_d = (addr of second half of tuple) | +//! +--------------------------------------------+ +//! | +//! +--------------------------------------+ +//! | *llbinding_d = **llmatch_d | +//! +--------------------------------------+ +//! +//! If there is a guard, the situation is slightly different, because we must +//! 
execute the guard code. Moreover, we need to do so once for each of the +//! alternatives that lead to the arm, because if the guard fails, they may +//! have different points from which to continue the search. Therefore, in that +//! case, we generate code that looks more like: +//! +//! +-------+ +//! | Entry | +//! +-------+ +//! | +//! +-------------------------------------------+ +//! | llmatch_c = (addr of first half of tuple) | +//! | llmatch_d = (addr of first half of tuple) | +//! +-------------------------------------------+ +//! | +//! +-------------------------------------------------+ +//! | *llbinding_d = **llmatch_d | +//! | check condition | +//! | if false { goto next case } | +//! | if true { goto body } | +//! +-------------------------------------------------+ +//! +//! The handling for the cleanups is a bit... sensitive. Basically, the body +//! is the one that invokes `add_clean()` for each binding. During the guard +//! evaluation, we add temporary cleanups and revoke them after the guard is +//! evaluated (it could fail, after all). Note that guards and moves are +//! just plain incompatible. +//! +//! Some relevant helper functions that manage bindings: +//! - `create_bindings_map()` +//! - `insert_lllocals()` +//! +//! +//! ## Notes on vector pattern matching. +//! +//! Vector pattern matching is surprisingly tricky. The problem is that +//! the structure of the vector isn't fully known, and slice matches +//! can be done on subparts of it. +//! +//! The way that vector pattern matches are dealt with, then, is as +//! follows. First, we make the actual condition associated with a +//! vector pattern simply a vector length comparison. So the pattern +//! [1, .. x] gets the condition "vec len >= 1", and the pattern +//! [.. x] gets the condition "vec len >= 0". The problem here is that +//! having the condition "vec len >= 1" hold clearly does not mean that +//! only a pattern that has exactly that condition will match. This +//! 
means that it may well be the case that a condition holds, but none +//! of the patterns matching that condition match; to deal with this, +//! when doing vector length matches, we have match failures proceed to +//! the next condition to check. +//! +//! There are a couple more subtleties to deal with. While the "actual" +//! condition associated with vector length tests is simply a test on +//! the vector length, the actual vec_len Opt entry contains more +//! information used to restrict which matches are associated with it. +//! So that all matches in a submatch are matching against the same +//! values from inside the vector, they are split up by how many +//! elements they match at the front and at the back of the vector. In +//! order to make sure that arms are properly checked in order, even +//! with the overmatching conditions, each vec_len Opt entry is +//! associated with a range of matches. +//! Consider the following: +//! +//! match &[1, 2, 3] { +//! [1, 1, .. _] => 0, +//! [1, 2, 2, .. _] => 1, +//! [1, 2, 3, .. _] => 2, +//! [1, 2, .. _] => 3, +//! _ => 4 +//! } +//! The proper arm to match is arm 2, but arms 0 and 3 both have the +//! condition "len >= 2". If arm 3 was lumped in with arm 0, then the +//! wrong branch would be taken. Instead, vec_len Opts are associated +//! with a contiguous range of matches that have the same "shape". +//! This is sort of ugly and requires a bunch of special handling of +//! vec_len options. #![allow(non_camel_case_types)] @@ -341,15 +337,14 @@ pub enum TransBindingMode { TrByRef, } -/** - * Information about a pattern binding: - * - `llmatch` is a pointer to a stack slot. The stack slot contains a - * pointer into the value being matched. Hence, llmatch has type `T**` - * where `T` is the value being matched. 
- * - `trmode` is the trans binding mode - * - `id` is the node id of the binding - * - `ty` is the Rust type of the binding */ - #[deriving(Clone)] +/// Information about a pattern binding: +/// - `llmatch` is a pointer to a stack slot. The stack slot contains a +/// pointer into the value being matched. Hence, llmatch has type `T**` +/// where `T` is the value being matched. +/// - `trmode` is the trans binding mode +/// - `id` is the node id of the binding +/// - `ty` is the Rust type of the binding +#[deriving(Clone)] pub struct BindingInfo { pub llmatch: ValueRef, pub trmode: TransBindingMode, @@ -366,12 +361,10 @@ struct ArmData<'a, 'b> { bindings_map: BindingsMap } -/** - * Info about Match. - * If all `pats` are matched then arm `data` will be executed. - * As we proceed `bound_ptrs` are filled with pointers to values to be bound, - * these pointers are stored in llmatch variables just before executing `data` arm. - */ +/// Info about Match. +/// If all `pats` are matched then arm `data` will be executed. +/// As we proceed `bound_ptrs` are filled with pointers to values to be bound, +/// these pointers are stored in llmatch variables just before executing `data` arm. struct Match<'a, 'b> { pats: Vec>, data: &'a ArmData<'a, 'b>, @@ -716,16 +709,13 @@ fn extract_variant_args<'a>( ExtractedBlock { vals: args, bcx: bcx } } +/// Helper for converting from the ValueRef that we pass around in +/// the match code, which is always an lvalue, into a Datum. Eventually +/// we should just pass around a Datum and be done with it. fn match_datum(bcx: &Block, val: ValueRef, pat_id: ast::NodeId) -> Datum { - /*! - * Helper for converting from the ValueRef that we pass around in - * the match code, which is always an lvalue, into a Datum. Eventually - * we should just pass around a Datum and be done with it. 
- */ - let ty = node_id_type(bcx, pat_id); Datum::new(val, ty, Lvalue) } @@ -946,14 +936,11 @@ fn compare_values<'a>( } } +/// For each binding in `data.bindings_map`, adds an appropriate entry into +/// the `fcx.lllocals` map fn insert_lllocals<'a>(mut bcx: &'a Block<'a>, bindings_map: &BindingsMap, cs: Option) -> &'a Block<'a> { - /*! - * For each binding in `data.bindings_map`, adds an appropriate entry into - * the `fcx.lllocals` map - */ - for (&ident, &binding_info) in bindings_map.iter() { let llval = match binding_info.trmode { // By value mut binding for a copy type: load from the ptr @@ -1482,13 +1469,11 @@ enum IrrefutablePatternBindingMode { BindArgument } +/// Generates code for a local variable declaration like +/// `let ;` or `let = `. pub fn store_local<'a>(bcx: &'a Block<'a>, local: &ast::Local) -> &'a Block<'a> { - /*! - * Generates code for a local variable declaration like - * `let ;` or `let = `. - */ let _icx = push_ctxt("match::store_local"); let mut bcx = bcx; let tcx = bcx.tcx(); @@ -1553,24 +1538,21 @@ pub fn store_local<'a>(bcx: &'a Block<'a>, } } +/// Generates code for argument patterns like `fn foo(: T)`. +/// Creates entries in the `llargs` map for each of the bindings +/// in `pat`. +/// +/// # Arguments +/// +/// - `pat` is the argument pattern +/// - `llval` is a pointer to the argument value (in other words, +/// if the argument type is `T`, then `llval` is a `T*`). In some +/// cases, this code may zero out the memory `llval` points at. pub fn store_arg<'a>(mut bcx: &'a Block<'a>, pat: Gc, arg: Datum, arg_scope: cleanup::ScopeId) -> &'a Block<'a> { - /*! - * Generates code for argument patterns like `fn foo(: T)`. - * Creates entries in the `llargs` map for each of the bindings - * in `pat`. - * - * # Arguments - * - * - `pat` is the argument pattern - * - `llval` is a pointer to the argument value (in other words, - * if the argument type is `T`, then `llval` is a `T*`). 
In some - * cases, this code may zero out the memory `llval` points at. - */ - let _icx = push_ctxt("match::store_arg"); match simple_identifier(&*pat) { @@ -1634,6 +1616,19 @@ fn mk_binding_alloca<'a,A>(bcx: &'a Block<'a>, bcx } +/// A simple version of the pattern matching code that only handles +/// irrefutable patterns. This is used in let/argument patterns, +/// not in match statements. Unifying this code with the code above +/// sounds nice, but in practice it produces very inefficient code, +/// since the match code is so much more general. In most cases, +/// LLVM is able to optimize the code, but it causes longer compile +/// times and makes the generated code nigh impossible to read. +/// +/// # Arguments +/// - bcx: starting basic block context +/// - pat: the irrefutable pattern being matched. +/// - val: the value being matched -- must be an lvalue (by ref, with cleanup) +/// - binding_mode: is this for an argument or a local variable? fn bind_irrefutable_pat<'a>( bcx: &'a Block<'a>, pat: Gc, @@ -1641,22 +1636,6 @@ fn bind_irrefutable_pat<'a>( binding_mode: IrrefutablePatternBindingMode, cleanup_scope: cleanup::ScopeId) -> &'a Block<'a> { - /*! - * A simple version of the pattern matching code that only handles - * irrefutable patterns. This is used in let/argument patterns, - * not in match statements. Unifying this code with the code above - * sounds nice, but in practice it produces very inefficient code, - * since the match code is so much more general. In most cases, - * LLVM is able to optimize the code, but it causes longer compile - * times and makes the generated code nigh impossible to read. - * - * # Arguments - * - bcx: starting basic block context - * - pat: the irrefutable pattern being matched. - * - val: the value being matched -- must be an lvalue (by ref, with cleanup) - * - binding_mode: is this for an argument or a local variable? 
- */ - debug!("bind_irrefutable_pat(bcx={}, pat={}, binding_mode={:?})", bcx.to_str(), pat.repr(bcx.tcx()), diff --git a/src/librustc/middle/trans/adt.rs b/src/librustc/middle/trans/adt.rs index f7fb6646938fd..6bb2b68b5ee1f 100644 --- a/src/librustc/middle/trans/adt.rs +++ b/src/librustc/middle/trans/adt.rs @@ -8,40 +8,38 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * # Representation of Algebraic Data Types - * - * This module determines how to represent enums, structs, and tuples - * based on their monomorphized types; it is responsible both for - * choosing a representation and translating basic operations on - * values of those types. (Note: exporting the representations for - * debuggers is handled in debuginfo.rs, not here.) - * - * Note that the interface treats everything as a general case of an - * enum, so structs/tuples/etc. have one pseudo-variant with - * discriminant 0; i.e., as if they were a univariant enum. - * - * Having everything in one place will enable improvements to data - * structure representation; possibilities include: - * - * - User-specified alignment (e.g., cacheline-aligning parts of - * concurrently accessed data structures); LLVM can't represent this - * directly, so we'd have to insert padding fields in any structure - * that might contain one and adjust GEP indices accordingly. See - * issue #4578. - * - * - Store nested enums' discriminants in the same word. Rather, if - * some variants start with enums, and those enums representations - * have unused alignment padding between discriminant and body, the - * outer enum's discriminant can be stored there and those variants - * can start at offset 0. Kind of fancy, and might need work to - * make copies of the inner enum type cooperate, but it could help - * with `Option` or `Result` wrapped around another enum. 
- * - * - Tagged pointers would be neat, but given that any type can be - * used unboxed and any field can have pointers (including mutable) - * taken to it, implementing them for Rust seems difficult. - */ +//! # Representation of Algebraic Data Types +//! +//! This module determines how to represent enums, structs, and tuples +//! based on their monomorphized types; it is responsible both for +//! choosing a representation and translating basic operations on +//! values of those types. (Note: exporting the representations for +//! debuggers is handled in debuginfo.rs, not here.) +//! +//! Note that the interface treats everything as a general case of an +//! enum, so structs/tuples/etc. have one pseudo-variant with +//! discriminant 0; i.e., as if they were a univariant enum. +//! +//! Having everything in one place will enable improvements to data +//! structure representation; possibilities include: +//! +//! - User-specified alignment (e.g., cacheline-aligning parts of +//! concurrently accessed data structures); LLVM can't represent this +//! directly, so we'd have to insert padding fields in any structure +//! that might contain one and adjust GEP indices accordingly. See +//! issue #4578. +//! +//! - Store nested enums' discriminants in the same word. Rather, if +//! some variants start with enums, and those enums representations +//! have unused alignment padding between discriminant and body, the +//! outer enum's discriminant can be stored there and those variants +//! can start at offset 0. Kind of fancy, and might need work to +//! make copies of the inner enum type cooperate, but it could help +//! with `Option` or `Result` wrapped around another enum. +//! +//! - Tagged pointers would be neat, but given that any type can be +//! used unboxed and any field can have pointers (including mutable) +//! taken to it, implementing them for Rust seems difficult. 
#![allow(unsigned_negate)] @@ -72,42 +70,34 @@ type Hint = attr::ReprAttr; pub enum Repr { /// C-like enums; basically an int. CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType) - /** - * Single-case variants, and structs/tuples/records. - * - * Structs with destructors need a dynamic destroyedness flag to - * avoid running the destructor too many times; this is included - * in the `Struct` if present. - */ + /// Single-case variants, and structs/tuples/records. + /// + /// Structs with destructors need a dynamic destroyedness flag to + /// avoid running the destructor too many times; this is included + /// in the `Struct` if present. Univariant(Struct, bool), - /** - * General-case enums: for each case there is a struct, and they - * all start with a field for the discriminant. - */ + /// General-case enums: for each case there is a struct, and they + /// all start with a field for the discriminant. General(IntType, Vec), - /** - * Two cases distinguished by a nullable pointer: the case with discriminant - * `nndiscr` must have single field which is known to be nonnull due to its type. - * The other case is known to be zero sized. Hence we represent the enum - * as simply a nullable pointer: if not null it indicates the `nndiscr` variant, - * otherwise it indicates the other case. - */ + /// Two cases distinguished by a nullable pointer: the case with discriminant + /// `nndiscr` must have single field which is known to be nonnull due to its type. + /// The other case is known to be zero sized. Hence we represent the enum + /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant, + /// otherwise it indicates the other case. 
RawNullablePointer { pub nndiscr: Disr, pub nnty: ty::t, pub nullfields: Vec }, - /** - * Two cases distinguished by a nullable pointer: the case with discriminant - * `nndiscr` is represented by the struct `nonnull`, where the `ptrfield`th - * field is known to be nonnull due to its type; if that field is null, then - * it represents the other case, which is inhabited by at most one value - * (and all other fields are undefined/unused). - * - * For example, `std::option::Option` instantiated at a safe pointer type - * is represented such that `None` is a null pointer and `Some` is the - * identity function. - */ + /// Two cases distinguished by a nullable pointer: the case with discriminant + /// `nndiscr` is represented by the struct `nonnull`, where the `ptrfield`th + /// field is known to be nonnull due to its type; if that field is null, then + /// it represents the other case, which is inhabited by at most one value + /// (and all other fields are undefined/unused). + /// + /// For example, `std::option::Option` instantiated at a safe pointer type + /// is represented such that `None` is a null pointer and `Some` is the + /// identity function. StructWrappedNullablePointer { pub nonnull: Struct, pub nndiscr: Disr, @@ -124,11 +114,9 @@ pub struct Struct { pub fields: Vec, } -/** - * Convenience for `represent_type`. There should probably be more or - * these, for places in trans where the `ty::t` isn't directly - * available. - */ +/// Convenience for `represent_type`. There should probably be more of +/// these, for places in trans where the `ty::t` isn't directly +/// available. pub fn represent_node(bcx: &Block, node: ast::NodeId) -> Rc { represent_type(bcx.ccx(), node_id_type(bcx, node)) } @@ -411,16 +399,14 @@ pub fn ty_of_inttype(ity: IntType) -> ty::t { } -/** - * LLVM-level types are a little complicated. - * - * C-like enums need to be actual ints, not wrapped in a struct, - * because that changes the ABI on some platforms (see issue #10308). 
- * - * For nominal types, in some cases, we need to use LLVM named structs - * and fill in the actual contents in a second pass to prevent - * unbounded recursion; see also the comments in `trans::type_of`. - */ +/// LLVM-level types are a little complicated. +/// +/// C-like enums need to be actual ints, not wrapped in a struct, +/// because that changes the ABI on some platforms (see issue #10308). +/// +/// For nominal types, in some cases, we need to use LLVM named structs +/// and fill in the actual contents in a second pass to prevent +/// unbounded recursion; see also the comments in `trans::type_of`. pub fn type_of(cx: &CrateContext, r: &Repr) -> Type { generic_type_of(cx, r, None, false) } @@ -506,12 +492,10 @@ fn struct_llfields(cx: &CrateContext, st: &Struct, sizing: bool) -> Vec { } } -/** - * Obtain a representation of the discriminant sufficient to translate - * destructuring; this may or may not involve the actual discriminant. - * - * This should ideally be less tightly tied to `_match`. - */ +/// Obtain a representation of the discriminant sufficient to translate +/// destructuring; this may or may not involve the actual discriminant. +/// +/// This should ideally be less tightly tied to `_match`. pub fn trans_switch(bcx: &Block, r: &Repr, scrutinee: ValueRef) -> (_match::branch_kind, Option) { match *r { @@ -595,12 +579,10 @@ fn load_discr(bcx: &Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr) } } -/** - * Yield information about how to dispatch a case of the - * discriminant-like value returned by `trans_switch`. - * - * This should ideally be less tightly tied to `_match`. - */ +/// Yield information about how to dispatch a case of the +/// discriminant-like value returned by `trans_switch`. +/// +/// This should ideally be less tightly tied to `_match`. 
pub fn trans_case<'a>(bcx: &'a Block<'a>, r: &Repr, discr: Disr) -> _match::opt_result<'a> { match *r { @@ -623,11 +605,9 @@ pub fn trans_case<'a>(bcx: &'a Block<'a>, r: &Repr, discr: Disr) } } -/** - * Begin initializing a new value of the given case of the given - * representation. The fields, if any, should then be initialized via - * `trans_field_ptr`. - */ +/// Begin initializing a new value of the given case of the given +/// representation. The fields, if any, should then be initialized via +/// `trans_field_ptr`. pub fn trans_start_init(bcx: &Block, r: &Repr, val: ValueRef, discr: Disr) { match *r { CEnum(ity, min, max) => { @@ -671,10 +651,8 @@ fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) { } } -/** - * The number of fields in a given case; for use when obtaining this - * information from the type or definition is less convenient. - */ +/// The number of fields in a given case; for use when obtaining this +/// information from the type or definition is less convenient. pub fn num_args(r: &Repr, discr: Disr) -> uint { match *r { CEnum(..) => 0, @@ -756,27 +734,25 @@ pub fn trans_drop_flag_ptr(bcx: &Block, r: &Repr, val: ValueRef) -> ValueRef { } } -/** - * Construct a constant value, suitable for initializing a - * GlobalVariable, given a case and constant values for its fields. - * Note that this may have a different LLVM type (and different - * alignment!) from the representation's `type_of`, so it needs a - * pointer cast before use. - * - * The LLVM type system does not directly support unions, and only - * pointers can be bitcast, so a constant (and, by extension, the - * GlobalVariable initialized by it) will have a type that can vary - * depending on which case of an enum it is. - * - * To understand the alignment situation, consider `enum E { V64(u64), - * V32(u32, u32) }` on win32. 
The type has 8-byte alignment to - * accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, - * i32, i32}`, which is 4-byte aligned. - * - * Currently the returned value has the same size as the type, but - * this could be changed in the future to avoid allocating unnecessary - * space after values of shorter-than-maximum cases. - */ +/// Construct a constant value, suitable for initializing a +/// GlobalVariable, given a case and constant values for its fields. +/// Note that this may have a different LLVM type (and different +/// alignment!) from the representation's `type_of`, so it needs a +/// pointer cast before use. +/// +/// The LLVM type system does not directly support unions, and only +/// pointers can be bitcast, so a constant (and, by extension, the +/// GlobalVariable initialized by it) will have a type that can vary +/// depending on which case of an enum it is. +/// +/// To understand the alignment situation, consider `enum E { V64(u64), +/// V32(u32, u32) }` on win32. The type has 8-byte alignment to +/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, +/// i32, i32}`, which is 4-byte aligned. +/// +/// Currently the returned value has the same size as the type, but +/// this could be changed in the future to avoid allocating unnecessary +/// space after values of shorter-than-maximum cases. pub fn trans_const(ccx: &CrateContext, r: &Repr, discr: Disr, vals: &[ValueRef]) -> ValueRef { match *r { @@ -829,9 +805,7 @@ pub fn trans_const(ccx: &CrateContext, r: &Repr, discr: Disr, } } -/** - * Compute struct field offsets relative to struct begin. - */ +/// Compute struct field offsets relative to struct begin. 
fn compute_struct_field_offsets(ccx: &CrateContext, st: &Struct) -> Vec { let mut offsets = vec!(); @@ -849,16 +823,14 @@ fn compute_struct_field_offsets(ccx: &CrateContext, st: &Struct) -> Vec { offsets } -/** - * Building structs is a little complicated, because we might need to - * insert padding if a field's value is less aligned than its type. - * - * Continuing the example from `trans_const`, a value of type `(u32, - * E)` should have the `E` at offset 8, but if that field's - * initializer is 4-byte aligned then simply translating the tuple as - * a two-element struct will locate it at offset 4, and accesses to it - * will read the wrong memory. - */ +/// Building structs is a little complicated, because we might need to +/// insert padding if a field's value is less aligned than its type. +/// +/// Continuing the example from `trans_const`, a value of type `(u32, +/// E)` should have the `E` at offset 8, but if that field's +/// initializer is 4-byte aligned then simply translating the tuple as +/// a two-element struct will locate it at offset 4, and accesses to it +/// will read the wrong memory. fn build_const_struct(ccx: &CrateContext, st: &Struct, vals: &[ValueRef]) -> Vec { assert_eq!(vals.len(), st.fields.len()); @@ -918,7 +890,7 @@ pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) Univariant(..) => 0, RawNullablePointer { nndiscr, .. } => { if is_null(val) { - /* subtraction as uint is ok because nndiscr is either 0 or 1 */ + // subtraction as uint is ok because nndiscr is either 0 or 1 (1 - nndiscr) as Disr } else { nndiscr @@ -926,7 +898,7 @@ pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) } StructWrappedNullablePointer { nndiscr, ptrfield, .. 
} => { if is_null(const_struct_field(ccx, val, ptrfield)) { - /* subtraction as uint is ok because nndiscr is either 0 or 1 */ + // subtraction as uint is ok because nndiscr is either 0 or 1 (1 - nndiscr) as Disr } else { nndiscr @@ -935,13 +907,11 @@ pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) } } -/** - * Extract a field of a constant value, as appropriate for its - * representation. - * - * (Not to be confused with `common::const_get_elt`, which operates on - * raw LLVM-level structs and arrays.) - */ +/// Extract a field of a constant value, as appropriate for its +/// representation. +/// +/// (Not to be confused with `common::const_get_elt`, which operates on +/// raw LLVM-level structs and arrays.) pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef, _discr: Disr, ix: uint) -> ValueRef { match *r { diff --git a/src/librustc/middle/trans/asm.rs b/src/librustc/middle/trans/asm.rs index 81bb50a83afdf..5b889b1921060 100644 --- a/src/librustc/middle/trans/asm.rs +++ b/src/librustc/middle/trans/asm.rs @@ -8,9 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -# Translation of inline assembly. -*/ +//! Translation of inline assembly. use lib; use middle::trans::build::*; diff --git a/src/librustc/middle/trans/base.rs b/src/librustc/middle/trans/base.rs index 08cdde38c3460..7f428c1b700ad 100644 --- a/src/librustc/middle/trans/base.rs +++ b/src/librustc/middle/trans/base.rs @@ -962,13 +962,11 @@ pub fn load_if_immediate(cx: &Block, v: ValueRef, t: ty::t) -> ValueRef { return v; } +/// Helper for loading values from memory. Does the necessary conversion if +/// the in-memory type differs from the type used for SSA values. Also +/// handles various special cases where the type gives us better information +/// about what we are loading. pub fn load_ty(cx: &Block, ptr: ValueRef, t: ty::t) -> ValueRef { - /*! - * Helper for loading values from memory. 
Does the necessary conversion if - * the in-memory type differs from the type used for SSA values. Also - * handles various special cases where the type gives us better information - * about what we are loading. - */ if type_is_zero_size(cx.ccx(), t) { C_undef(type_of::type_of(cx.ccx(), t)) } else if ty::type_is_bool(t) { @@ -982,11 +980,9 @@ pub fn load_ty(cx: &Block, ptr: ValueRef, t: ty::t) -> ValueRef { } } +/// Helper for storing values in memory. Does the necessary conversion if +/// the in-memory type differs from the type used for SSA values. pub fn store_ty(cx: &Block, v: ValueRef, dst: ValueRef, t: ty::t) { - /*! - * Helper for storing values in memory. Does the necessary conversion if - * the in-memory type differs from the type used for SSA values. - */ if ty::type_is_bool(t) { Store(cx, ZExt(cx, v, Type::i8(cx.ccx())), dst); } else { diff --git a/src/librustc/middle/trans/basic_block.rs b/src/librustc/middle/trans/basic_block.rs index 303ad5fbce2ce..5ae587ec5812a 100644 --- a/src/librustc/middle/trans/basic_block.rs +++ b/src/librustc/middle/trans/basic_block.rs @@ -16,9 +16,7 @@ pub struct BasicBlock(pub BasicBlockRef); pub type Preds<'a> = Map<'a, Value, BasicBlock, Filter<'a, Value, Users>>; -/** - * Wrapper for LLVM BasicBlockRef - */ +/// Wrapper for LLVM BasicBlockRef impl BasicBlock { pub fn get(&self) -> BasicBlockRef { let BasicBlock(v) = *self; v diff --git a/src/librustc/middle/trans/build.rs b/src/librustc/middle/trans/build.rs index e1c02f543bf9e..39b77e699eefe 100644 --- a/src/librustc/middle/trans/build.rs +++ b/src/librustc/middle/trans/build.rs @@ -143,7 +143,7 @@ pub fn _Undef(val: ValueRef) -> ValueRef { } } -/* Arithmetic */ +// Arithmetic pub fn Add(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef { if cx.unreachable.get() { return _Undef(lhs); } B(cx).add(lhs, rhs) @@ -299,7 +299,7 @@ pub fn Not(cx: &Block, v: ValueRef) -> ValueRef { B(cx).not(v) } -/* Memory */ +// Memory pub fn Malloc(cx: &Block, ty: Type) -> ValueRef { 
unsafe { if cx.unreachable.get() { @@ -471,7 +471,7 @@ pub fn GlobalStringPtr(cx: &Block, _str: *const c_char) -> ValueRef { } } -/* Casts */ +// Casts pub fn Trunc(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } @@ -608,7 +608,7 @@ pub fn FPCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef { } -/* Comparisons */ +// Comparisons pub fn ICmp(cx: &Block, op: IntPredicate, lhs: ValueRef, rhs: ValueRef) -> ValueRef { unsafe { @@ -629,7 +629,7 @@ pub fn FCmp(cx: &Block, op: RealPredicate, lhs: ValueRef, rhs: ValueRef) } } -/* Miscellaneous instructions */ +// Miscellaneous instructions pub fn EmptyPhi(cx: &Block, ty: Type) -> ValueRef { unsafe { if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } diff --git a/src/librustc/middle/trans/builder.rs b/src/librustc/middle/trans/builder.rs index a9c1adac3d7cf..bde5eaf8324d4 100644 --- a/src/librustc/middle/trans/builder.rs +++ b/src/librustc/middle/trans/builder.rs @@ -189,7 +189,7 @@ impl<'a> Builder<'a> { } } - /* Arithmetic */ + // Arithmetic pub fn add(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { self.count_insn("add"); unsafe { @@ -407,7 +407,7 @@ impl<'a> Builder<'a> { } } - /* Memory */ + // Memory pub fn malloc(&self, ty: Type) -> ValueRef { self.count_insn("malloc"); unsafe { @@ -586,7 +586,7 @@ impl<'a> Builder<'a> { } } - /* Casts */ + // Casts pub fn trunc(&self, val: ValueRef, dest_ty: Type) -> ValueRef { self.count_insn("trunc"); unsafe { @@ -721,7 +721,7 @@ impl<'a> Builder<'a> { } - /* Comparisons */ + // Comparisons pub fn icmp(&self, op: IntPredicate, lhs: ValueRef, rhs: ValueRef) -> ValueRef { self.count_insn("icmp"); unsafe { @@ -736,7 +736,7 @@ impl<'a> Builder<'a> { } } - /* Miscellaneous instructions */ + // Miscellaneous instructions pub fn empty_phi(&self, ty: Type) -> ValueRef { self.count_insn("emptyphi"); unsafe { diff --git a/src/librustc/middle/trans/callee.rs 
b/src/librustc/middle/trans/callee.rs index c5361045549eb..43a3bb9605490 100644 --- a/src/librustc/middle/trans/callee.rs +++ b/src/librustc/middle/trans/callee.rs @@ -8,13 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Handles translation of callees as well as other call-related - * things. Callees are a superset of normal rust values and sometimes - * have different representations. In particular, top-level fn items - * and methods are represented as just a fn ptr and not a full - * closure. - */ +//! Handles translation of callees as well as other call-related +//! things. Callees are a superset of normal rust values and sometimes +//! have different representations. In particular, top-level fn items +//! and methods are represented as just a fn ptr and not a full +//! closure. use arena::TypedArena; use back::abi; @@ -162,13 +160,10 @@ fn trans<'a>(bcx: &'a Block<'a>, expr: &ast::Expr) -> Callee<'a> { } } +/// Translates a reference (with id `ref_id`) to the fn/method +/// with id `def_id` into a function pointer. This may require +/// monomorphization or inlining. pub fn trans_fn_ref(bcx: &Block, def_id: ast::DefId, node: ExprOrMethodCall) -> ValueRef { - /*! - * Translates a reference (with id `ref_id`) to the fn/method - * with id `def_id` into a function pointer. This may require - * monomorphization or inlining. - */ - let _icx = push_ctxt("trans_fn_ref"); let substs = node_id_substs(bcx, node); @@ -350,29 +345,25 @@ pub fn trans_unboxing_shim(bcx: &Block, llfn } +/// Translates a reference to a fn/method item, monomorphizing and +/// inlining as it goes. +/// +/// # Parameters +/// +/// - `bcx`: the current block where the reference to the fn occurs +/// - `def_id`: def id of the fn or method item being referenced +/// - `node`: node id of the reference to the fn/method, if applicable. 
+/// This parameter may be zero; but, if so, the resulting value may not +/// have the right type, so it must be cast before being used. +/// - `substs`: values for each of the fn/method's parameters +/// - `vtables`: values for each bound on each of the type parameters pub fn trans_fn_ref_with_vtables( bcx: &Block, // def_id: ast::DefId, // def id of fn node: ExprOrMethodCall, // node id of use of fn; may be zero if N/A substs: subst::Substs, // values for fn's ty params vtables: typeck::vtable_res) // vtables for the call - -> ValueRef -{ - /*! - * Translates a reference to a fn/method item, monomorphizing and - * inlining as it goes. - * - * # Parameters - * - * - `bcx`: the current block where the reference to the fn occurs - * - `def_id`: def id of the fn or method item being referenced - * - `node`: node id of the reference to the fn/method, if applicable. - * This parameter may be zero; but, if so, the resulting value may not - * have the right type, so it must be cast before being used. - * - `substs`: values for each of the fn/method's parameters - * - `vtables`: values for each bound on each of the type parameters - */ - + -> ValueRef { let _icx = push_ctxt("trans_fn_ref_with_vtables"); let ccx = bcx.ccx(); let tcx = bcx.tcx(); @@ -622,6 +613,19 @@ pub fn trans_lang_call<'a>( dest) } +/// This behemoth of a function translates function calls. +/// Unfortunately, in order to generate more efficient LLVM +/// output at -O0, it has quite a complex signature (refactoring +/// this into two functions seems like a good idea). +/// +/// In particular, for lang items, it is invoked with a dest of +/// None, and in that case the return value contains the result of +/// the fn. The lang item must not return a structural type or else +/// all heck breaks loose. +/// +/// For non-lang items, `dest` is always Some, and hence the result +/// is written into memory somewhere. Nonetheless we return the +/// actual return value of the function. 
pub fn trans_call_inner<'a>( bcx: &'a Block<'a>, call_info: Option, @@ -632,22 +636,6 @@ pub fn trans_call_inner<'a>( args: CallArgs, dest: Option) -> Result<'a> { - /*! - * This behemoth of a function translates function calls. - * Unfortunately, in order to generate more efficient LLVM - * output at -O0, it has quite a complex signature (refactoring - * this into two functions seems like a good idea). - * - * In particular, for lang items, it is invoked with a dest of - * None, and in that case the return value contains the result of - * the fn. The lang item must not return a structural type or else - * all heck breaks loose. - * - * For non-lang items, `dest` is always Some, and hence the result - * is written into memory somewhere. Nonetheless we return the - * actual return value of the function. - */ - // Introduce a temporary cleanup scope that will contain cleanups // for the arguments while they are being evaluated. The purpose // this cleanup is to ensure that, should a failure occur while diff --git a/src/librustc/middle/trans/cleanup.rs b/src/librustc/middle/trans/cleanup.rs index 0bcf94997cdbb..699e75b1a9823 100644 --- a/src/librustc/middle/trans/cleanup.rs +++ b/src/librustc/middle/trans/cleanup.rs @@ -8,10 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Code pertaining to cleanup of temporaries as well as execution of - * drop glue. See discussion in `doc.rs` for a high-level summary. - */ +//! Code pertaining to cleanup of temporaries as well as execution of +//! drop glue. See discussion in `doc.rs` for a high-level summary. use lib::llvm::{BasicBlockRef, ValueRef}; use middle::trans::base; @@ -78,12 +76,9 @@ pub enum ScopeId { } impl<'a> CleanupMethods<'a> for FunctionContext<'a> { + /// Invoked when we start to trans the code contained + /// within a new cleanup scope. fn push_ast_cleanup_scope(&self, id: ast::NodeId) { - /*! 
- * Invoked when we start to trans the code contained - * within a new cleanup scope. - */ - debug!("push_ast_cleanup_scope({})", self.ccx.tcx.map.node_to_str(id)); @@ -122,16 +117,13 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { CustomScopeIndex { index: index } } + /// Removes the cleanup scope for id `cleanup_scope`, which + /// must be at the top of the cleanup stack, and generates the + /// code to do its cleanups for normal exit. fn pop_and_trans_ast_cleanup_scope(&self, bcx: &'a Block<'a>, cleanup_scope: ast::NodeId) -> &'a Block<'a> { - /*! - * Removes the cleanup scope for id `cleanup_scope`, which - * must be at the top of the cleanup stack, and generates the - * code to do its cleanups for normal exit. - */ - debug!("pop_and_trans_ast_cleanup_scope({})", self.ccx.tcx.map.node_to_str(cleanup_scope)); @@ -142,15 +134,12 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { } + /// Removes the loop cleanup scope for id `cleanup_scope`, which + /// must be at the top of the cleanup stack. Does not generate + /// any cleanup code, since loop scopes should exit by + /// branching to a block generated by `normal_exit_block`. fn pop_loop_cleanup_scope(&self, cleanup_scope: ast::NodeId) { - /*! - * Removes the loop cleanup scope for id `cleanup_scope`, which - * must be at the top of the cleanup stack. Does not generate - * any cleanup code, since loop scopes should exit by - * branching to a block generated by `normal_exit_block`. - */ - debug!("pop_loop_cleanup_scope({})", self.ccx.tcx.map.node_to_str(cleanup_scope)); @@ -159,28 +148,23 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { let _ = self.pop_scope(); } + /// Removes the top cleanup scope from the stack without + /// executing its cleanups. The top cleanup scope must + /// be the temporary scope `custom_scope`. fn pop_custom_cleanup_scope(&self, custom_scope: CustomScopeIndex) { - /*! - * Removes the top cleanup scope from the stack without - * executing its cleanups. 
The top cleanup scope must - * be the temporary scope `custom_scope`. - */ - debug!("pop_custom_cleanup_scope({})", custom_scope.index); assert!(self.is_valid_to_pop_custom_scope(custom_scope)); let _ = self.pop_scope(); } + /// Removes the top cleanup scope from the stack, which must be + /// a temporary scope, and generates the code to do its + /// cleanups for normal exit. fn pop_and_trans_custom_cleanup_scope(&self, bcx: &'a Block<'a>, custom_scope: CustomScopeIndex) -> &'a Block<'a> { - /*! - * Removes the top cleanup scope from the stack, which must be - * a temporary scope, and generates the code to do its - * cleanups for normal exit. - */ debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); assert!(self.is_valid_to_pop_custom_scope(custom_scope)); @@ -189,11 +173,8 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { self.trans_scope_cleanups(bcx, &scope) } + /// Returns the id of the top-most loop scope fn top_loop_scope(&self) -> ast::NodeId { - /*! - * Returns the id of the top-most loop scope - */ - for scope in self.scopes.borrow().iter().rev() { match scope.kind { LoopScopeKind(id, _) => { @@ -205,36 +186,27 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { self.ccx.sess().bug("no loop scope found"); } + /// Returns a block to branch to which will perform all pending + /// cleanups and then break/continue (depending on `exit`) out + /// of the loop with id `cleanup_scope` fn normal_exit_block(&'a self, cleanup_scope: ast::NodeId, exit: uint) -> BasicBlockRef { - /*! - * Returns a block to branch to which will perform all pending - * cleanups and then break/continue (depending on `exit`) out - * of the loop with id `cleanup_scope` - */ - self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit)) } + /// Returns a block to branch to which will perform all pending + /// cleanups and then return from this function fn return_exit_block(&'a self) -> BasicBlockRef { - /*! 
- * Returns a block to branch to which will perform all pending - * cleanups and then return from this function - */ - self.trans_cleanups_to_exit_scope(ReturnExit) } + /// Schedules a (deep) drop of `val`, which is a pointer to an + /// instance of `ty` fn schedule_drop_mem(&self, cleanup_scope: ScopeId, val: ValueRef, ty: ty::t) { - /*! - * Schedules a (deep) drop of `val`, which is a pointer to an - * instance of `ty` - */ - if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; } let drop = box DropValue { is_immediate: false, @@ -252,15 +224,12 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { self.schedule_clean(cleanup_scope, drop as Box); } + /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer + /// to an instance of `ty` fn schedule_drop_and_zero_mem(&self, cleanup_scope: ScopeId, val: ValueRef, ty: ty::t) { - /*! - * Schedules a (deep) drop and zero-ing of `val`, which is a pointer - * to an instance of `ty` - */ - if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; } let drop = box DropValue { is_immediate: false, @@ -279,14 +248,11 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { self.schedule_clean(cleanup_scope, drop as Box); } + /// Schedules a (deep) drop of `val`, which is an instance of `ty` fn schedule_drop_immediate(&self, cleanup_scope: ScopeId, val: ValueRef, ty: ty::t) { - /*! - * Schedules a (deep) drop of `val`, which is an instance of `ty` - */ - if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; } let drop = box DropValue { is_immediate: true, @@ -304,16 +270,13 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { self.schedule_clean(cleanup_scope, drop as Box); } + /// Schedules a call to `free(val)`. Note that this is a shallow + /// operation. fn schedule_free_value(&self, cleanup_scope: ScopeId, val: ValueRef, heap: Heap, content_ty: ty::t) { - /*! - * Schedules a call to `free(val)`. Note that this is a shallow - * operation. 
- */ - let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty }; debug!("schedule_free_value({:?}, val={}, heap={:?})", @@ -333,15 +296,12 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { } } + /// Schedules a cleanup to occur upon exit from `cleanup_scope`. + /// If `cleanup_scope` is not provided, then the cleanup is scheduled + /// in the topmost scope, which must be a temporary scope. fn schedule_clean_in_ast_scope(&self, cleanup_scope: ast::NodeId, cleanup: Box) { - /*! - * Schedules a cleanup to occur upon exit from `cleanup_scope`. - * If `cleanup_scope` is not provided, then the cleanup is scheduled - * in the topmost scope, which must be a temporary scope. - */ - debug!("schedule_clean_in_ast_scope(cleanup_scope={:?})", cleanup_scope); @@ -361,14 +321,11 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { self.ccx.tcx.map.node_to_str(cleanup_scope)).as_slice()); } + /// Schedules a cleanup to occur in the top-most scope, + /// which must be a temporary scope. fn schedule_clean_in_custom_scope(&self, custom_scope: CustomScopeIndex, cleanup: Box) { - /*! - * Schedules a cleanup to occur in the top-most scope, - * which must be a temporary scope. - */ - debug!("schedule_clean_in_custom_scope(custom_scope={})", custom_scope.index); @@ -380,22 +337,16 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { scope.clear_cached_exits(); } + /// Returns true if there are pending cleanups that should + /// execute on failure. fn needs_invoke(&self) -> bool { - /*! - * Returns true if there are pending cleanups that should - * execute on failure. - */ - self.scopes.borrow().iter().rev().any(|s| s.needs_invoke()) } + /// Returns a basic block to branch to in the event of a failure. + /// This block will run the failure cleanups and eventually + /// invoke the LLVM `Resume` instruction. fn get_landing_pad(&'a self) -> BasicBlockRef { - /*! - * Returns a basic block to branch to in the event of a failure. 
- * This block will run the failure cleanups and eventually - * invoke the LLVM `Resume` instruction. - */ - let _icx = base::push_ctxt("get_landing_pad"); debug!("get_landing_pad"); @@ -428,10 +379,8 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> { } impl<'a> CleanupHelperMethods<'a> for FunctionContext<'a> { + /// Returns the id of the current top-most AST scope, if any. fn top_ast_scope(&self) -> Option { - /*! - * Returns the id of the current top-most AST scope, if any. - */ for scope in self.scopes.borrow().iter().rev() { match scope.kind { CustomScopeKind | LoopScopeKind(..) => {} @@ -458,11 +407,10 @@ impl<'a> CleanupHelperMethods<'a> for FunctionContext<'a> { scopes.get(custom_scope.index).kind.is_temp() } + /// Generates the cleanups for `scope` into `bcx` fn trans_scope_cleanups(&self, // cannot borrow self, will recurse bcx: &'a Block<'a>, scope: &CleanupScope) -> &'a Block<'a> { - /*! Generates the cleanups for `scope` into `bcx` */ - let mut bcx = bcx; if !bcx.unreachable.get() { for cleanup in scope.cleanups.iter().rev() { @@ -492,37 +440,34 @@ impl<'a> CleanupHelperMethods<'a> for FunctionContext<'a> { f(self.scopes.borrow().last().unwrap()) } + /// Used when the caller wishes to jump to an early exit, such + /// as a return, break, continue, or unwind. This function will + /// generate all cleanups between the top of the stack and the + /// exit `label` and return a basic block that the caller can + /// branch to. + /// + /// For example, if the current stack of cleanups were as follows: + /// + /// AST 22 + /// Custom 1 + /// AST 23 + /// Loop 23 + /// Custom 2 + /// AST 24 + /// + /// and the `label` specifies a break from `Loop 23`, then this + /// function would generate a series of basic blocks as follows: + /// + /// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk + /// + /// where `break_blk` is the block specified in `Loop 23` as + /// the target for breaks. 
The return value would be the first + /// basic block in that sequence (`Cleanup(AST 24)`). The + /// caller could then branch to `Cleanup(AST 24)` and it will + /// perform all cleanups and finally branch to the `break_blk`. fn trans_cleanups_to_exit_scope(&'a self, label: EarlyExitLabel) -> BasicBlockRef { - /*! - * Used when the caller wishes to jump to an early exit, such - * as a return, break, continue, or unwind. This function will - * generate all cleanups between the top of the stack and the - * exit `label` and return a basic block that the caller can - * branch to. - * - * For example, if the current stack of cleanups were as follows: - * - * AST 22 - * Custom 1 - * AST 23 - * Loop 23 - * Custom 2 - * AST 24 - * - * and the `label` specifies a break from `Loop 23`, then this - * function would generate a series of basic blocks as follows: - * - * Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk - * - * where `break_blk` is the block specified in `Loop 23` as - * the target for breaks. The return value would be the first - * basic block in that sequence (`Cleanup(AST 24)`). The - * caller could then branch to `Cleanup(AST 24)` and it will - * perform all cleanups and finally branch to the `break_blk`. - */ - debug!("trans_cleanups_to_exit_scope label={:?} scopes={}", label, self.scopes_len()); @@ -654,20 +599,17 @@ impl<'a> CleanupHelperMethods<'a> for FunctionContext<'a> { prev_llbb } + /// Creates a landing pad for the top scope, if one does not + /// exist. The landing pad will perform all cleanups necessary + /// for an unwind and then `resume` to continue error + /// propagation: + /// + /// landing_pad -> ... cleanups ... -> [resume] + /// + /// (The cleanups and resume instruction are created by + /// `trans_cleanups_to_exit_scope()`, not in this function + /// itself.) fn get_or_create_landing_pad(&'a self) -> BasicBlockRef { - /*! - * Creates a landing pad for the top scope, if one does not - * exist. 
The landing pad will perform all cleanups necessary - * for an unwind and then `resume` to continue error - * propagation: - * - * landing_pad -> ... cleanups ... -> [resume] - * - * (The cleanups and resume instruction are created by - * `trans_cleanups_to_exit_scope()`, not in this function - * itself.) - */ - let pad_bcx; debug!("get_or_create_landing_pad"); @@ -778,19 +720,15 @@ impl<'a> CleanupScope<'a> { cleanup_block: blk }); } + /// True if this scope has cleanups for use during unwinding fn needs_invoke(&self) -> bool { - /*! True if this scope has cleanups for use during unwinding */ - self.cached_landing_pad.is_some() || self.cleanups.iter().any(|c| c.clean_on_unwind()) } + /// Returns a suitable name to use for the basic block that + /// handles this cleanup scope fn block_name(&self, prefix: &str) -> String { - /*! - * Returns a suitable name to use for the basic block that - * handles this cleanup scope - */ - match self.kind { CustomScopeKind => format!("{}_custom_", prefix), AstScopeKind(id) => format!("{}_ast_{}_", prefix, id), @@ -821,14 +759,11 @@ impl<'a> CleanupScopeKind<'a> { } } + /// If this is a loop scope with id `id`, return the early + /// exit block `exit`, else `None` fn early_exit_block(&self, id: ast::NodeId, exit: uint) -> Option { - /*! - * If this is a loop scope with id `id`, return the early - * exit block `exit`, else `None` - */ - match *self { LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb), _ => None, diff --git a/src/librustc/middle/trans/closure.rs b/src/librustc/middle/trans/closure.rs index b2564936fa406..28c9ab4e3b372 100644 --- a/src/librustc/middle/trans/closure.rs +++ b/src/librustc/middle/trans/closure.rs @@ -309,6 +309,15 @@ fn fill_fn_pair(bcx: &Block, pair: ValueRef, llfn: ValueRef, llenvptr: ValueRef) Store(bcx, llenvptr, GEPi(bcx, pair, [0u, abi::fn_field_box])); } +/// Translates the body of a closure expression. 
+/// +/// - `store` +/// - `decl` +/// - `body` +/// - `id`: The id of the closure expression. +/// - `cap_clause`: information about captured variables, if any. +/// - `dest`: where to write the closure value, which must be a +/// (fn ptr, env) pair pub fn trans_expr_fn<'a>( bcx: &'a Block<'a>, store: ty::TraitStore, @@ -317,19 +326,6 @@ pub fn trans_expr_fn<'a>( id: ast::NodeId, dest: expr::Dest) -> &'a Block<'a> { - /*! - * - * Translates the body of a closure expression. - * - * - `store` - * - `decl` - * - `body` - * - `id`: The id of the closure expression. - * - `cap_clause`: information about captured variables, if any. - * - `dest`: where to write the closure value, which must be a - (fn ptr, env) pair - */ - let _icx = push_ctxt("closure::trans_expr_fn"); let dest_addr = match dest { diff --git a/src/librustc/middle/trans/common.rs b/src/librustc/middle/trans/common.rs index de5de64e346c3..88553bd4e4776 100644 --- a/src/librustc/middle/trans/common.rs +++ b/src/librustc/middle/trans/common.rs @@ -80,26 +80,20 @@ pub fn type_is_immediate(ccx: &CrateContext, ty: ty::t) -> bool { } } +/// Identify types which have size zero at runtime. pub fn type_is_zero_size(ccx: &CrateContext, ty: ty::t) -> bool { - /*! - * Identify types which have size zero at runtime. - */ - use middle::trans::machine::llsize_of_alloc; use middle::trans::type_of::sizing_type_of; let llty = sizing_type_of(ccx, ty); llsize_of_alloc(ccx, llty) == 0 } +/// Identifies types which we declare to be equivalent to `void` +/// in C for the purpose of function return types. These are +/// `()`, bot, and uninhabited enums. Note that all such types +/// are also zero-size, but not all zero-size types use a `void` +/// return type (in order to aid with C ABI compatibility). pub fn return_type_is_void(ccx: &CrateContext, ty: ty::t) -> bool { - /*! - * Identifies types which we declare to be equivalent to `void` - * in C for the purpose of function return types. 
These are - * `()`, bot, and uninhabited enums. Note that all such types - * are also zero-size, but not all zero-size types use a `void` - * return type (in order to aid with C ABI compatibility). - */ - ty::type_is_nil(ty) || ty::type_is_bot(ty) || ty::type_is_empty(ccx.tcx(), ty) } @@ -121,31 +115,28 @@ pub struct tydesc_info { pub visit_glue: Cell>, } -/* - * A note on nomenclature of linking: "extern", "foreign", and "upcall". - * - * An "extern" is an LLVM symbol we wind up emitting an undefined external - * reference to. This means "we don't have the thing in this compilation unit, - * please make sure you link it in at runtime". This could be a reference to - * C code found in a C library, or rust code found in a rust crate. - * - * Most "externs" are implicitly declared (automatically) as a result of a - * user declaring an extern _module_ dependency; this causes the rust driver - * to locate an extern crate, scan its compilation metadata, and emit extern - * declarations for any symbols used by the declaring crate. - * - * A "foreign" is an extern that references C (or other non-rust ABI) code. - * There is no metadata to scan for extern references so in these cases either - * a header-digester like bindgen, or manual function prototypes, have to - * serve as declarators. So these are usually given explicitly as prototype - * declarations, in rust code, with ABI attributes on them noting which ABI to - * link via. - * - * An "upcall" is a foreign call generated by the compiler (not corresponding - * to any user-written call in the code) into the runtime library, to perform - * some helper task such as bringing a task to life, allocating memory, etc. - * - */ +// A note on nomenclature of linking: "extern", "foreign", and "upcall". +// +// An "extern" is an LLVM symbol we wind up emitting an undefined external +// reference to. This means "we don't have the thing in this compilation unit, +// please make sure you link it in at runtime". 
This could be a reference to +// C code found in a C library, or rust code found in a rust crate. +// +// Most "externs" are implicitly declared (automatically) as a result of a +// user declaring an extern _module_ dependency; this causes the rust driver +// to locate an extern crate, scan its compilation metadata, and emit extern +// declarations for any symbols used by the declaring crate. +// +// A "foreign" is an extern that references C (or other non-rust ABI) code. +// There is no metadata to scan for extern references so in these cases either +// a header-digester like bindgen, or manual function prototypes, have to +// serve as declarators. So these are usually given explicitly as prototype +// declarations, in rust code, with ABI attributes on them noting which ABI to +// link via. +// +// An "upcall" is a foreign call generated by the compiler (not corresponding +// to any user-written call in the code) into the runtime library, to perform +// some helper task such as bringing a task to life, allocating memory, etc. pub struct NodeInfo { pub id: ast::NodeId, diff --git a/src/librustc/middle/trans/consts.rs b/src/librustc/middle/trans/consts.rs index 527ce5dfaae45..e45398ab9b978 100644 --- a/src/librustc/middle/trans/consts.rs +++ b/src/librustc/middle/trans/consts.rs @@ -314,8 +314,8 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr, let te2 = base::cast_shift_const_rhs(b, te1, te2); - /* Neither type is bottom, and we expect them to be unified - * already, so the following is safe. */ + // Neither type is bottom, and we expect them to be unified + // already, so the following is safe. let ty = ty::expr_ty(cx.tcx(), &**e1); let is_float = ty::type_is_fp(ty); let signed = ty::type_is_signed(ty); diff --git a/src/librustc/middle/trans/datum.rs b/src/librustc/middle/trans/datum.rs index b93469ad2fba3..de52a3e826aaf 100644 --- a/src/librustc/middle/trans/datum.rs +++ b/src/librustc/middle/trans/datum.rs @@ -8,10 +8,8 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -/*! - * See the section on datums in `doc.rs` for an overview of what - * Datums are and how they are intended to be used. - */ +//! See the section on datums in `doc.rs` for an overview of what +//! Datums are and how they are intended to be used. use lib::llvm::ValueRef; use middle::trans::base::*; @@ -27,12 +25,10 @@ use util::ppaux::{ty_to_str}; use syntax::ast; -/** - * A `Datum` encapsulates the result of evaluating an expression. It - * describes where the value is stored, what Rust type the value has, - * whether it is addressed by reference, and so forth. Please refer - * the section on datums in `doc.rs` for more details. - */ +/// A `Datum` encapsulates the result of evaluating an expression. It +/// describes where the value is stored, what Rust type the value has, +/// whether it is addressed by reference, and so forth. Please refer +/// the section on datums in `doc.rs` for more details. #[deriving(Clone)] pub struct Datum { /// The llvm value. This is either a pointer to the Rust value or @@ -101,6 +97,12 @@ pub fn immediate_rvalue_bcx<'a>(bcx: &'a Block<'a>, } +/// Allocates temporary space on the stack using alloca() and +/// returns a by-ref Datum pointing to it. The memory will be +/// dropped upon exit from `scope`. The callback `populate` should +/// initialize the memory. If `zero` is true, the space will be +/// zeroed when it is allocated; this is not necessary unless `bcx` +/// does not dominate the end of `scope`. pub fn lvalue_scratch_datum<'a, A>(bcx: &'a Block<'a>, ty: ty::t, name: &str, @@ -110,15 +112,6 @@ pub fn lvalue_scratch_datum<'a, A>(bcx: &'a Block<'a>, populate: |A, &'a Block<'a>, ValueRef| -> &'a Block<'a>) -> DatumBlock<'a, Lvalue> { - /*! - * Allocates temporary space on the stack using alloca() and - * returns a by-ref Datum pointing to it. The memory will be - * dropped upon exit from `scope`. 
The callback `populate` should - * initialize the memory. If `zero` is true, the space will be - * zeroed when it is allocated; this is not necessary unless `bcx` - * does not dominate the end of `scope`. - */ - let llty = type_of::type_of(bcx.ccx(), ty); let scratch = alloca_maybe_zeroed(bcx, llty, name, zero); @@ -129,32 +122,26 @@ pub fn lvalue_scratch_datum<'a, A>(bcx: &'a Block<'a>, DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue)) } +/// Allocates temporary space on the stack using alloca() and +/// returns a by-ref Datum pointing to it. If `zero` is true, the +/// space will be zeroed when it is allocated; this is normally not +/// necessary, but in the case of automatic rooting in match +/// statements it is possible to have temporaries that may not get +/// initialized if a certain arm is not taken, so we must zero +/// them. You must arrange any cleanups etc yourself! pub fn rvalue_scratch_datum(bcx: &Block, ty: ty::t, name: &str) -> Datum { - /*! - * Allocates temporary space on the stack using alloca() and - * returns a by-ref Datum pointing to it. If `zero` is true, the - * space will be zeroed when it is allocated; this is normally not - * necessary, but in the case of automatic rooting in match - * statements it is possible to have temporaries that may not get - * initialized if a certain arm is not taken, so we must zero - * them. You must arrange any cleanups etc yourself! - */ - let llty = type_of::type_of(bcx.ccx(), ty); let scratch = alloca_maybe_zeroed(bcx, llty, name, false); Datum::new(scratch, ty, Rvalue::new(ByRef)) } +/// Indicates the "appropriate" mode for this value, +/// which is either by ref or by value, depending +/// on whether type is immediate or not. pub fn appropriate_rvalue_mode(ccx: &CrateContext, ty: ty::t) -> RvalueMode { - /*! - * Indicates the "appropriate" mode for this value, - * which is either by ref or by value, depending - * on whether type is immediate or not. 
- */ - if type_is_immediate(ccx, ty) { ByValue } else { @@ -175,25 +162,19 @@ fn add_rvalue_clean(mode: RvalueMode, pub trait KindOps { - /** - * Take appropriate action after the value in `datum` has been - * stored to a new location. - */ + /// Take appropriate action after the value in `datum` has been + /// stored to a new location. fn post_store<'a>(&self, bcx: &'a Block<'a>, val: ValueRef, ty: ty::t) -> &'a Block<'a>; - /** - * True if this mode is a reference mode, meaning that the datum's - * val field is a pointer to the actual value - */ + /// True if this mode is a reference mode, meaning that the datum's + /// val field is a pointer to the actual value fn is_by_ref(&self) -> bool; - /** - * Converts to an Expr kind - */ + /// Converts to an Expr kind fn to_expr_kind(self) -> Expr; } @@ -219,17 +200,14 @@ impl KindOps for Rvalue { } impl KindOps for Lvalue { + /// If an lvalue is moved, we must zero out the memory in which + /// it resides so as to cancel cleanup. If an @T lvalue is + /// copied, we must increment the reference count. fn post_store<'a>(&self, bcx: &'a Block<'a>, val: ValueRef, ty: ty::t) -> &'a Block<'a> { - /*! - * If an lvalue is moved, we must zero out the memory in which - * it resides so as to cancel cleanup. If an @T lvalue is - * copied, we must increment the reference count. - */ - if ty::type_needs_drop(bcx.tcx(), ty) { if ty::type_moves_by_default(bcx.tcx(), ty) { // cancel cleanup of affine values by zeroing out @@ -278,31 +256,26 @@ impl KindOps for Expr { } impl Datum { + /// Schedules a cleanup for this datum in the given scope. + /// That means that this datum is no longer an rvalue datum; + /// hence, this function consumes the datum and returns the + /// contained ValueRef. pub fn add_clean(self, fcx: &FunctionContext, scope: cleanup::ScopeId) -> ValueRef { - /*! - * Schedules a cleanup for this datum in the given scope. 
- * That means that this datum is no longer an rvalue datum; - * hence, this function consumes the datum and returns the - * contained ValueRef. - */ - add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty); self.val } + /// Returns an lvalue datum (that is, a by ref datum with + /// cleanup scheduled). If `self` is not already an lvalue, + /// cleanup will be scheduled in the temporary scope for `expr_id`. pub fn to_lvalue_datum_in_scope<'a>(self, bcx: &'a Block<'a>, name: &str, scope: cleanup::ScopeId) -> DatumBlock<'a, Lvalue> { - /*! - * Returns an lvalue datum (that is, a by ref datum with - * cleanup scheduled). If `self` is not already an lvalue, - * cleanup will be scheduled in the temporary scope for `expr_id`. - */ let fcx = bcx.fcx; match self.kind.mode { @@ -351,14 +324,12 @@ impl Datum { } } -/** - * Methods suitable for "expr" datums that could be either lvalues or - * rvalues. These include coercions into lvalues/rvalues but also a number - * of more general operations. (Some of those operations could be moved to - * the more general `impl Datum`, but it's convenient to have them - * here since we can `match self.kind` rather than having to implement - * generic methods in `KindOps`.) - */ +/// Methods suitable for "expr" datums that could be either lvalues or +/// rvalues. These include coercions into lvalues/rvalues but also a number +/// of more general operations. (Some of those operations could be moved to +/// the more general `impl Datum`, but it's convenient to have them +/// here since we can `match self.kind` rather than having to implement +/// generic methods in `KindOps`.) impl Datum { fn match_kind(self, if_lvalue: |Datum| -> R, @@ -371,22 +342,16 @@ impl Datum { } } + /// Asserts that this datum *is* an lvalue and returns it. #[allow(dead_code)] // potentially useful pub fn assert_lvalue(self, bcx: &Block) -> Datum { - /*! - * Asserts that this datum *is* an lvalue and returns it. 
- */ - self.match_kind( |d| d, |_| bcx.sess().bug("assert_lvalue given rvalue")) } + /// Asserts that this datum *is* an lvalue and returns it. pub fn assert_rvalue(self, bcx: &Block) -> Datum { - /*! - * Asserts that this datum *is* an lvalue and returns it. - */ - self.match_kind( |_| bcx.sess().bug("assert_rvalue given lvalue"), |r| r) @@ -408,14 +373,11 @@ impl Datum { } } + /// Arranges cleanup for `self` if it is an rvalue. Use when + /// you are done working with a value that may need drop. pub fn add_clean_if_rvalue<'a>(self, bcx: &'a Block<'a>, expr_id: ast::NodeId) { - /*! - * Arranges cleanup for `self` if it is an rvalue. Use when - * you are done working with a value that may need drop. - */ - self.match_kind( |_| { /* Nothing to do, cleanup already arranged */ }, |r| { @@ -424,16 +386,13 @@ impl Datum { }) } + /// Ensures that `self` will get cleaned up, if it is not an lvalue + /// already. pub fn clean<'a>(self, bcx: &'a Block<'a>, name: &'static str, expr_id: ast::NodeId) -> &'a Block<'a> { - /*! - * Ensures that `self` will get cleaned up, if it is not an lvalue - * already. - */ - self.to_lvalue_datum(bcx, name, expr_id).bcx } @@ -450,15 +409,12 @@ impl Datum { }) } + /// Ensures that we have an rvalue datum (that is, a datum with + /// no cleanup scheduled). pub fn to_rvalue_datum<'a>(self, bcx: &'a Block<'a>, name: &'static str) -> DatumBlock<'a, Rvalue> { - /*! - * Ensures that we have an rvalue datum (that is, a datum with - * no cleanup scheduled). - */ - self.match_kind( |l| { let mut bcx = bcx; @@ -480,19 +436,14 @@ impl Datum { } -/** - * Methods suitable only for lvalues. These include the various - * operations to extract components out of compound data structures, - * such as extracting the field from a struct or a particular element - * from an array. - */ +/// Methods suitable only for lvalues. 
These include the various +/// operations to extract components out of compound data structures, +/// such as extracting the field from a struct or a particular element +/// from an array. impl Datum { + /// Converts a datum into a by-ref value. The datum type must + /// be one which is always passed by reference. pub fn to_llref(self) -> ValueRef { - /*! - * Converts a datum into a by-ref value. The datum type must - * be one which is always passed by reference. - */ - self.val } @@ -507,16 +458,14 @@ impl Datum { } } + /// Converts a vector into the slice pair. pub fn get_vec_base_and_len<'a>(&self, bcx: &'a Block<'a>) -> (ValueRef, ValueRef) { - //! Converts a vector into the slice pair. - tvec::get_base_and_len(bcx, self.val, self.ty) } } -/** - * Generic methods applicable to any sort of datum. - */ +/// Generic methods applicable to any sort of datum. + impl Datum { pub fn new(val: ValueRef, ty: ty::t, kind: K) -> Datum { Datum { val: val, ty: ty, kind: kind } @@ -527,41 +476,35 @@ impl Datum { Datum { val: val, ty: ty, kind: kind.to_expr_kind() } } + /// Moves or copies this value into a new home, as appropriate + /// depending on the type of the datum. This method consumes + /// the datum, since it would be incorrect to go on using the + /// datum if the value represented is affine (and hence the value + /// is moved). pub fn store_to<'a>(self, bcx: &'a Block<'a>, dst: ValueRef) -> &'a Block<'a> { - /*! - * Moves or copies this value into a new home, as appropriate - * depending on the type of the datum. This method consumes - * the datum, since it would be incorrect to go on using the - * datum if the value represented is affine (and hence the value - * is moved). - */ - self.shallow_copy(bcx, dst); self.kind.post_store(bcx, self.val, self.ty) } + /// Helper function that performs a shallow copy of this value + /// into `dst`, which should be a pointer to a memory location + /// suitable for `self.ty`. 
`dst` should contain uninitialized + /// memory (either newly allocated, zeroed, or dropped). + /// + /// This function is private to datums because it leaves memory + /// in an unstable state, where the source value has been + /// copied but not zeroed. Public methods are `store_to` (if + /// you no longer need the source value) or + /// `shallow_copy_and_take` (if you wish the source value to + /// remain valid). fn shallow_copy<'a>(&self, bcx: &'a Block<'a>, dst: ValueRef) -> &'a Block<'a> { - /*! - * Helper function that performs a shallow copy of this value - * into `dst`, which should be a pointer to a memory location - * suitable for `self.ty`. `dst` should contain uninitialized - * memory (either newly allocated, zeroed, or dropped). - * - * This function is private to datums because it leaves memory - * in an unstable state, where the source value has been - * copied but not zeroed. Public methods are `store_to` (if - * you no longer need the source value) or - * `shallow_copy_and_take` (if you wish the source value to - * remain valid). - */ - let _icx = push_ctxt("copy_to_no_check"); if type_is_zero_size(bcx.ccx(), self.ty) { @@ -577,18 +520,15 @@ impl Datum { return bcx; } + /// Copies the value into a new location and runs any necessary + /// take glue on the new location. This function always + /// preserves the existing datum as a valid value. Therefore, + /// it does not consume `self` and, also, cannot be applied to + /// affine values (since they must never be duplicated). pub fn shallow_copy_and_take<'a>(&self, bcx: &'a Block<'a>, dst: ValueRef) -> &'a Block<'a> { - /*! - * Copies the value into a new location and runs any necessary - * take glue on the new location. This function always - * preserves the existing datum as a valid value. Therefore, - * it does not consume `self` and, also, cannot be applied to - * affine values (since they must never be duplicated). 
- */ - assert!(!ty::type_moves_by_default(bcx.tcx(), self.ty)); let mut bcx = bcx; bcx = self.shallow_copy(bcx, dst); @@ -603,22 +543,19 @@ impl Datum { self.kind) } + /// See the `appropriate_rvalue_mode()` function pub fn appropriate_rvalue_mode(&self, ccx: &CrateContext) -> RvalueMode { - /*! See the `appropriate_rvalue_mode()` function */ appropriate_rvalue_mode(ccx, self.ty) } + /// Converts `self` into a by-value `ValueRef`. Consumes this + /// datum (i.e., absolves you of responsibility to cleanup the + /// value). For this to work, the value must be something + /// scalar-ish (like an int or a pointer) which (1) does not + /// require drop glue and (2) is naturally passed around by + /// value, and not by reference. pub fn to_llscalarish<'a>(self, bcx: &'a Block<'a>) -> ValueRef { - /*! - * Converts `self` into a by-value `ValueRef`. Consumes this - * datum (i.e., absolves you of responsibility to cleanup the - * value). For this to work, the value must be something - * scalar-ish (like an int or a pointer) which (1) does not - * require drop glue and (2) is naturally passed around by - * value, and not by reference. - */ - assert!(!ty::type_needs_drop(bcx.tcx(), self.ty)); assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue); if self.kind.is_by_ref() { diff --git a/src/librustc/middle/trans/debuginfo.rs b/src/librustc/middle/trans/debuginfo.rs index 7eb3fcce7803f..99b4372f6db85 100644 --- a/src/librustc/middle/trans/debuginfo.rs +++ b/src/librustc/middle/trans/debuginfo.rs @@ -8,175 +8,180 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -# Debug Info Module - -This module serves the purpose of generating debug symbols. We use LLVM's -[source level debugging](http://llvm.org/docs/SourceLevelDebugging.html) -features for generating the debug information. 
The general principle is this: - -Given the right metadata in the LLVM IR, the LLVM code generator is able to -create DWARF debug symbols for the given code. The -[metadata](http://llvm.org/docs/LangRef.html#metadata-type) is structured much -like DWARF *debugging information entries* (DIE), representing type information -such as datatype layout, function signatures, block layout, variable location -and scope information, etc. It is the purpose of this module to generate correct -metadata and insert it into the LLVM IR. - -As the exact format of metadata trees may change between different LLVM -versions, we now use LLVM -[DIBuilder](http://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html) to -create metadata where possible. This will hopefully ease the adaption of this -module to future LLVM versions. - -The public API of the module is a set of functions that will insert the correct -metadata into the LLVM IR when called with the right parameters. The module is -thus driven from an outside client with functions like -`debuginfo::create_local_var_metadata(bcx: block, local: &ast::local)`. - -Internally the module will try to reuse already created metadata by utilizing a -cache. The way to get a shared metadata node when needed is thus to just call -the corresponding function in this module: - - let file_metadata = file_metadata(crate_context, path); - -The function will take care of probing the cache for an existing node for that -exact file path. - -All private state used by the module is stored within either the -CrateDebugContext struct (owned by the CrateContext) or the FunctionDebugContext -(owned by the FunctionContext). - -This file consists of three conceptual sections: -1. The public interface of the module -2. Module-internal metadata creation functions -3. Minor utility functions - - -## Recursive Types Some kinds of types, such as structs and enums can be -recursive. 
That means that the type definition of some type X refers to some -other type which in turn (transitively) refers to X. This introduces cycles into -the type referral graph. A naive algorithm doing an on-demand, depth-first -traversal of this graph when describing types, can get trapped in an endless -loop when it reaches such a cycle. - -For example, the following simple type for a singly-linked list... - -``` -struct List { - value: int, - tail: Option>, -} -``` - -will generate the following callstack with a naive DFS algorithm: - -``` -describe(t = List) - describe(t = int) - describe(t = Option>) - describe(t = Box) - describe(t = List) // at the beginning again... - ... -``` - -To break cycles like these, we use "forward declarations". That is, when the -algorithm encounters a possibly recursive type (any struct or enum), it -immediately creates a type description node and inserts it into the cache -*before* describing the members of the type. This type description is just a -stub (as type members are not described and added to it yet) but it allows the -algorithm to already refer to the type. After the stub is inserted into the -cache, the algorithm continues as before. If it now encounters a recursive -reference, it will hit the cache and does not try to describe the type anew. - -This behaviour is encapsulated in the 'RecursiveTypeDescription' enum, which -represents a kind of continuation, storing all state needed to continue -traversal at the type members after the type has been registered with the cache. -(This implementation approach might be a tad over-engineered and may change in -the future) - - -## Source Locations and Line Information In addition to data type descriptions -the debugging information must also allow to map machine code locations back to -source code locations in order to be useful. This functionality is also handled -in this module. 
The following functions allow to control source mappings: - -+ set_source_location() -+ clear_source_location() -+ start_emitting_source_locations() - -`set_source_location()` allows to set the current source location. All IR -instructions created after a call to this function will be linked to the given -source location, until another location is specified with -`set_source_location()` or the source location is cleared with -`clear_source_location()`. In the later case, subsequent IR instruction will not -be linked to any source location. As you can see, this is a stateful API -(mimicking the one in LLVM), so be careful with source locations set by previous -calls. It's probably best to not rely on any specific state being present at a -given point in code. - -One topic that deserves some extra attention is *function prologues*. At the -beginning of a function's machine code there are typically a few instructions -for loading argument values into allocas and checking if there's enough stack -space for the function to execute. This *prologue* is not visible in the source -code and LLVM puts a special PROLOGUE END marker into the line table at the -first non-prologue instruction of the function. In order to find out where the -prologue ends, LLVM looks for the first instruction in the function body that is -linked to a source location. So, when generating prologue instructions we have -to make sure that we don't emit source location information until the 'real' -function body begins. For this reason, source location emission is disabled by -default for any new function being translated and is only activated after a call -to the third function from the list above, `start_emitting_source_locations()`. -This function should be called right before regularly starting to translate the -top-level block of the given function. - -There is one exception to the above rule: `llvm.dbg.declare` instruction must be -linked to the source location of the variable being declared. 
For function -parameters these `llvm.dbg.declare` instructions typically occur in the middle -of the prologue, however, they are ignored by LLVM's prologue detection. The -`create_argument_metadata()` and related functions take care of linking the -`llvm.dbg.declare` instructions to the correct source locations even while -source location emission is still disabled, so there is no need to do anything -special with source location handling here. - -## Unique Type Identification In order for link-time optimization to work -properly, LLVM needs a unique type identifier that tells it across compilation -units which types are the same as others. This type identifier is created by -TypeMap::get_unique_type_id_of_type() using the following algorithm: - -(1) Primitive types have their name as ID -(2) Structs, enums and traits have a multipart identifier - - (1) The first part is the SVH (strict version hash) of the crate they were - originally defined in - - (2) The second part is the ast::NodeId of the definition in their original - crate - - (3) The final part is a concatenation of the type IDs of their concrete type - arguments if they are generic types. - -(3) Tuple-, pointer and function types are structurally identified, which means - that they are equivalent if their component types are equivalent (i.e. (int, - int) is the same regardless in which crate it is used). - -This algorithm also provides a stable ID for types that are defined in one crate -but instantiated from metadata within another crate. We just have to take care -to always map crate and node IDs back to the original crate context. - -As a side-effect these unique type IDs also help to solve a problem arising from -lifetime parameters. 
Since lifetime parameters are completely omitted in -debuginfo, more than one `ty::t` instance may map to the same debuginfo type -metadata, that is, some struct `Struct<'a>` may have N instantiations with -different concrete substitutions for `'a`, and thus there will be N `ty::t` -instances for the type `Struct<'a>` even though it is not generic otherwise. -Unfortunately this means that we cannot use `ty::type_id()` as cheap identifier -for type metadata---we have done this in the past, but it led to unnecessary -metadata duplication in the best case and LLVM assertions in the worst. However, -the unique type ID as described above *can* be used as identifier. Since it is -comparatively expensive to construct, though, `ty::type_id()` is still used -additionally as an optimization for cases where the exact same type has been -seen before (which is most of the time). */ +//! # Debug Info Module +//! +//! This module serves the purpose of generating debug symbols. We use LLVM's +//! [source level debugging](http://llvm.org/docs/SourceLevelDebugging.html) +//! features for generating the debug information. The general principle +//! is this: +//! +//! Given the right metadata in the LLVM IR, the LLVM code generator is able to +//! create DWARF debug symbols for the given code. The +//! [metadata](http://llvm.org/docs/LangRef.html#metadata-type) is structured +//! much like DWARF *debugging information entries* (DIE), representing type +//! information such as datatype layout, function signatures, block layout, +//! variable location and scope information, etc. It is the purpose of this +//! module to generate correct metadata and insert it into the LLVM IR. +//! +//! As the exact format of metadata trees may change between different LLVM +//! versions, we now use LLVM +//! [DIBuilder](http://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html) +//! to create metadata where possible. This will hopefully ease the adaption of +//! this module to future LLVM versions. 
+//! +//! The public API of the module is a set of functions that will insert the +//! correct metadata into the LLVM IR when called with the right parameters. The +//! module is thus driven from an outside client with functions like +//! `debuginfo::create_local_var_metadata(bcx: block, local: &ast::local)`. +//! +//! Internally the module will try to reuse already created metadata by +//! utilizing a cache. The way to get a shared metadata node when needed is thus +//! to just call the corresponding function in this module: +//! +//! let file_metadata = file_metadata(crate_context, path); +//! +//! The function will take care of probing the cache for an existing node for +//! that exact file path. +//! +//! All private state used by the module is stored within either the +//! CrateDebugContext struct (owned by the CrateContext) or the +//! FunctionDebugContext (owned by the FunctionContext). +//! +//! This file consists of three conceptual sections: +//! 1. The public interface of the module +//! 2. Module-internal metadata creation functions +//! 3. Minor utility functions +//! +//! +//! ## Recursive Types Some kinds of types, such as structs and enums can be +//! recursive. That means that the type definition of some type X refers to some +//! other type which in turn (transitively) refers to X. This introduces cycles +//! into the type referral graph. A naive algorithm doing an on-demand, +//! depth-first traversal of this graph when describing types, can get trapped +//! in an endless loop when it reaches such a cycle. +//! +//! For example, the following simple type for a singly-linked list... +//! +//! ``` +//! struct List { +//! value: int, +//! tail: Option>, +//! } +//! ``` +//! +//! will generate the following callstack with a naive DFS algorithm: +//! +//! ``` +//! describe(t = List) +//! describe(t = int) +//! describe(t = Option>) +//! describe(t = Box) +//! describe(t = List) // at the beginning again... +//! ... +//! ``` +//! +//! 
To break cycles like these, we use "forward declarations". That is, when the +//! algorithm encounters a possibly recursive type (any struct or enum), it +//! immediately creates a type description node and inserts it into the cache +//! *before* describing the members of the type. This type description is just a +//! stub (as type members are not described and added to it yet) but it allows +//! the algorithm to already refer to the type. After the stub is inserted into +//! the cache, the algorithm continues as before. If it now encounters a +//! recursive reference, it will hit the cache and does not try to describe +//! the type anew. +//! +//! This behaviour is encapsulated in the 'RecursiveTypeDescription' enum, which +//! represents a kind of continuation, storing all state needed to continue +//! traversal at the type members after the type has been registered with the +//! cache. (This implementation approach might be a tad over-engineered and may +//! change in the future) +//! +//! +//! ## Source Locations and Line Information In addition to data type +//! descriptions the debugging information must also allow to map machine code +//! locations back to source code locations in order to be useful. This +//! functionality is also handled in this module. The following functions allow +//! to control source mappings: +//! +//! + set_source_location() +//! + clear_source_location() +//! + start_emitting_source_locations() +//! +//! `set_source_location()` allows to set the current source location. All IR +//! instructions created after a call to this function will be linked to the +//! given source location, until another location is specified with +//! `set_source_location()` or the source location is cleared with +//! `clear_source_location()`. In the later case, subsequent IR instruction will +//! not be linked to any source location. As you can see, this is a stateful API +//! (mimicking the one in LLVM), so be careful with source locations set by +//! 
previous calls. It's probably best to not rely on any specific state being +//! present at a given point in code. +//! +//! One topic that deserves some extra attention is *function prologues*. At the +//! beginning of a function's machine code there are typically a few +//! instructions for loading argument values into allocas and checking if +//! there's enough stack space for the function to execute. This *prologue* is +//! not visible in the source code and LLVM puts a special PROLOGUE END marker +//! into the line table at the first non-prologue instruction of the function. +//! In order to find out where the prologue ends, LLVM looks for the first +//! instruction in the function body that is linked to a source location. So, +//! when generating prologue instructions we have to make sure that we don't +//! emit source location information until the 'real' function body begins. For +//! this reason, source location emission is disabled by default for any new +//! function being translated and is only activated after a call to the third +//! function from the list above, `start_emitting_source_locations()`. This +//! function should be called right before regularly starting to translate the +//! top-level block of the given function. +//! +//! There is one exception to the above rule: `llvm.dbg.declare` instruction +//! must be linked to the source location of the variable being declared. For +//! function parameters these `llvm.dbg.declare` instructions typically occur +//! in the middle of the prologue, however, they are ignored by LLVM's prologue +//! detection. The `create_argument_metadata()` and related functions take care +//! of linking the `llvm.dbg.declare` instructions to the correct source +//! locations even while source location emission is still disabled, so there is +//! no need to do anything special with source location handling here. +//! +//! ## Unique Type Identification In order for link-time optimization to work +//! 
properly, LLVM needs a unique type identifier that tells it across +//! compilation units which types are the same as others. This type identifier +//! is created by TypeMap::get_unique_type_id_of_type() using the following +//! algorithm: +//! +//! (1) Primitive types have their name as ID +//! (2) Structs, enums and traits have a multipart identifier +//! +//! (1) The first part is the SVH (strict version hash) of the crate they +//! were originally defined in +//! +//! (2) The second part is the ast::NodeId of the definition in their +//! original crate +//! +//! (3) The final part is a concatenation of the type IDs of their concrete +//! type arguments if they are generic types. +//! +//! (3) Tuple-, pointer and function types are structurally identified, which +//! means that they are equivalent if their component types are equivalent +//! (i.e. (int, int) is the same regardless in which crate it is used). +//! +//! This algorithm also provides a stable ID for types that are defined in one +//! crate but instantiated from metadata within another crate. We just have to +//! take care to always map crate and node IDs back to the original crate +//! context. +//! +//! As a side-effect these unique type IDs also help to solve a problem arising +//! from lifetime parameters. Since lifetime parameters are completely omitted +//! in debuginfo, more than one `ty::t` instance may map to the same debuginfo +//! type metadata, that is, some struct `Struct<'a>` may have N instantiations +//! with different concrete substitutions for `'a`, and thus there will be N +//! `ty::t` instances for the type `Struct<'a>` even though it is not generic +//! otherwise. Unfortunately this means that we cannot use `ty::type_id()` as +//! cheap identifier for type metadata---we have done this in the past, but it +//! led to unnecessary metadata duplication in the best case and LLVM assertions +//! in the worst. However, the unique type ID as described above *can* be used +//! 
as identifier. Since it is comparatively expensive to construct, though, +//! `ty::type_id()` is still used additionally as an optimization for cases +//! where the exact same type has been seen before (which is most of the time). use driver::config; use driver::config::{FullDebugInfo, LimitedDebugInfo, NoDebugInfo}; diff --git a/src/librustc/middle/trans/expr.rs b/src/librustc/middle/trans/expr.rs index ac33f9bd1a87d..c8fe5db7a47b6 100644 --- a/src/librustc/middle/trans/expr.rs +++ b/src/librustc/middle/trans/expr.rs @@ -8,28 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * # Translation of Expressions - * - * Public entry points: - * - * - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression, - * storing the result into `dest`. This is the preferred form, if you - * can manage it. - * - * - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding - * `Datum` with the result. You can then store the datum, inspect - * the value, etc. This may introduce temporaries if the datum is a - * structural type. - * - * - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an - * expression and ensures that the result has a cleanup associated with it, - * creating a temporary stack slot if necessary. - * - * - `trans_local_var -> Datum`: looks up a local variable or upvar. - * - * See doc.rs for more comments. - */ +//! # Translation of Expressions +//! +//! Public entry points: +//! +//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression, +//! storing the result into `dest`. This is the preferred form, if you +//! can manage it. +//! +//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding +//! `Datum` with the result. You can then store the datum, inspect +//! the value, etc. This may introduce temporaries if the datum is a +//! structural type. +//! +//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an +//! 
expression and ensures that the result has a cleanup associated with it, +//! creating a temporary stack slot if necessary. +//! +//! - `trans_local_var -> Datum`: looks up a local variable or upvar. +//! +//! See doc.rs for more comments. #![allow(non_camel_case_types)] @@ -99,15 +97,12 @@ impl Dest { } } +/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` +/// but it may generate better optimized LLVM code. pub fn trans_into<'a>(bcx: &'a Block<'a>, expr: &ast::Expr, dest: Dest) -> &'a Block<'a> { - /*! - * This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` - * but it may generate better optimized LLVM code. - */ - let mut bcx = bcx; if bcx.tcx().adjustments.borrow().contains_key(&expr.id) { @@ -138,16 +133,13 @@ pub fn trans_into<'a>(bcx: &'a Block<'a>, bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id) } +/// Translates an expression, returning a datum (and new block) +/// encapsulating the result. When possible, it is preferred to +/// use `trans_into`, as that may avoid creating a temporary on +/// the stack. pub fn trans<'a>(bcx: &'a Block<'a>, expr: &ast::Expr) -> DatumBlock<'a, Expr> { - /*! - * Translates an expression, returning a datum (and new block) - * encapsulating the result. When possible, it is preferred to - * use `trans_into`, as that may avoid creating a temporary on - * the stack. - */ - debug!("trans(expr={})", bcx.expr_to_str(expr)); let mut bcx = bcx; @@ -160,15 +152,12 @@ pub fn trans<'a>(bcx: &'a Block<'a>, return DatumBlock::new(bcx, datum); } +/// Helper for trans that apply adjustments from `expr` to `datum`, +/// which should be the unadjusted translation of `expr`. fn apply_adjustments<'a>(bcx: &'a Block<'a>, expr: &ast::Expr, datum: Datum) -> DatumBlock<'a, Expr> { - /*! - * Helper for trans that apply adjustments from `expr` to `datum`, - * which should be the unadjusted translation of `expr`. 
- */ - let mut bcx = bcx; let mut datum = datum; let adjustment = match bcx.tcx().adjustments.borrow().find_copy(&expr.id) { @@ -295,34 +284,28 @@ fn apply_adjustments<'a>(bcx: &'a Block<'a>, } } +/// Translates an expression in "lvalue" mode -- meaning that it +/// returns a reference to the memory that the expr represents. +/// +/// If this expression is an rvalue, this implies introducing a +/// temporary. In other words, something like `x().f` is +/// translated into roughly the equivalent of +/// +/// { tmp = x(); tmp.f } pub fn trans_to_lvalue<'a>(bcx: &'a Block<'a>, expr: &ast::Expr, name: &str) -> DatumBlock<'a, Lvalue> { - /*! - * Translates an expression in "lvalue" mode -- meaning that it - * returns a reference to the memory that the expr represents. - * - * If this expression is an rvalue, this implies introducing a - * temporary. In other words, something like `x().f` is - * translated into roughly the equivalent of - * - * { tmp = x(); tmp.f } - */ - let mut bcx = bcx; let datum = unpack_datum!(bcx, trans(bcx, expr)); return datum.to_lvalue_datum(bcx, name, expr.id); } +/// A version of `trans` that ignores adjustments. You almost +/// certainly do not want to call this directly. fn trans_unadjusted<'a>(bcx: &'a Block<'a>, expr: &ast::Expr) -> DatumBlock<'a, Expr> { - /*! - * A version of `trans` that ignores adjustments. You almost - * certainly do not want to call this directly. - */ - let mut bcx = bcx; debug!("trans_unadjusted(expr={})", bcx.expr_to_str(expr)); @@ -870,14 +853,11 @@ fn trans_def_fn_unadjusted<'a>(bcx: &'a Block<'a>, DatumBlock::new(bcx, Datum::new(llfn, fn_ty, RvalueExpr(Rvalue::new(ByValue)))) } +/// Translates a reference to a local variable or argument. +/// This always results in an lvalue datum. pub fn trans_local_var<'a>(bcx: &'a Block<'a>, def: def::Def) -> Datum { - /*! - * Translates a reference to a local variable or argument. - * This always results in an lvalue datum. 
- */ - let _icx = push_ctxt("trans_local_var"); return match def { @@ -924,18 +904,15 @@ pub fn trans_local_var<'a>(bcx: &'a Block<'a>, } } +/// Helper for enumerating the field types of structs, enums, or records. +/// The optional node ID here is the node ID of the path identifying the enum +/// variant in use. If none, this cannot possibly an enum variant (so, if it +/// is and `node_id_opt` is none, this function fails). pub fn with_field_tys(tcx: &ty::ctxt, ty: ty::t, node_id_opt: Option, op: |ty::Disr, (&[ty::field])| -> R) -> R { - /*! - * Helper for enumerating the field types of structs, enums, or records. - * The optional node ID here is the node ID of the path identifying the enum - * variant in use. If none, this cannot possibly an enum variant (so, if it - * is and `node_id_opt` is none, this function fails). - */ - match ty::get(ty).sty { ty::ty_struct(did, ref substs) => { op(0, struct_fields(tcx, did, substs).as_slice()) @@ -1033,30 +1010,26 @@ fn trans_rec_or_struct<'a>( }) } -/** - * Information that `trans_adt` needs in order to fill in the fields - * of a struct copied from a base struct (e.g., from an expression - * like `Foo { a: b, ..base }`. - * - * Note that `fields` may be empty; the base expression must always be - * evaluated for side-effects. - */ +/// Information that `trans_adt` needs in order to fill in the fields +/// of a struct copied from a base struct (e.g., from an expression +/// like `Foo { a: b, ..base }`. +/// +/// Note that `fields` may be empty; the base expression must always be +/// evaluated for side-effects. struct StructBaseInfo { /// The base expression; will be evaluated after all explicit fields. expr: Gc, /// The indices of fields to copy paired with their types. fields: Vec<(uint, ty::t)> } -/** - * Constructs an ADT instance: - * - * - `fields` should be a list of field indices paired with the - * expression to store into that field. The initializers will be - * evaluated in the order specified by `fields`. 
- * - * - `optbase` contains information on the base struct (if any) from - * which remaining fields are copied; see comments on `StructBaseInfo`. - */ +/// Constructs an ADT instance: +/// +/// - `fields` should be a list of field indices paired with the +/// expression to store into that field. The initializers will be +/// evaluated in the order specified by `fields`. +/// +/// - `optbase` contains information on the base struct (if any) from +/// which remaining fields are copied; see comments on `StructBaseInfo`. fn trans_adt<'a>( bcx: &'a Block<'a>, repr: &adt::Repr, @@ -1826,24 +1799,21 @@ fn deref_once<'a>(bcx: &'a Block<'a>, return r; + /// We microoptimize derefs of owned pointers a bit here. + /// Basically, the idea is to make the deref of an rvalue + /// result in an rvalue. This helps to avoid intermediate stack + /// slots in the resulting LLVM. The idea here is that, if the + /// `Box` pointer is an rvalue, then we can schedule a *shallow* + /// free of the `Box` pointer, and then return a ByRef rvalue + /// into the pointer. Because the free is shallow, it is legit + /// to return an rvalue, because we know that the contents are + /// not yet scheduled to be freed. The language rules ensure that the + /// contents will be used (or moved) before the free occurs. fn deref_owned_pointer<'a>(bcx: &'a Block<'a>, expr: &ast::Expr, datum: Datum, content_ty: ty::t) -> DatumBlock<'a, Expr> { - /*! - * We microoptimize derefs of owned pointers a bit here. - * Basically, the idea is to make the deref of an rvalue - * result in an rvalue. This helps to avoid intermediate stack - * slots in the resulting LLVM. The idea here is that, if the - * `Box` pointer is an rvalue, then we can schedule a *shallow* - * free of the `Box` pointer, and then return a ByRef rvalue - * into the pointer. Because the free is shallow, it is legit - * to return an rvalue, because we know that the contents are - * not yet scheduled to be freed. 
The language rules ensure that the - * contents will be used (or moved) before the free occurs. - */ - match datum.kind { RvalueExpr(Rvalue { mode: ByRef }) => { let scope = cleanup::temporary_scope(bcx.tcx(), expr.id); diff --git a/src/librustc/middle/trans/foreign.rs b/src/librustc/middle/trans/foreign.rs index 9d7261f809414..f06a0e5955c22 100644 --- a/src/librustc/middle/trans/foreign.rs +++ b/src/librustc/middle/trans/foreign.rs @@ -177,13 +177,10 @@ pub fn register_static(ccx: &CrateContext, } } +/// Registers a foreign function found in a library. +/// Just adds a LLVM global. pub fn register_foreign_item_fn(ccx: &CrateContext, abi: Abi, fty: ty::t, name: &str, span: Option) -> ValueRef { - /*! - * Registers a foreign function found in a library. - * Just adds a LLVM global. - */ - debug!("register_foreign_item_fn(abi={}, \ ty={}, \ name={})", @@ -234,6 +231,20 @@ pub fn register_foreign_item_fn(ccx: &CrateContext, abi: Abi, fty: ty::t, llfn } +/// Prepares a call to a native function. This requires adapting +/// from the Rust argument passing rules to the native rules. +/// +/// # Parameters +/// +/// - `callee_ty`: Rust type for the function we are calling +/// - `llfn`: the function pointer we are calling +/// - `llretptr`: where to store the return value of the function +/// - `llargs_rust`: a list of the argument values, prepared +/// as they would be if calling a Rust function +/// - `passed_arg_tys`: Rust type for the arguments. Normally we +/// can derive these from callee_ty but in the case of variadic +/// functions passed_arg_tys will include the Rust type of all +/// the arguments including the ones not specified in the fn's signature. pub fn trans_native_call<'a>( bcx: &'a Block<'a>, callee_ty: ty::t, @@ -242,23 +253,6 @@ pub fn trans_native_call<'a>( llargs_rust: &[ValueRef], passed_arg_tys: Vec ) -> &'a Block<'a> { - /*! - * Prepares a call to a native function. 
This requires adapting - * from the Rust argument passing rules to the native rules. - * - * # Parameters - * - * - `callee_ty`: Rust type for the function we are calling - * - `llfn`: the function pointer we are calling - * - `llretptr`: where to store the return value of the function - * - `llargs_rust`: a list of the argument values, prepared - * as they would be if calling a Rust function - * - `passed_arg_tys`: Rust type for the arguments. Normally we - * can derive these from callee_ty but in the case of variadic - * functions passed_arg_tys will include the Rust type of all - * the arguments including the ones not specified in the fn's signature. - */ - let ccx = bcx.ccx(); let tcx = bcx.tcx(); @@ -834,16 +828,13 @@ pub fn link_name(i: &ast::ForeignItem) -> InternedString { } } +/// The ForeignSignature is the LLVM types of the arguments/return type +/// of a function. Note that these LLVM types are not quite the same +/// as the LLVM types would be for a native Rust function because foreign +/// functions just plain ignore modes. They also don't pass aggregate +/// values by pointer like we do. fn foreign_signature(ccx: &CrateContext, fn_sig: &ty::FnSig, arg_tys: &[ty::t]) -> LlvmSignature { - /*! - * The ForeignSignature is the LLVM types of the arguments/return type - * of a function. Note that these LLVM types are not quite the same - * as the LLVM types would be for a native Rust function because foreign - * functions just plain ignore modes. They also don't pass aggregate - * values by pointer like we do. 
- */ - let llarg_tys = arg_tys.iter().map(|&arg| arg_type_of(ccx, arg)).collect(); let llret_ty = type_of::arg_type_of(ccx, fn_sig.output); LlvmSignature { diff --git a/src/librustc/middle/trans/meth.rs b/src/librustc/middle/trans/meth.rs index e1d43c5240059..f7820a6152c39 100644 --- a/src/librustc/middle/trans/meth.rs +++ b/src/librustc/middle/trans/meth.rs @@ -40,12 +40,11 @@ use syntax::abi::Rust; use syntax::parse::token; use syntax::{ast, ast_map, visit}; -/** -The main "translation" pass for methods. Generates code -for non-monomorphized methods only. Other methods will -be generated once they are invoked with specific type parameters, -see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`. -*/ + +/// The main "translation" pass for methods. Generates code +/// for non-monomorphized methods only. Other methods will +/// be generated once they are invoked with specific type parameters, +/// see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`. pub fn trans_impl(ccx: &CrateContext, name: ast::Ident, methods: &[Gc], @@ -259,29 +258,25 @@ fn trans_monomorphized_callee<'a>(bcx: &'a Block<'a>, } } +/// Creates a concatenated set of substitutions which includes +/// those from the impl and those from the method. This are +/// some subtle complications here. Statically, we have a list +/// of type parameters like `[T0, T1, T2, M1, M2, M3]` where +/// `Tn` are type parameters that appear on the receiver. For +/// example, if the receiver is a method parameter `A` with a +/// bound like `trait` then `Tn` would be `[B,C,D]`. +/// +/// The weird part is that the type `A` might now be bound to +/// any other type, such as `foo`. In that case, the vector +/// we want is: `[X, M1, M2, M3]`. Therefore, what we do now is +/// to slice off the method type parameters and append them to +/// the type parameters from the type that the receiver is +/// mapped to. 
fn combine_impl_and_methods_tps(bcx: &Block, node: ExprOrMethodCall, rcvr_substs: subst::Substs, rcvr_origins: typeck::vtable_res) - -> (subst::Substs, typeck::vtable_res) -{ - /*! - * Creates a concatenated set of substitutions which includes - * those from the impl and those from the method. This are - * some subtle complications here. Statically, we have a list - * of type parameters like `[T0, T1, T2, M1, M2, M3]` where - * `Tn` are type parameters that appear on the receiver. For - * example, if the receiver is a method parameter `A` with a - * bound like `trait` then `Tn` would be `[B,C,D]`. - * - * The weird part is that the type `A` might now be bound to - * any other type, such as `foo`. In that case, the vector - * we want is: `[X, M1, M2, M3]`. Therefore, what we do now is - * to slice off the method type parameters and append them to - * the type parameters from the type that the receiver is - * mapped to. - */ - + -> (subst::Substs, typeck::vtable_res) { let ccx = bcx.ccx(); let vtable_key = match node { @@ -313,21 +308,18 @@ fn combine_impl_and_methods_tps(bcx: &Block, (ty_substs, vtables) } +/// Create a method callee where the method is coming from a trait +/// object (e.g., Box type). In this case, we must pull the fn +/// pointer out of the vtable that is packaged up with the object. +/// Objects are represented as a pair, so we first evaluate the self +/// expression and then extract the self data and vtable out of the +/// pair. fn trans_trait_callee<'a>(bcx: &'a Block<'a>, method_ty: ty::t, n_method: uint, self_expr: &ast::Expr, arg_cleanup_scope: cleanup::ScopeId) -> Callee<'a> { - /*! - * Create a method callee where the method is coming from a trait - * object (e.g., Box type). In this case, we must pull the fn - * pointer out of the vtable that is packaged up with the object. - * Objects are represented as a pair, so we first evaluate the self - * expression and then extract the self data and vtable out of the - * pair. 
- */ - let _icx = push_ctxt("meth::trans_trait_callee"); let mut bcx = bcx; @@ -357,16 +349,13 @@ fn trans_trait_callee<'a>(bcx: &'a Block<'a>, trans_trait_callee_from_llval(bcx, method_ty, n_method, llval) } +/// Same as `trans_trait_callee()` above, except that it is given +/// a by-ref pointer to the object pair. pub fn trans_trait_callee_from_llval<'a>(bcx: &'a Block<'a>, callee_ty: ty::t, n_method: uint, llpair: ValueRef) -> Callee<'a> { - /*! - * Same as `trans_trait_callee()` above, except that it is given - * a by-ref pointer to the object pair. - */ - let _icx = push_ctxt("meth::trans_trait_callee"); let ccx = bcx.ccx(); @@ -514,18 +503,15 @@ fn emit_vtable_methods(bcx: &Block, }).collect() } +/// Generates the code to convert from a pointer (`Box`, `&T`, etc) +/// into an object (`Box`, `&Trait`, etc). This means creating a +/// pair where the first word is the vtable and the second word is +/// the pointer. pub fn trans_trait_cast<'a>(bcx: &'a Block<'a>, datum: Datum, id: ast::NodeId, dest: expr::Dest) -> &'a Block<'a> { - /*! - * Generates the code to convert from a pointer (`Box`, `&T`, etc) - * into an object (`Box`, `&Trait`, etc). This means creating a - * pair where the first word is the vtable and the second word is - * the pointer. - */ - let mut bcx = bcx; let _icx = push_ctxt("meth::trans_cast"); diff --git a/src/librustc/middle/trans/tvec.rs b/src/librustc/middle/trans/tvec.rs index 65bf0b8500821..8d353cfaa7482 100644 --- a/src/librustc/middle/trans/tvec.rs +++ b/src/librustc/middle/trans/tvec.rs @@ -112,18 +112,15 @@ pub fn trans_fixed_vstore<'a>( }; } +/// &[...] allocates memory on the stack and writes the values into it, +/// returning a slice (pair of ptr, len). &"..." is similar except that +/// the memory can be statically allocated. pub fn trans_slice_vstore<'a>( bcx: &'a Block<'a>, vstore_expr: &ast::Expr, content_expr: &ast::Expr, dest: expr::Dest) -> &'a Block<'a> { - /*! - * &[...] 
allocates memory on the stack and writes the values into it, - * returning a slice (pair of ptr, len). &"..." is similar except that - * the memory can be statically allocated. - */ - let fcx = bcx.fcx; let ccx = fcx.ccx; let mut bcx = bcx; @@ -188,18 +185,15 @@ pub fn trans_slice_vstore<'a>( return bcx; } +/// Literal strings translate to slices into static memory. This is +/// different from trans_slice_vstore() above because it does need to copy +/// the content anywhere. pub fn trans_lit_str<'a>( bcx: &'a Block<'a>, lit_expr: &ast::Expr, str_lit: InternedString, dest: Dest) -> &'a Block<'a> { - /*! - * Literal strings translate to slices into static memory. This is - * different from trans_slice_vstore() above because it does need to copy - * the content anywhere. - */ - debug!("trans_lit_str(lit_expr={}, dest={})", bcx.expr_to_str(lit_expr), dest.to_str(bcx.ccx())); @@ -223,15 +217,12 @@ pub fn trans_lit_str<'a>( } +/// ~[...] and "...".to_string() allocate boxes in the exchange heap and write +/// the array elements into them. pub fn trans_uniq_vstore<'a>(bcx: &'a Block<'a>, vstore_expr: &ast::Expr, content_expr: &ast::Expr) -> DatumBlock<'a, Expr> { - /*! - * ~[...] and "...".to_string() allocate boxes in the exchange heap and write - * the array elements into them. - */ - debug!("trans_uniq_vstore(vstore_expr={})", bcx.expr_to_str(vstore_expr)); let fcx = bcx.fcx; let ccx = fcx.ccx; @@ -450,16 +441,13 @@ pub fn elements_required(bcx: &Block, content_expr: &ast::Expr) -> uint { } } +/// Converts a fixed-length vector into the slice pair. +/// The vector should be stored in `llval` which should be by ref. pub fn get_fixed_base_and_byte_len(bcx: &Block, llval: ValueRef, unit_ty: ty::t, vec_length: uint) -> (ValueRef, ValueRef) { - /*! - * Converts a fixed-length vector into the slice pair. - * The vector should be stored in `llval` which should be by ref. 
- */ - let ccx = bcx.ccx(); let vt = vec_types(bcx, unit_ty); @@ -468,18 +456,15 @@ pub fn get_fixed_base_and_byte_len(bcx: &Block, (base, len) } +/// Converts a vector into the slice pair. The vector should be +/// stored in `llval` which should be by-reference. If you have a +/// datum, you would probably prefer to call +/// `Datum::get_base_and_len()` which will handle any conversions +/// for you. pub fn get_base_and_len(bcx: &Block, llval: ValueRef, vec_ty: ty::t) -> (ValueRef, ValueRef) { - /*! - * Converts a vector into the slice pair. The vector should be - * stored in `llval` which should be by-reference. If you have a - * datum, you would probably prefer to call - * `Datum::get_base_and_len()` which will handle any conversions - * for you. - */ - let ccx = bcx.ccx(); match ty::get(vec_ty).sty { diff --git a/src/librustc/middle/trans/type_.rs b/src/librustc/middle/trans/type_.rs index b10f6eda88053..9461ca23767d4 100644 --- a/src/librustc/middle/trans/type_.rs +++ b/src/librustc/middle/trans/type_.rs @@ -32,9 +32,7 @@ macro_rules! ty ( ($e:expr) => ( Type::from_ref(unsafe { $e })) ) -/** - * Wrapper for LLVM TypeRef - */ +/// Wrapper for LLVM TypeRef impl Type { #[inline(always)] pub fn from_ref(r: TypeRef) -> Type { diff --git a/src/librustc/middle/trans/value.rs b/src/librustc/middle/trans/value.rs index e627b859f4295..0704c324ec6eb 100644 --- a/src/librustc/middle/trans/value.rs +++ b/src/librustc/middle/trans/value.rs @@ -24,9 +24,7 @@ macro_rules! 
opt_val ( ($e:expr) => ( } )) -/** - * Wrapper for LLVM ValueRef - */ +/// Wrapper for LLVM ValueRef impl Value { /// Returns the native ValueRef pub fn get(&self) -> ValueRef { @@ -126,9 +124,7 @@ impl Value { pub struct Use(UseRef); -/** - * Wrapper for LLVM UseRef - */ +/// Wrapper for LLVM UseRef impl Use { pub fn get(&self) -> UseRef { let Use(v) = *self; v diff --git a/src/librustc/middle/ty.rs b/src/librustc/middle/ty.rs index 141731ded9562..0ee5578602357 100644 --- a/src/librustc/middle/ty.rs +++ b/src/librustc/middle/ty.rs @@ -184,8 +184,8 @@ impl Hash for intern_key { } pub enum ast_ty_to_ty_cache_entry { - atttce_unresolved, /* not resolved yet */ - atttce_resolved(t) /* resolved to a type, irrespective of region */ + atttce_unresolved, // not resolved yet + atttce_resolved(t) // resolved to a type, irrespective of region } #[deriving(Clone, PartialEq, Decodable, Encodable)] @@ -208,8 +208,8 @@ pub enum AutoAdjustment { AutoDerefRef(AutoDerefRef), AutoObject(ty::TraitStore, ty::BuiltinBounds, - ast::DefId, /* Trait ID */ - subst::Substs /* Trait substitutions */) + ast::DefId, // Trait ID + subst::Substs) // Trait substitutions } #[deriving(Clone, Decodable, Encodable)] @@ -452,18 +452,16 @@ pub struct ClosureTy { pub sig: FnSig, } -/** - * Signature of a function type, which I have arbitrarily - * decided to use to refer to the input/output types. - * - * - `binder_id` is the node id where this fn type appeared; - * it is used to identify all the bound regions appearing - * in the input/output types that are bound by this fn type - * (vs some enclosing or enclosed fn type) - * - `inputs` is the list of arguments and their modes. - * - `output` is the return type. - * - `variadic` indicates whether this is a varidic function. (only true for foreign fns) - */ +/// Signature of a function type, which I have arbitrarily +/// decided to use to refer to the input/output types. 
+/// +/// - `binder_id` is the node id where this fn type appeared; +/// it is used to identify all the bound regions appearing +/// in the input/output types that are bound by this fn type +/// (vs some enclosing or enclosed fn type) +/// - `inputs` is the list of arguments and their modes. +/// - `output` is the return type. +/// - `variadic` indicates whether this is a varidic function. (only true for foreign fns) #[deriving(Clone, PartialEq, Eq, Hash)] pub struct FnSig { pub binder_id: ast::NodeId, @@ -519,11 +517,9 @@ pub enum Region { ReEmpty, } -/** - * Upvars do not get their own node-id. Instead, we use the pair of - * the original var id (that is, the root variable that is referenced - * by the upvar) and the id of the closure expression. - */ +/// Upvars do not get their own node-id. Instead, we use the pair of +/// the original var id (that is, the root variable that is referenced +/// by the upvar) and the id of the closure expression. #[deriving(Clone, PartialEq, Eq, Hash)] pub struct UpvarId { pub var_id: ast::NodeId, @@ -576,55 +572,53 @@ pub enum BorrowKind { MutBorrow } -/** - * Information describing the borrowing of an upvar. This is computed - * during `typeck`, specifically by `regionck`. The general idea is - * that the compiler analyses treat closures like: - * - * let closure: &'e fn() = || { - * x = 1; // upvar x is assigned to - * use(y); // upvar y is read - * foo(&z); // upvar z is borrowed immutably - * }; - * - * as if they were "desugared" to something loosely like: - * - * struct Vars<'x,'y,'z> { x: &'x mut int, - * y: &'y const int, - * z: &'z int } - * let closure: &'e fn() = { - * fn f(env: &Vars) { - * *env.x = 1; - * use(*env.y); - * foo(env.z); - * } - * let env: &'e mut Vars<'x,'y,'z> = &mut Vars { x: &'x mut x, - * y: &'y const y, - * z: &'z z }; - * (env, f) - * }; - * - * This is basically what happens at runtime. The closure is basically - * an existentially quantified version of the `(env, f)` pair. 
- * - * This data structure indicates the region and mutability of a single - * one of the `x...z` borrows. - * - * It may not be obvious why each borrowed variable gets its own - * lifetime (in the desugared version of the example, these are indicated - * by the lifetime parameters `'x`, `'y`, and `'z` in the `Vars` definition). - * Each such lifetime must encompass the lifetime `'e` of the closure itself, - * but need not be identical to it. The reason that this makes sense: - * - * - Callers are only permitted to invoke the closure, and hence to - * use the pointers, within the lifetime `'e`, so clearly `'e` must - * be a sublifetime of `'x...'z`. - * - The closure creator knows which upvars were borrowed by the closure - * and thus `x...z` will be reserved for `'x...'z` respectively. - * - Through mutation, the borrowed upvars can actually escape - * the closure, so sometimes it is necessary for them to be larger - * than the closure lifetime itself. - */ +/// Information describing the borrowing of an upvar. This is computed +/// during `typeck`, specifically by `regionck`. The general idea is +/// that the compiler analyses treat closures like: +/// +/// let closure: &'e fn() = || { +/// x = 1; // upvar x is assigned to +/// use(y); // upvar y is read +/// foo(&z); // upvar z is borrowed immutably +/// }; +/// +/// as if they were "desugared" to something loosely like: +/// +/// struct Vars<'x,'y,'z> { x: &'x mut int, +/// y: &'y const int, +/// z: &'z int } +/// let closure: &'e fn() = { +/// fn f(env: &Vars) { +/// *env.x = 1; +/// use(*env.y); +/// foo(env.z); +/// } +/// let env: &'e mut Vars<'x,'y,'z> = &mut Vars { x: &'x mut x, +/// y: &'y const y, +/// z: &'z z }; +/// (env, f) +/// }; +/// +/// This is basically what happens at runtime. The closure is basically +/// an existentially quantified version of the `(env, f)` pair. +/// +/// This data structure indicates the region and mutability of a single +/// one of the `x...z` borrows. 
+/// +/// It may not be obvious why each borrowed variable gets its own +/// lifetime (in the desugared version of the example, these are indicated +/// by the lifetime parameters `'x`, `'y`, and `'z` in the `Vars` definition). +/// Each such lifetime must encompass the lifetime `'e` of the closure itself, +/// but need not be identical to it. The reason that this makes sense: +/// +/// - Callers are only permitted to invoke the closure, and hence to +/// use the pointers, within the lifetime `'e`, so clearly `'e` must +/// be a sublifetime of `'x...'z`. +/// - The closure creator knows which upvars were borrowed by the closure +/// and thus `x...z` will be reserved for `'x...'z` respectively. +/// - Through mutation, the borrowed upvars can actually escape +/// the closure, so sometimes it is necessary for them to be larger +/// than the closure lifetime itself. #[deriving(PartialEq, Clone)] pub struct UpvarBorrow { pub kind: BorrowKind, @@ -1639,11 +1633,9 @@ pub fn type_is_unique(ty: t) -> bool { } } -/* - A scalar type is one that denotes an atomic datum, with no sub-components. - (A ty_ptr is scalar because it represents a non-managed pointer, so its - contents are abstract to rustc.) -*/ +// A scalar type is one that denotes an atomic datum, with no sub-components. +// (A ty_ptr is scalar because it represents a non-managed pointer, so its +// contents are abstract to rustc.) pub fn type_is_scalar(ty: t) -> bool { match get(ty).sty { ty_nil | ty_bool | ty_char | ty_int(_) | ty_float(_) | ty_uint(_) | @@ -1738,18 +1730,16 @@ fn type_needs_unwind_cleanup_(cx: &ctxt, ty: t, return needs_unwind_cleanup; } -/** - * Type contents is how the type checker reasons about kinds. - * They track what kinds of things are found within a type. You can - * think of them as kind of an "anti-kind". They track the kinds of values - * and thinks that are contained in types. Having a larger contents for - * a type tends to rule that type *out* from various kinds. 
For example, - * a type that contains a reference is not sendable. - * - * The reason we compute type contents and not kinds is that it is - * easier for me (nmatsakis) to think about what is contained within - * a type than to think about what is *not* contained within a type. - */ +/// Type contents is how the type checker reasons about kinds. +/// They track what kinds of things are found within a type. You can +/// think of them as kind of an "anti-kind". They track the kinds of values +/// and thinks that are contained in types. Having a larger contents for +/// a type tends to rule that type *out* from various kinds. For example, +/// a type that contains a reference is not sendable. +/// +/// The reason we compute type contents and not kinds is that it is +/// easier for me (nmatsakis) to think about what is contained within +/// a type than to think about what is *not* contained within a type. pub struct TypeContents { pub bits: u64 } @@ -1891,38 +1881,30 @@ impl TypeContents { self.intersects(TC::NeedsDrop) } + /// Includes only those bits that still apply + /// when indirected through a `Box` pointer pub fn owned_pointer(&self) -> TypeContents { - /*! - * Includes only those bits that still apply - * when indirected through a `Box` pointer - */ TC::OwnsOwned | ( *self & (TC::OwnsAll | TC::ReachesAll)) } + /// Includes only those bits that still apply + /// when indirected through a reference (`&`) pub fn reference(&self, bits: TypeContents) -> TypeContents { - /*! - * Includes only those bits that still apply - * when indirected through a reference (`&`) - */ bits | ( *self & TC::ReachesAll) } + /// Includes only those bits that still apply + /// when indirected through a managed pointer (`@`) pub fn managed_pointer(&self) -> TypeContents { - /*! 
- * Includes only those bits that still apply - * when indirected through a managed pointer (`@`) - */ TC::Managed | ( *self & TC::ReachesAll) } + /// Includes only those bits that still apply + /// when indirected through an unsafe pointer (`*`) pub fn unsafe_pointer(&self) -> TypeContents { - /*! - * Includes only those bits that still apply - * when indirected through an unsafe pointer (`*`) - */ *self & TC::ReachesAll } @@ -2150,14 +2132,11 @@ pub fn type_contents(cx: &ctxt, ty: t) -> TypeContents { } } + /// Type contents due to containing a reference + /// with the region `region` and borrow kind `bk` fn borrowed_contents(region: ty::Region, mutbl: ast::Mutability) -> TypeContents { - /*! - * Type contents due to containing a reference - * with the region `region` and borrow kind `bk` - */ - let b = match mutbl { ast::MutMutable => TC::ReachesMutable | TC::OwnsAffine, ast::MutImmutable => TC::None, @@ -2731,20 +2710,16 @@ pub fn expr_ty_opt(cx: &ctxt, expr: &ast::Expr) -> Option { return node_id_to_type_opt(cx, expr.id); } +/// Returns the type of `expr`, considering any `AutoAdjustment` +/// entry recorded for that expression. +/// +/// It would almost certainly be better to store the adjusted ty in with +/// the `AutoAdjustment`, but I opted not to do this because it would +/// require serializing and deserializing the type and, although that's not +/// hard to do, I just hate that code so much I didn't want to touch it +/// unless it was to fix it properly, which seemed a distraction from the +/// task at hand! -nmatsakis pub fn expr_ty_adjusted(cx: &ctxt, expr: &ast::Expr) -> t { - /*! - * - * Returns the type of `expr`, considering any `AutoAdjustment` - * entry recorded for that expression. 
- * - * It would almost certainly be better to store the adjusted ty in with - * the `AutoAdjustment`, but I opted not to do this because it would - * require serializing and deserializing the type and, although that's not - * hard to do, I just hate that code so much I didn't want to touch it - * unless it was to fix it properly, which seemed a distraction from the - * task at hand! -nmatsakis - */ - adjust_ty(cx, expr.span, expr.id, expr_ty(cx, expr), cx.adjustments.borrow().find(&expr.id), |method_call| cx.method_map.borrow().find(&method_call).map(|method| method.ty)) @@ -2790,6 +2765,7 @@ pub fn local_var_name_str(cx: &ctxt, id: NodeId) -> InternedString { } } +/// See `expr_ty_adjusted` pub fn adjust_ty(cx: &ctxt, span: Span, expr_id: ast::NodeId, @@ -2797,8 +2773,6 @@ pub fn adjust_ty(cx: &ctxt, adjustment: Option<&AutoAdjustment>, method_type: |typeck::MethodCall| -> Option) -> ty::t { - /*! See `expr_ty_adjusted` */ - return match adjustment { Some(adjustment) => { match *adjustment { @@ -3253,16 +3227,13 @@ pub fn ty_sort_str(cx: &ctxt, t: t) -> String { } } +/// Explains the source of a type err in a short, +/// human readable way. This is meant to be placed in +/// parentheses after some larger message. You should +/// also invoke `note_and_explain_type_err()` afterwards +/// to present additional details, particularly when +/// it comes to lifetime-related errors. pub fn type_err_to_str(cx: &ctxt, err: &type_err) -> String { - /*! - * - * Explains the source of a type err in a short, - * human readable way. This is meant to be placed in - * parentheses after some larger message. You should - * also invoke `note_and_explain_type_err()` afterwards - * to present additional details, particularly when - * it comes to lifetime-related errors. 
*/ - fn tstore_to_closure(s: &TraitStore) -> String { match s { &UniqTraitStore => "proc".to_string(), @@ -3483,21 +3454,18 @@ pub fn trait_ref_supertraits(cx: &ctxt, trait_ref: &ty::TraitRef) -> Vec( descr: &str, def_id: ast::DefId, map: &mut DefIdMap, load_external: || -> V) -> V { - /*! - * Helper for looking things up in the various maps - * that are populated during typeck::collect (e.g., - * `cx.methods`, `cx.tcache`, etc). All of these share - * the pattern that if the id is local, it should have - * been loaded into the map by the `typeck::collect` phase. - * If the def-id is external, then we have to go consult - * the crate loading code (and cache the result for the future). - */ - match map.find_copy(&def_id) { Some(v) => { return v; } None => { } @@ -3726,8 +3694,8 @@ impl DtorKind { } } -/* If struct_id names a struct with a dtor, return Some(the dtor's id). - Otherwise return none. */ +// If struct_id names a struct with a dtor, return Some(the dtor's id). +// Otherwise return none. 
pub fn ty_dtor(cx: &ctxt, struct_id: DefId) -> DtorKind { match cx.destructor_for_type.borrow().find(&struct_id) { Some(&method_def_id) => { @@ -3771,11 +3739,9 @@ pub fn enum_variants(cx: &ctxt, id: ast::DefId) -> Rc>> { let result = if ast::LOCAL_CRATE != id.krate { Rc::new(csearch::get_enum_variants(cx, id)) } else { - /* - Although both this code and check_enum_variants in typeck/check - call eval_const_expr, it should never get called twice for the same - expr, since check_enum_variants also updates the enum_var_cache - */ + // Although both this code and check_enum_variants in typeck/check + // call eval_const_expr, it should never get called twice for the same + // expr, since check_enum_variants also updates the enum_var_cache match cx.map.get(id.node) { ast_map::NodeItem(item) => { match item.node { @@ -4628,14 +4594,12 @@ impl Variance { } } +/// See `ParameterEnvironment` struct def'n for details pub fn construct_parameter_environment( tcx: &ctxt, generics: &ty::Generics, free_id: ast::NodeId) - -> ParameterEnvironment -{ - /*! See `ParameterEnvironment` struct def'n for details */ - + -> ParameterEnvironment { // // Construct the free substs. // diff --git a/src/librustc/middle/typeck/astconv.rs b/src/librustc/middle/typeck/astconv.rs index 286cb5364758a..1381cb0382b57 100644 --- a/src/librustc/middle/typeck/astconv.rs +++ b/src/librustc/middle/typeck/astconv.rs @@ -8,46 +8,44 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Conversion from AST representation of types to the ty.rs - * representation. The main routine here is `ast_ty_to_ty()`: each use - * is parameterized by an instance of `AstConv` and a `RegionScope`. - * - * The parameterization of `ast_ty_to_ty()` is because it behaves - * somewhat differently during the collect and check phases, - * particularly with respect to looking up the types of top-level - * items. 
In the collect phase, the crate context is used as the - * `AstConv` instance; in this phase, the `get_item_ty()` function - * triggers a recursive call to `ty_of_item()` (note that - * `ast_ty_to_ty()` will detect recursive types and report an error). - * In the check phase, when the FnCtxt is used as the `AstConv`, - * `get_item_ty()` just looks up the item type in `tcx.tcache`. - * - * The `RegionScope` trait controls what happens when the user does - * not specify a region in some location where a region is required - * (e.g., if the user writes `&Foo` as a type rather than `&'a Foo`). - * See the `rscope` module for more details. - * - * Unlike the `AstConv` trait, the region scope can change as we descend - * the type. This is to accommodate the fact that (a) fn types are binding - * scopes and (b) the default region may change. To understand case (a), - * consider something like: - * - * type foo = { x: &a.int, y: |&a.int| } - * - * The type of `x` is an error because there is no region `a` in scope. - * In the type of `y`, however, region `a` is considered a bound region - * as it does not already appear in scope. - * - * Case (b) says that if you have a type: - * type foo<'a> = ...; - * type bar = fn(&foo, &a.foo) - * The fully expanded version of type bar is: - * type bar = fn(&'foo &, &a.foo<'a>) - * Note that the self region for the `foo` defaulted to `&` in the first - * case but `&a` in the second. Basically, defaults that appear inside - * an rptr (`&r.T`) use the region `r` that appears in the rptr. - */ +//! Conversion from AST representation of types to the ty.rs +//! representation. The main routine here is `ast_ty_to_ty()`: each use +//! is parameterized by an instance of `AstConv` and a `RegionScope`. +//! +//! The parameterization of `ast_ty_to_ty()` is because it behaves +//! somewhat differently during the collect and check phases, +//! particularly with respect to looking up the types of top-level +//! items. 
In the collect phase, the crate context is used as the +//! `AstConv` instance; in this phase, the `get_item_ty()` function +//! triggers a recursive call to `ty_of_item()` (note that +//! `ast_ty_to_ty()` will detect recursive types and report an error). +//! In the check phase, when the FnCtxt is used as the `AstConv`, +//! `get_item_ty()` just looks up the item type in `tcx.tcache`. +//! +//! The `RegionScope` trait controls what happens when the user does +//! not specify a region in some location where a region is required +//! (e.g., if the user writes `&Foo` as a type rather than `&'a Foo`). +//! See the `rscope` module for more details. +//! +//! Unlike the `AstConv` trait, the region scope can change as we descend +//! the type. This is to accommodate the fact that (a) fn types are binding +//! scopes and (b) the default region may change. To understand case (a), +//! consider something like: +//! +//! type foo = { x: &a.int, y: |&a.int| } +//! +//! The type of `x` is an error because there is no region `a` in scope. +//! In the type of `y`, however, region `a` is considered a bound region +//! as it does not already appear in scope. +//! +//! Case (b) says that if you have a type: +//! type foo<'a> = ...; +//! type bar = fn(&foo, &a.foo) +//! The fully expanded version of type bar is: +//! type bar = fn(&'foo &, &a.foo<'a>) +//! Note that the self region for the `foo` defaulted to `&` in the first +//! case but `&a` in the second. Basically, defaults that appear inside +//! an rptr (`&r.T`) use the region `r` that appears in the rptr. use middle::const_eval; use middle::def; @@ -148,19 +146,15 @@ pub fn opt_ast_region_to_region( r } +/// Given a path `path` that refers to an item `I` with the +/// declared generics `decl_generics`, returns an appropriate +/// set of substitutions for this particular reference to `I`. fn ast_path_substs( this: &AC, rscope: &RS, decl_generics: &ty::Generics, self_ty: Option, - path: &ast::Path) -> Substs -{ - /*! 
- * Given a path `path` that refers to an item `I` with the - * declared generics `decl_generics`, returns an appropriate - * set of substitutions for this particular reference to `I`. - */ - + path: &ast::Path) -> Substs { let tcx = this.tcx(); // ast_path_substs() is only called to convert paths that are diff --git a/src/librustc/middle/typeck/check/method.rs b/src/librustc/middle/typeck/check/method.rs index 6e44665fb3b46..a41c5319ad800 100644 --- a/src/librustc/middle/typeck/check/method.rs +++ b/src/librustc/middle/typeck/check/method.rs @@ -8,77 +8,72 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -# Method lookup - -Method lookup can be rather complex due to the interaction of a number -of factors, such as self types, autoderef, trait lookup, etc. The -algorithm is divided into two parts: candidate collection and -candidate selection. - -## Candidate collection - -A `Candidate` is a method item that might plausibly be the method -being invoked. Candidates are grouped into two kinds, inherent and -extension. Inherent candidates are those that are derived from the -type of the receiver itself. So, if you have a receiver of some -nominal type `Foo` (e.g., a struct), any methods defined within an -impl like `impl Foo` are inherent methods. Nothing needs to be -imported to use an inherent method, they are associated with the type -itself (note that inherent impls can only be defined in the same -module as the type itself). - -Inherent candidates are not always derived from impls. If you have a -trait instance, such as a value of type `Box`, then the trait -methods (`to_str()`, in this case) are inherently associated with it. -Another case is type parameters, in which case the methods of their -bounds are inherent. - -Extension candidates are derived from imported traits. 
If I have the -trait `ToStr` imported, and I call `to_str()` on a value of type `T`, -then we will go off to find out whether there is an impl of `ToStr` -for `T`. These kinds of method calls are called "extension methods". -They can be defined in any module, not only the one that defined `T`. -Furthermore, you must import the trait to call such a method. - -For better or worse, we currently give weight to inherent methods over -extension methods during candidate selection (below). - -## Candidate selection - -Once we know the set of candidates, we can go off and try to select -which one is actually being called. We do this by taking the type of -the receiver, let's call it R, and checking whether it matches against -the expected receiver type for each of the collected candidates. We -first check for inherent candidates and see whether we get exactly one -match (zero means keep searching, more than one is an error). If so, -we return that as the candidate. Otherwise we search the extension -candidates in the same way. - -If find no matching candidate at all, we proceed to auto-deref the -receiver type and search again. We keep doing that until we cannot -auto-deref any longer. At each step, we also check for candidates -based on "autoptr", which if the current type is `T`, checks for `&mut -T`, `&const T`, and `&T` receivers. Finally, at the very end, we will -also try autoslice, which converts `~[]` to `&[]` (there is no point -at trying autoslice earlier, because no autoderefable type is also -sliceable). - -## Why two phases? - -You might wonder why we first collect the candidates and then select. -Both the inherent candidate collection and the candidate selection -proceed by progressively deref'ing the receiver type, after all. The -answer is that two phases are needed to elegantly deal with explicit -self. After all, if there is an impl for the type `Foo`, it can -define a method with the type `Box`, which means that it expects a -receiver of type `Box`. 
If we have a receiver of type `Box`, but we -waited to search for that impl until we have deref'd the `Box` away and -obtained the type `Foo`, we would never match this method. - -*/ - +//! # Method lookup +//! +//! Method lookup can be rather complex due to the interaction of a number +//! of factors, such as self types, autoderef, trait lookup, etc. The +//! algorithm is divided into two parts: candidate collection and +//! candidate selection. +//! +//! ## Candidate collection +//! +//! A `Candidate` is a method item that might plausibly be the method +//! being invoked. Candidates are grouped into two kinds, inherent and +//! extension. Inherent candidates are those that are derived from the +//! type of the receiver itself. So, if you have a receiver of some +//! nominal type `Foo` (e.g., a struct), any methods defined within an +//! impl like `impl Foo` are inherent methods. Nothing needs to be +//! imported to use an inherent method, they are associated with the type +//! itself (note that inherent impls can only be defined in the same +//! module as the type itself). +//! +//! Inherent candidates are not always derived from impls. If you have a +//! trait instance, such as a value of type `Box`, then the trait +//! methods (`to_str()`, in this case) are inherently associated with it. +//! Another case is type parameters, in which case the methods of their +//! bounds are inherent. +//! +//! Extension candidates are derived from imported traits. If I have the +//! trait `ToStr` imported, and I call `to_str()` on a value of type `T`, +//! then we will go off to find out whether there is an impl of `ToStr` +//! for `T`. These kinds of method calls are called "extension methods". +//! They can be defined in any module, not only the one that defined `T`. +//! Furthermore, you must import the trait to call such a method. +//! +//! For better or worse, we currently give weight to inherent methods over +//! extension methods during candidate selection (below). +//! 
+//! ## Candidate selection +//! +//! Once we know the set of candidates, we can go off and try to select +//! which one is actually being called. We do this by taking the type of +//! the receiver, let's call it R, and checking whether it matches against +//! the expected receiver type for each of the collected candidates. We +//! first check for inherent candidates and see whether we get exactly one +//! match (zero means keep searching, more than one is an error). If so, +//! we return that as the candidate. Otherwise we search the extension +//! candidates in the same way. +//! +//! If find no matching candidate at all, we proceed to auto-deref the +//! receiver type and search again. We keep doing that until we cannot +//! auto-deref any longer. At each step, we also check for candidates +//! based on "autoptr", which if the current type is `T`, checks for `&mut +//! T`, `&const T`, and `&T` receivers. Finally, at the very end, we will +//! also try autoslice, which converts `~[]` to `&[]` (there is no point +//! at trying autoslice earlier, because no autoderefable type is also +//! sliceable). +//! +//! ## Why two phases? +//! +//! You might wonder why we first collect the candidates and then select. +//! Both the inherent candidate collection and the candidate selection +//! proceed by progressively deref'ing the receiver type, after all. The +//! answer is that two phases are needed to elegantly deal with explicit +//! self. After all, if there is an impl for the type `Foo`, it can +//! define a method with the type `Box`, which means that it expects a +//! receiver of type `Box`. If we have a receiver of type `Box`, but +//! we waited to search for that impl until we have deref'd the `Box` away and +//! obtained the type `Foo`, we would never match this method. use middle::subst; use middle::subst::Subst; @@ -230,35 +225,31 @@ fn get_method_index(tcx: &ty::ctxt, method_count + n_method } +/// This is a bit tricky. 
We have a match against a trait method +/// being invoked on an object, and we want to generate the +/// self-type. As an example, consider a trait +/// +/// trait Foo { +/// fn r_method<'a>(&'a self); +/// fn u_method(Box); +/// } +/// +/// Now, assuming that `r_method` is being called, we want the +/// result to be `&'a Foo`. Assuming that `u_method` is being +/// called, we want the result to be `Box`. Of course, +/// this transformation has already been done as part of +/// `method_ty.fty.sig.inputs[0]`, but there the type +/// is expressed in terms of `Self` (i.e., `&'a Self`, `Box`). +/// Because objects are not standalone types, we can't just substitute +/// `s/Self/Foo/`, so we must instead perform this kind of hokey +/// match below. fn construct_transformed_self_ty_for_object( tcx: &ty::ctxt, span: Span, trait_def_id: ast::DefId, rcvr_substs: &subst::Substs, method_ty: &ty::Method) - -> ty::t -{ - /*! - * This is a bit tricky. We have a match against a trait method - * being invoked on an object, and we want to generate the - * self-type. As an example, consider a trait - * - * trait Foo { - * fn r_method<'a>(&'a self); - * fn u_method(Box); - * } - * - * Now, assuming that `r_method` is being called, we want the - * result to be `&'a Foo`. Assuming that `u_method` is being - * called, we want the result to be `Box`. Of course, - * this transformation has already been done as part of - * `method_ty.fty.sig.inputs[0]`, but there the type - * is expressed in terms of `Self` (i.e., `&'a Self`, `Box`). - * Because objects are not standalone types, we can't just substitute - * `s/Self/Foo/`, so we must instead perform this kind of hokey - * match below. - */ - + -> ty::t { let mut obj_substs = rcvr_substs.clone(); // The subst we get in has Err as the "Self" type. For an object @@ -320,10 +311,8 @@ struct LookupContext<'a> { report_statics: StaticMethodsFlag, } -/** - * A potential method that might be called, assuming the receiver - * is of a suitable type. 
- */ +/// A potential method that might be called, assuming the receiver +/// is of a suitable type. #[deriving(Clone)] struct Candidate { rcvr_match_condition: RcvrMatchCondition, @@ -423,16 +412,13 @@ impl<'a> LookupContext<'a> { self.extension_candidates = Vec::new(); } + /// Collect all inherent candidates into + /// `self.inherent_candidates`. See comment at the start of + /// the file. To find the inherent candidates, we repeatedly + /// deref the self-ty to find the "base-type". So, for + /// example, if the receiver is Box> where `C` is a struct type, + /// we'll want to find the inherent impls for `C`. fn push_inherent_candidates(&mut self, self_ty: ty::t) { - /*! - * Collect all inherent candidates into - * `self.inherent_candidates`. See comment at the start of - * the file. To find the inherent candidates, we repeatedly - * deref the self-ty to find the "base-type". So, for - * example, if the receiver is Box> where `C` is a struct type, - * we'll want to find the inherent impls for `C`. - */ - let span = self.self_expr.map_or(self.span, |e| e.span); check::autoderef(self.fcx, span, self_ty, None, PreferMutLvalue, |self_ty, _| { match get(self_ty).sty { @@ -745,27 +731,24 @@ impl<'a> LookupContext<'a> { } } + /// In the event that we are invoking a method with a receiver + /// of a borrowed type like `&T`, `&mut T`, or `&mut [T]`, + /// we will "reborrow" the receiver implicitly. For example, if + /// you have a call `r.inc()` and where `r` has type `&mut T`, + /// then we treat that like `(&mut *r).inc()`. This avoids + /// consuming the original pointer. + /// + /// You might think that this would be a natural byproduct of + /// the auto-deref/auto-ref process. This is true for `Box` + /// but not for an `&mut T` receiver. With `Box`, we would + /// begin by testing for methods with a self type `Box`, + /// then autoderef to `T`, then autoref to `&mut T`. 
But with + /// an `&mut T` receiver the process begins with `&mut T`, only + /// without any autoadjustments. fn consider_reborrow(&self, self_ty: ty::t, autoderefs: uint) -> (ty::t, ty::AutoDerefRef) { - /*! - * In the event that we are invoking a method with a receiver - * of a borrowed type like `&T`, `&mut T`, or `&mut [T]`, - * we will "reborrow" the receiver implicitly. For example, if - * you have a call `r.inc()` and where `r` has type `&mut T`, - * then we treat that like `(&mut *r).inc()`. This avoids - * consuming the original pointer. - * - * You might think that this would be a natural byproduct of - * the auto-deref/auto-ref process. This is true for `Box` - * but not for an `&mut T` receiver. With `Box`, we would - * begin by testing for methods with a self type `Box`, - * then autoderef to `T`, then autoref to `&mut T`. But with - * an `&mut T` receiver the process begins with `&mut T`, only - * without any autoadjustments. - */ - let tcx = self.tcx(); return match ty::get(self_ty).sty { ty::ty_rptr(_, self_mt) if default_method_hack(self_mt) => { @@ -877,15 +860,12 @@ impl<'a> LookupContext<'a> { } } + /// Searches for a candidate by converting things like + /// `~[]` to `&[]`. fn search_for_autosliced_method(&self, self_ty: ty::t, autoderefs: uint) -> Option { - /*! - * Searches for a candidate by converting things like - * `~[]` to `&[]`. - */ - debug!("search_for_autosliced_method {}", ppaux::ty_to_str(self.tcx(), self_ty)); let sty = ty::get(self_ty).sty.clone(); @@ -913,14 +893,10 @@ impl<'a> LookupContext<'a> { } } + /// Converts any type `T` to `&M T` where `M` is an + /// appropriate mutability. fn search_for_autoptrd_method(&self, self_ty: ty::t, autoderefs: uint) -> Option { - /*! - * - * Converts any type `T` to `&M T` where `M` is an - * appropriate mutability. - */ - let tcx = self.tcx(); match ty::get(self_ty).sty { ty_bare_fn(..) | ty_box(..) | ty_uniq(..) | ty_rptr(..) 
| @@ -1203,15 +1179,12 @@ impl<'a> LookupContext<'a> { } } + /// There are some limitations to calling functions through an + /// object, because (a) the self type is not known + /// (that's the whole point of a trait instance, after all, to + /// obscure the self type) and (b) the call must go through a + /// vtable and hence cannot be monomorphized. fn enforce_object_limitations(&self, candidate: &Candidate) { - /*! - * There are some limitations to calling functions through an - * object, because (a) the self type is not known - * (that's the whole point of a trait instance, after all, to - * obscure the self type) and (b) the call must go through a - * vtable and hence cannot be monomorphized. - */ - match candidate.origin { MethodStatic(..) | MethodParam(..) => { return; // not a call to a trait instance diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs index e4d9bcfad61ce..9aa078d151e21 100644 --- a/src/librustc/middle/typeck/check/mod.rs +++ b/src/librustc/middle/typeck/check/mod.rs @@ -8,74 +8,69 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/* - -# check.rs - -Within the check phase of type check, we check each item one at a time -(bodies of function expressions are checked as part of the containing -function). Inference is used to supply types wherever they are -unknown. - -By far the most complex case is checking the body of a function. This -can be broken down into several distinct phases: - -- gather: creates type variables to represent the type of each local - variable and pattern binding. - -- main: the main pass does the lion's share of the work: it - determines the types of all expressions, resolves - methods, checks for most invalid conditions, and so forth. In - some cases, where a type is unknown, it may create a type or region - variable and use that as the type of an expression. 
- - In the process of checking, various constraints will be placed on - these type variables through the subtyping relationships requested - through the `demand` module. The `typeck::infer` module is in charge - of resolving those constraints. - -- regionck: after main is complete, the regionck pass goes over all - types looking for regions and making sure that they did not escape - into places they are not in scope. This may also influence the - final assignments of the various region variables if there is some - flexibility. - -- vtable: find and records the impls to use for each trait bound that - appears on a type parameter. - -- writeback: writes the final types within a function body, replacing - type variables with their final inferred types. These final types - are written into the `tcx.node_types` table, which should *never* contain - any reference to a type variable. - -## Intermediate types - -While type checking a function, the intermediate types for the -expressions, blocks, and so forth contained within the function are -stored in `fcx.node_types` and `fcx.item_substs`. These types -may contain unresolved type variables. After type checking is -complete, the functions in the writeback module are used to take the -types from this table, resolve them, and then write them into their -permanent home in the type context `ccx.tcx`. - -This means that during inferencing you should use `fcx.write_ty()` -and `fcx.expr_ty()` / `fcx.node_ty()` to write/obtain the types of -nodes within the function. - -The types of top-level items, which never contain unbound type -variables, are stored directly into the `tcx` tables. - -n.b.: A type variable is not the same thing as a type parameter. A -type variable is rather an "instance" of a type parameter: that is, -given a generic function `fn foo(t: T)`: while checking the -function `foo`, the type `ty_param(0)` refers to the type `T`, which -is treated in abstract. 
When `foo()` is called, however, `T` will be -substituted for a fresh type variable `N`. This variable will -eventually be resolved to some concrete type (which might itself be -type parameter). - -*/ - +//! # check.rs +//! +//! Within the check phase of type check, we check each item one at a time +//! (bodies of function expressions are checked as part of the containing +//! function). Inference is used to supply types wherever they are +//! unknown. +//! +//! By far the most complex case is checking the body of a function. This +//! can be broken down into several distinct phases: +//! +//! - gather: creates type variables to represent the type of each local +//! variable and pattern binding. +//! +//! - main: the main pass does the lion's share of the work: it +//! determines the types of all expressions, resolves +//! methods, checks for most invalid conditions, and so forth. In +//! some cases, where a type is unknown, it may create a type or region +//! variable and use that as the type of an expression. +//! +//! In the process of checking, various constraints will be placed on +//! these type variables through the subtyping relationships requested +//! through the `demand` module. The `typeck::infer` module is in charge +//! of resolving those constraints. +//! +//! - regionck: after main is complete, the regionck pass goes over all +//! types looking for regions and making sure that they did not escape +//! into places they are not in scope. This may also influence the +//! final assignments of the various region variables if there is some +//! flexibility. +//! +//! - vtable: finds and records the impls to use for each trait bound that +//! appears on a type parameter. +//! +//! - writeback: writes the final types within a function body, replacing +//! type variables with their final inferred types. These final types +//! are written into the `tcx.node_types` table, which should *never* contain +//! any reference to a type variable. +//! +//! 
## Intermediate types +//! +//! While type checking a function, the intermediate types for the +//! expressions, blocks, and so forth contained within the function are +//! stored in `fcx.node_types` and `fcx.item_substs`. These types +//! may contain unresolved type variables. After type checking is +//! complete, the functions in the writeback module are used to take the +//! types from this table, resolve them, and then write them into their +//! permanent home in the type context `ccx.tcx`. +//! +//! This means that during inferencing you should use `fcx.write_ty()` +//! and `fcx.expr_ty()` / `fcx.node_ty()` to write/obtain the types of +//! nodes within the function. +//! +//! The types of top-level items, which never contain unbound type +//! variables, are stored directly into the `tcx` tables. +//! +//! n.b.: A type variable is not the same thing as a type parameter. A +//! type variable is rather an "instance" of a type parameter: that is, +//! given a generic function `fn foo(t: T)`: while checking the +//! function `foo`, the type `ty_param(0)` refers to the type `T`, which +//! is treated in abstract. When `foo()` is called, however, `T` will be +//! substituted for a fresh type variable `N`. This variable will +//! eventually be resolved to some concrete type (which might itself be +//! type parameter). use middle::const_eval; use middle::def; @@ -434,24 +429,20 @@ impl<'a> Visitor<()> for GatherLocalsVisitor<'a> { } +/// Helper used by check_bare_fn and check_expr_fn. Does the +/// grungy work of checking a function body and returns the +/// function context used for that purpose, since in the case of a +/// fn item there is still a bit more to do. +/// +/// - ... +/// - inherited: other fields inherited from the enclosing fn (if any) fn check_fn<'a>(ccx: &'a CrateCtxt<'a>, fn_style: ast::FnStyle, fn_sig: &ty::FnSig, decl: &ast::FnDecl, id: ast::NodeId, body: &ast::Block, - inherited: &'a Inherited<'a>) -> FnCtxt<'a> -{ - /*! 
- * Helper used by check_bare_fn and check_expr_fn. Does the - * grungy work of checking a function body and returns the - * function context used for that purpose, since in the case of a - * fn item there is still a bit more to do. - * - * - ... - * - inherited: other fields inherited from the enclosing fn (if any) - */ - + inherited: &'a Inherited<'a>) -> FnCtxt<'a> { let tcx = ccx.tcx; let err_count_on_creation = tcx.sess.err_count(); @@ -738,19 +729,16 @@ pub fn check_item(ccx: &CrateCtxt, it: &ast::Item) { } } +/// Type checks a method body. +/// +/// # Parameters +/// - `item_generics`: generics defined on the impl/trait that contains +/// the method +/// - `self_bound`: bound for the `Self` type parameter, if any +/// - `method`: the method definition fn check_method_body(ccx: &CrateCtxt, item_generics: &ty::Generics, method: &ast::Method) { - /*! - * Type checks a method body. - * - * # Parameters - * - `item_generics`: generics defined on the impl/trait that contains - * the method - * - `self_bound`: bound for the `Self` type parameter, if any - * - `method`: the method definition - */ - debug!("check_method_body(item_generics={}, method.id={})", item_generics.repr(ccx.tcx), method.id); @@ -833,19 +821,17 @@ fn check_impl_methods_against_trait(ccx: &CrateCtxt, } } -/** - * Checks that a method from an impl/class conforms to the signature of - * the same method as declared in the trait. - * - * # Parameters - * - * - impl_generics: the generics declared on the impl itself (not the method!) - * - impl_m: type of the method we are checking - * - impl_m_span: span to use for reporting errors - * - impl_m_body_id: id of the method body - * - trait_m: the method in the trait - * - trait_to_impl_substs: the substitutions used on the type of the trait - */ +/// Checks that a method from an impl/class conforms to the signature of +/// the same method as declared in the trait. 
+/// +/// # Parameters +/// +/// - impl_generics: the generics declared on the impl itself (not the method!) +/// - impl_m: type of the method we are checking +/// - impl_m_span: span to use for reporting errors +/// - impl_m_body_id: id of the method body +/// - trait_m: the method in the trait +/// - trait_to_impl_substs: the substitutions used on the type of the trait fn compare_impl_method(tcx: &ty::ctxt, impl_m: &ty::Method, impl_m_span: Span, @@ -1196,24 +1182,22 @@ fn check_cast(fcx: &FnCtxt, // need to special-case obtaining an unsafe pointer // from a region pointer to a vector. - /* this cast is only allowed from &[T] to *T or - &T to *T. */ + // this cast is only allowed from &[T] to *T or + // &T to *T. match (&ty::get(t_e).sty, &ty::get(t_1).sty) { (&ty::ty_rptr(_, ty::mt { ty: mt1, mutbl: ast::MutImmutable }), &ty::ty_ptr(ty::mt { ty: mt2, mutbl: ast::MutImmutable })) if types_compatible(fcx, e.span, mt1, mt2) => { - /* this case is allowed */ + // this case is allowed } _ => { demand::coerce(fcx, e.span, t_1, &*e); } } } else if !(ty::type_is_scalar(t_e) && t_1_is_trivial) { - /* - If more type combinations should be supported than are - supported here, then file an enhancement issue and - record the issue number in this comment. - */ + // If more type combinations should be supported than are + // supported here, then file an enhancement issue and + // record the issue number in this comment. fcx.type_error_message(span, |actual| { format!("non-scalar cast: `{}` as `{}`", actual, @@ -1482,21 +1466,18 @@ pub enum LvaluePreference { NoPreference } +/// Executes an autoderef loop for the type `t`. At each step, invokes +/// `should_stop` to decide whether to terminate the loop. Returns +/// the final type and number of derefs that it performed. +/// +/// Note: this method does not modify the adjustments table. The caller is +/// responsible for inserting an AutoAdjustment record into the `fcx` +/// using one of the suitable methods. 
pub fn autoderef(fcx: &FnCtxt, sp: Span, base_ty: ty::t, expr_id: Option, mut lvalue_pref: LvaluePreference, should_stop: |ty::t, uint| -> Option) -> (ty::t, uint, Option) { - /*! - * Executes an autoderef loop for the type `t`. At each step, invokes - * `should_stop` to decide whether to terminate the loop. Returns - * the final type and number of derefs that it performed. - * - * Note: this method does not modify the adjustments table. The caller is - * responsible for inserting an AutoAdjustment record into the `fcx` - * using one of the suitable methods. - */ - let mut t = base_ty; for autoderefs in range(0, fcx.tcx().sess.recursion_limit.get()) { let resolved_t = structurally_resolved_type(fcx, sp, t); @@ -1676,6 +1657,8 @@ fn check_method_argument_types(fcx: &FnCtxt, } } +/// Generic function that factors out common logic from +/// function calls, method calls and overloaded operators. fn check_argument_types(fcx: &FnCtxt, sp: Span, fn_inputs: &[ty::t], @@ -1684,12 +1667,6 @@ fn check_argument_types(fcx: &FnCtxt, deref_args: DerefArgs, variadic: bool, tuple_arguments: TupleArgumentsFlag) { - /*! - * - * Generic function that factors out common logic from - * function calls, method calls and overloaded operators. - */ - let tcx = fcx.ccx.tcx; // Grab the argument types, supplying fresh type variables @@ -4155,31 +4132,27 @@ pub fn instantiate_path(fcx: &FnCtxt, } } + /// Finds the parameters that the user provided and adds them + /// to `substs`. If too many parameters are provided, then + /// reports an error and clears the output vector. + /// + /// We clear the output vector because that will cause the + /// `adjust_XXX_parameters()` later to use inference + /// variables. This seems less likely to lead to derived + /// errors. + /// + /// Note that we *do not* check for *too few* parameters here. + /// Due to the presence of defaults etc that is more + /// complicated. 
I wanted however to do the reporting of *too + /// many* parameters here because we can easily use the precise + /// span of the N+1'th parameter. fn push_explicit_parameters_from_segment_to_substs( fcx: &FnCtxt, space: subst::ParamSpace, type_defs: &VecPerParamSpace, region_defs: &VecPerParamSpace, segment: &ast::PathSegment, - substs: &mut Substs) - { - /*! - * Finds the parameters that the user provided and adds them - * to `substs`. If too many parameters are provided, then - * reports an error and clears the output vector. - * - * We clear the output vector because that will cause the - * `adjust_XXX_parameters()` later to use inference - * variables. This seems less likely to lead to derived - * errors. - * - * Note that we *do not* check for *too few* parameters here. - * Due to the presence of defaults etc that is more - * complicated. I wanted however to do the reporting of *too - * many* parameters here because we can easily use the precise - * span of the N+1'th parameter. - */ - + substs: &mut Substs) { { let type_count = type_defs.len(space); assert_eq!(substs.types.len(space), 0); diff --git a/src/librustc/middle/typeck/check/regionck.rs b/src/librustc/middle/typeck/check/regionck.rs index 07fb43d0d34df..19d39a580ad4b 100644 --- a/src/librustc/middle/typeck/check/regionck.rs +++ b/src/librustc/middle/typeck/check/regionck.rs @@ -8,115 +8,111 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -The region check is a final pass that runs over the AST after we have -inferred the type constraints but before we have actually finalized -the types. Its purpose is to embed a variety of region constraints. -Inserting these constraints as a separate pass is good because (1) it -localizes the code that has to do with region inference and (2) often -we cannot know what constraints are needed until the basic types have -been inferred. 
- -### Interaction with the borrow checker - -In general, the job of the borrowck module (which runs later) is to -check that all soundness criteria are met, given a particular set of -regions. The job of *this* module is to anticipate the needs of the -borrow checker and infer regions that will satisfy its requirements. -It is generally true that the inference doesn't need to be sound, -meaning that if there is a bug and we inferred bad regions, the borrow -checker should catch it. This is not entirely true though; for -example, the borrow checker doesn't check subtyping, and it doesn't -check that region pointers are always live when they are used. It -might be worthwhile to fix this so that borrowck serves as a kind of -verification step -- that would add confidence in the overall -correctness of the compiler, at the cost of duplicating some type -checks and effort. - -### Inferring the duration of borrows, automatic and otherwise - -Whenever we introduce a borrowed pointer, for example as the result of -a borrow expression `let x = &data`, the lifetime of the pointer `x` -is always specified as a region inference variable. `regionck` has the -job of adding constraints such that this inference variable is as -narrow as possible while still accommodating all uses (that is, every -dereference of the resulting pointer must be within the lifetime). - -#### Reborrows - -Generally speaking, `regionck` does NOT try to ensure that the data -`data` will outlive the pointer `x`. That is the job of borrowck. The -one exception is when "re-borrowing" the contents of another borrowed -pointer. For example, imagine you have a borrowed pointer `b` with -lifetime L1 and you have an expression `&*b`. The result of this -expression will be another borrowed pointer with lifetime L2 (which is -an inference variable). The borrow checker is going to enforce the -constraint that L2 < L1, because otherwise you are re-borrowing data -for a lifetime larger than the original loan. 
However, without the -routines in this module, the region inferencer would not know of this -dependency and thus it might infer the lifetime of L2 to be greater -than L1 (issue #3148). - -There are a number of troublesome scenarios in the tests -`region-dependent-*.rs`, but here is one example: - - struct Foo { i: int } - struct Bar { foo: Foo } - fn get_i(x: &'a Bar) -> &'a int { - let foo = &x.foo; // Lifetime L1 - &foo.i // Lifetime L2 - } - -Note that this comes up either with `&` expressions, `ref` -bindings, and `autorefs`, which are the three ways to introduce -a borrow. - -The key point here is that when you are borrowing a value that -is "guaranteed" by a borrowed pointer, you must link the -lifetime of that borrowed pointer (L1, here) to the lifetime of -the borrow itself (L2). What do I mean by "guaranteed" by a -borrowed pointer? I mean any data that is reached by first -dereferencing a borrowed pointer and then either traversing -interior offsets or owned pointers. We say that the guarantor -of such data it the region of the borrowed pointer that was -traversed. This is essentially the same as the ownership -relation, except that a borrowed pointer never owns its -contents. - -### Inferring borrow kinds for upvars - -Whenever there is a closure expression, we need to determine how each -upvar is used. We do this by initially assigning each upvar an -immutable "borrow kind" (see `ty::BorrowKind` for details) and then -"escalating" the kind as needed. The borrow kind proceeds according to -the following lattice: - - ty::ImmBorrow -> ty::UniqueImmBorrow -> ty::MutBorrow - -So, for example, if we see an assignment `x = 5` to an upvar `x`, we -will promote its borrow kind to mutable borrow. If we see an `&mut x` -we'll do the same. Naturally, this applies not just to the upvar, but -to everything owned by `x`, so the result is the same for something -like `x.f = 5` and so on (presuming `x` is not a borrowed pointer to a -struct). 
These adjustments are performed in -`adjust_upvar_borrow_kind()` (you can trace backwards through the code -from there). - -The fact that we are inferring borrow kinds as we go results in a -semi-hacky interaction with mem-categorization. In particular, -mem-categorization will query the current borrow kind as it -categorizes, and we'll return the *current* value, but this may get -adjusted later. Therefore, in this module, we generally ignore the -borrow kind (and derived mutabilities) that are returned from -mem-categorization, since they may be inaccurate. (Another option -would be to use a unification scheme, where instead of returning a -concrete borrow kind like `ty::ImmBorrow`, we return a -`ty::InferBorrow(upvar_id)` or something like that, but this would -then mean that all later passes would have to check for these figments -and report an error, and it just seems like more mess in the end.) - -*/ +//! The region check is a final pass that runs over the AST after we have +//! inferred the type constraints but before we have actually finalized +//! the types. Its purpose is to embed a variety of region constraints. +//! Inserting these constraints as a separate pass is good because (1) it +//! localizes the code that has to do with region inference and (2) often +//! we cannot know what constraints are needed until the basic types have +//! been inferred. +//! +//! ### Interaction with the borrow checker +//! +//! In general, the job of the borrowck module (which runs later) is to +//! check that all soundness criteria are met, given a particular set of +//! regions. The job of *this* module is to anticipate the needs of the +//! borrow checker and infer regions that will satisfy its requirements. +//! It is generally true that the inference doesn't need to be sound, +//! meaning that if there is a bug and we inferred bad regions, the borrow +//! checker should catch it. This is not entirely true though; for +//! 
example, the borrow checker doesn't check subtyping, and it doesn't +//! check that region pointers are always live when they are used. It +//! might be worthwhile to fix this so that borrowck serves as a kind of +//! verification step -- that would add confidence in the overall +//! correctness of the compiler, at the cost of duplicating some type +//! checks and effort. +//! +//! ### Inferring the duration of borrows, automatic and otherwise +//! +//! Whenever we introduce a borrowed pointer, for example as the result of +//! a borrow expression `let x = &data`, the lifetime of the pointer `x` +//! is always specified as a region inference variable. `regionck` has the +//! job of adding constraints such that this inference variable is as +//! narrow as possible while still accommodating all uses (that is, every +//! dereference of the resulting pointer must be within the lifetime). +//! +//! #### Reborrows +//! +//! Generally speaking, `regionck` does NOT try to ensure that the data +//! `data` will outlive the pointer `x`. That is the job of borrowck. The +//! one exception is when "re-borrowing" the contents of another borrowed +//! pointer. For example, imagine you have a borrowed pointer `b` with +//! lifetime L1 and you have an expression `&*b`. The result of this +//! expression will be another borrowed pointer with lifetime L2 (which is +//! an inference variable). The borrow checker is going to enforce the +//! constraint that L2 < L1, because otherwise you are re-borrowing data +//! for a lifetime larger than the original loan. However, without the +//! routines in this module, the region inferencer would not know of this +//! dependency and thus it might infer the lifetime of L2 to be greater +//! than L1 (issue #3148). +//! +//! There are a number of troublesome scenarios in the tests +//! `region-dependent-*.rs`, but here is one example: +//! +//! struct Foo { i: int } +//! struct Bar { foo: Foo } +//! fn get_i(x: &'a Bar) -> &'a int { +//! 
let foo = &x.foo; // Lifetime L1 +//! &foo.i // Lifetime L2 +//! } +//! +//! Note that this comes up either with `&` expressions, `ref` +//! bindings, and `autorefs`, which are the three ways to introduce +//! a borrow. +//! +//! The key point here is that when you are borrowing a value that +//! is "guaranteed" by a borrowed pointer, you must link the +//! lifetime of that borrowed pointer (L1, here) to the lifetime of +//! the borrow itself (L2). What do I mean by "guaranteed" by a +//! borrowed pointer? I mean any data that is reached by first +//! dereferencing a borrowed pointer and then either traversing +//! interior offsets or owned pointers. We say that the guarantor +//! of such data is the region of the borrowed pointer that was +//! traversed. This is essentially the same as the ownership +//! relation, except that a borrowed pointer never owns its +//! contents. +//! +//! ### Inferring borrow kinds for upvars +//! +//! Whenever there is a closure expression, we need to determine how each +//! upvar is used. We do this by initially assigning each upvar an +//! immutable "borrow kind" (see `ty::BorrowKind` for details) and then +//! "escalating" the kind as needed. The borrow kind proceeds according to +//! the following lattice: +//! +//! ty::ImmBorrow -> ty::UniqueImmBorrow -> ty::MutBorrow +//! +//! So, for example, if we see an assignment `x = 5` to an upvar `x`, we +//! will promote its borrow kind to mutable borrow. If we see an `&mut x` +//! we'll do the same. Naturally, this applies not just to the upvar, but +//! to everything owned by `x`, so the result is the same for something +//! like `x.f = 5` and so on (presuming `x` is not a borrowed pointer to a +//! struct). These adjustments are performed in +//! `adjust_upvar_borrow_kind()` (you can trace backwards through the code +//! from there). +//! +//! The fact that we are inferring borrow kinds as we go results in a +//! semi-hacky interaction with mem-categorization. In particular, +//! 
mem-categorization will query the current borrow kind as it +//! categorizes, and we'll return the *current* value, but this may get +//! adjusted later. Therefore, in this module, we generally ignore the +//! borrow kind (and derived mutabilities) that are returned from +//! mem-categorization, since they may be inaccurate. (Another option +//! would be to use a unification scheme, where instead of returning a +//! concrete borrow kind like `ty::ImmBorrow`, we return a +//! `ty::InferBorrow(upvar_id)` or something like that, but this would +//! then mean that all later passes would have to check for these figments +//! and report an error, and it just seems like more mess in the end.) use middle::def; use middle::def::{DefArg, DefBinding, DefLocal, DefUpvar}; @@ -163,12 +159,9 @@ pub struct Rcx<'a> { repeating_scope: ast::NodeId, } +/// Returns the validity region of `def` -- that is, how long +/// is `def` valid? fn region_of_def(fcx: &FnCtxt, def: def::Def) -> ty::Region { - /*! - * Returns the validity region of `def` -- that is, how long - * is `def` valid? - */ - let tcx = fcx.tcx(); match def { DefLocal(node_id, _) | DefArg(node_id, _) | @@ -199,35 +192,33 @@ impl<'a> Rcx<'a> { old_scope } + /// Try to resolve the type for the given node, returning + /// t_err if an error results. Note that we never care + /// about the details of the error, the same error will be + /// detected and reported in the writeback phase. + /// + /// Note one important point: we do not attempt to resolve + /// *region variables* here. This is because regionck is + /// essentially adding constraints to those region variables + /// and so may yet influence how they are resolved. + /// + /// Consider this silly example: + /// + /// fn borrow(x: &int) -> &int {x} + /// fn foo(x: @int) -> int { // block: B + /// let b = borrow(x); // region: + /// *b + /// } + /// + /// Here, the region of `b` will be ``. 
`` is + /// constrained to be some subregion of the block B and some + /// superregion of the call. If we forced it now, we'd choose + /// the smaller region (the call). But that would make the *b + /// illegal. Since we don't resolve, the type of b will be + /// `&.int` and then `*b` will require that `` be + /// bigger than the let and the `*b` expression, so we will + /// effectively resolve `` to be the block B. pub fn resolve_type(&self, unresolved_ty: ty::t) -> ty::t { - /*! - * Try to resolve the type for the given node, returning - * t_err if an error results. Note that we never care - * about the details of the error, the same error will be - * detected and reported in the writeback phase. - * - * Note one important point: we do not attempt to resolve - * *region variables* here. This is because regionck is - * essentially adding constraints to those region variables - * and so may yet influence how they are resolved. - * - * Consider this silly example: - * - * fn borrow(x: &int) -> &int {x} - * fn foo(x: @int) -> int { // block: B - * let b = borrow(x); // region: - * *b - * } - * - * Here, the region of `b` will be ``. `` is - * constrainted to be some subregion of the block B and some - * superregion of the call. If we forced it now, we'd choose - * the smaller region (the call). But that would make the *b - * illegal. Since we don't resolve, the type of b will be - * `&.int` and then `*b` will require that `` be - * bigger than the let and the `*b` expression, so we will - * effectively resolve `` to be the block B. - */ match resolve_type(self.fcx.infcx(), None, unresolved_ty, resolve_and_force_all_but_regions) { Ok(t) => t, @@ -656,16 +647,13 @@ fn check_expr_fn_block(rcx: &mut Rcx, _ => () } + /// Make sure that all free variables referenced inside the closure + /// outlive the closure itself. Also, create an entry in the + /// upvar_borrows map with a region. 
fn constrain_free_variables(rcx: &mut Rcx, region: ty::Region, expr: &ast::Expr, freevars: &[freevars::freevar_entry]) { - /*! - * Make sure that all free variables referenced inside the closure - * outlive the closure itself. Also, create an entry in the - * upvar_borrows map with a region. - */ - let tcx = rcx.fcx.ccx.tcx; let infcx = rcx.fcx.infcx(); debug!("constrain_free_variables({}, {})", @@ -864,15 +852,13 @@ fn constrain_call(rcx: &mut Rcx, fn_sig.output); } +/// Invoked on any auto-dereference that occurs. Checks that if +/// this is a region pointer being dereferenced, the lifetime of +/// the pointer includes the deref expr. fn constrain_autoderefs(rcx: &mut Rcx, deref_expr: &ast::Expr, derefs: uint, mut derefd_ty: ty::t) { - /*! - * Invoked on any auto-dereference that occurs. Checks that if - * this is a region pointer being dereferenced, the lifetime of - * the pointer includes the deref expr. - */ let r_deref_expr = ty::ReScope(deref_expr.id); for i in range(0u, derefs) { debug!("constrain_autoderefs(deref_expr=?, derefd_ty={}, derefs={:?}/{:?}", @@ -921,8 +907,8 @@ fn constrain_autoderefs(rcx: &mut Rcx, match ty::deref(derefd_ty, true) { Some(mt) => derefd_ty = mt.ty, - /* if this type can't be dereferenced, then there's already an error - in the session saying so. Just bail out for now */ + // if this type can't be dereferenced, then there's already an error + // in the session saying so. Just bail out for now None => break } } @@ -937,16 +923,12 @@ pub fn mk_subregion_due_to_dereference(rcx: &mut Rcx, } +/// Invoked on any index expression that occurs. Checks that if +/// this is a slice being indexed, the lifetime of the pointer +/// includes the deref expr. fn constrain_index(rcx: &mut Rcx, index_expr: &ast::Expr, - indexed_ty: ty::t) -{ - /*! - * Invoked on any index expression that occurs. Checks that if - * this is a slice being indexed, the lifetime of the pointer - * includes the deref expr. 
- */ - + indexed_ty: ty::t) { debug!("constrain_index(index_expr=?, indexed_ty={}", rcx.fcx.infcx().ty_to_str(indexed_ty)); @@ -989,25 +971,21 @@ fn constrain_regions_in_type_of_node( constrain_regions_in_type(rcx, minimum_lifetime, origin, ty); } -fn constrain_regions_in_type( - rcx: &mut Rcx, - minimum_lifetime: ty::Region, - origin: infer::SubregionOrigin, - ty: ty::t) { - /*! - * Requires that any regions which appear in `ty` must be - * superregions of `minimum_lifetime`. Also enforces the constraint - * that given a pointer type `&'r T`, T must not contain regions - * that outlive 'r, as well as analogous constraints for other - * lifetime'd types. - * - * This check prevents regions from being used outside of the block in - * which they are valid. Recall that regions represent blocks of - * code or expressions: this requirement basically says "any place - * that uses or may use a region R must be within the block of - * code that R corresponds to." - */ - +/// Requires that any regions which appear in `ty` must be +/// superregions of `minimum_lifetime`. Also enforces the constraint +/// that given a pointer type `&'r T`, T must not contain regions +/// that outlive 'r, as well as analogous constraints for other +/// lifetime'd types. +/// +/// This check prevents regions from being used outside of the block in +/// which they are valid. Recall that regions represent blocks of +/// code or expressions: this requirement basically says "any place +/// that uses or may use a region R must be within the block of +/// code that R corresponds to." 
+fn constrain_regions_in_type(rcx: &mut Rcx, + minimum_lifetime: ty::Region, + origin: infer::SubregionOrigin, + ty: ty::t) { let tcx = rcx.fcx.ccx.tcx; debug!("constrain_regions_in_type(minimum_lifetime={}, ty={})", @@ -1036,14 +1014,11 @@ fn constrain_regions_in_type( }); } +/// Computes the guarantor for an expression `&base` and then +/// ensures that the lifetime of the resulting pointer is linked +/// to the lifetime of its guarantor (if any). fn link_addr_of(rcx: &mut Rcx, expr: &ast::Expr, mutability: ast::Mutability, base: &ast::Expr) { - /*! - * Computes the guarantor for an expression `&base` and then - * ensures that the lifetime of the resulting pointer is linked - * to the lifetime of its guarantor (if any). - */ - debug!("link_addr_of(base=?)"); let cmt = { @@ -1053,13 +1028,10 @@ fn link_addr_of(rcx: &mut Rcx, expr: &ast::Expr, link_region_from_node_type(rcx, expr.span, expr.id, mutability, cmt); } +/// Computes the guarantors for any ref bindings in a `let` and +/// then ensures that the lifetime of the resulting pointer is +/// linked to the lifetime of the initialization expression. fn link_local(rcx: &Rcx, local: &ast::Local) { - /*! - * Computes the guarantors for any ref bindings in a `let` and - * then ensures that the lifetime of the resulting pointer is - * linked to the lifetime of the initialization expression. - */ - debug!("regionck::for_local()"); let init_expr = match local.init { None => { return; } @@ -1070,13 +1042,10 @@ fn link_local(rcx: &Rcx, local: &ast::Local) { link_pattern(rcx, mc, discr_cmt, &*local.pat); } +/// Computes the guarantors for any ref bindings in a match and +/// then ensures that the lifetime of the resulting pointer is +/// linked to the lifetime of its guarantor (if any). fn link_match(rcx: &Rcx, discr: &ast::Expr, arms: &[ast::Arm]) { - /*! 
- * Computes the guarantors for any ref bindings in a match and - * then ensures that the lifetime of the resulting pointer is - * linked to the lifetime of its guarantor (if any). - */ - debug!("regionck::for_match()"); let mc = mc::MemCategorizationContext::new(rcx); let discr_cmt = ignore_err!(mc.cat_expr(discr)); @@ -1088,15 +1057,12 @@ fn link_match(rcx: &Rcx, discr: &ast::Expr, arms: &[ast::Arm]) { } } +/// Link lifetimes of any ref bindings in `root_pat` to +/// the pointers found in the discriminant, if needed. fn link_pattern(rcx: &Rcx, mc: mc::MemCategorizationContext, discr_cmt: mc::cmt, root_pat: &ast::Pat) { - /*! - * Link lifetimes of any ref bindings in `root_pat` to - * the pointers found in the discriminant, if needed. - */ - let _ = mc.cat_pattern(discr_cmt, root_pat, |mc, sub_cmt, sub_pat| { match sub_pat.node { // `ref x` pattern @@ -1122,15 +1088,12 @@ fn link_pattern(rcx: &Rcx, }); } +/// Link lifetime of borrowed pointer resulting from autoref +/// to lifetimes in the value being autoref'd. fn link_autoref(rcx: &Rcx, expr: &ast::Expr, autoderefs: uint, autoref: &ty::AutoRef) { - /*! - * Link lifetime of borrowed pointer resulting from autoref - * to lifetimes in the value being autoref'd. - */ - debug!("link_autoref(autoref={:?})", autoref); let mc = mc::MemCategorizationContext::new(rcx); let expr_cmt = ignore_err!(mc.cat_expr_autoderefd(expr, autoderefs)); @@ -1158,15 +1121,12 @@ fn link_autoref(rcx: &Rcx, } } +/// Computes the guarantor for cases where the `expr` is +/// being passed by implicit reference and must outlive +/// `callee_scope`. fn link_by_ref(rcx: &Rcx, expr: &ast::Expr, callee_scope: ast::NodeId) { - /*! - * Computes the guarantor for cases where the `expr` is - * being passed by implicit reference and must outlive - * `callee_scope`. 
- */ - let tcx = rcx.tcx(); debug!("link_by_ref(expr={}, callee_scope={})", expr.repr(tcx), callee_scope); @@ -1176,17 +1136,14 @@ fn link_by_ref(rcx: &Rcx, link_region(rcx, expr.span, region_min, ty::ImmBorrow, expr_cmt); } +/// Like `link_region()`, except that the region is +/// extracted from the type of `id`, which must be some +/// reference (`&T`, `&str`, etc). fn link_region_from_node_type(rcx: &Rcx, span: Span, id: ast::NodeId, mutbl: ast::Mutability, cmt_borrowed: mc::cmt) { - /*! - * Like `link_region()`, except that the region is - * extracted from the type of `id`, which must be some - * reference (`&T`, `&str`, etc). - */ - let rptr_ty = rcx.resolve_node_type(id); if !ty::type_is_bot(rptr_ty) && !ty::type_is_error(rptr_ty) { let tcx = rcx.fcx.ccx.tcx; @@ -1197,19 +1154,16 @@ fn link_region_from_node_type(rcx: &Rcx, } } +/// Informs the inference engine that a borrow of `cmt` +/// must have the borrow kind `kind` and lifetime `region_min`. +/// If `cmt` is a deref of a region pointer with +/// lifetime `r_borrowed`, this will add the constraint that +/// `region_min <= r_borrowed`. fn link_region(rcx: &Rcx, span: Span, region_min: ty::Region, kind: ty::BorrowKind, cmt_borrowed: mc::cmt) { - /*! - * Informs the inference engine that a borrow of `cmt` - * must have the borrow kind `kind` and lifetime `region_min`. - * If `cmt` is a deref of a region pointer with - * lifetime `r_borrowed`, this will add the constraint that - * `region_min <= r_borrowed`. - */ - // Iterate through all the things that must be live at least // for the lifetime `region_min` for the borrow to be valid: let mut cmt_borrowed = cmt_borrowed; @@ -1309,14 +1263,11 @@ fn link_region(rcx: &Rcx, } } +/// Adjusts the inferred borrow_kind as needed to account +/// for upvars that are assigned to in an assignment +/// expression. fn adjust_borrow_kind_for_assignment_lhs(rcx: &Rcx, lhs: &ast::Expr) { - /*! 
- * Adjusts the inferred borrow_kind as needed to account - * for upvars that are assigned to in an assignment - * expression. - */ - let mc = mc::MemCategorizationContext::new(rcx); let cmt = ignore_err!(mc.cat_expr(lhs)); adjust_upvar_borrow_kind_for_mut(rcx, cmt); @@ -1428,16 +1379,13 @@ fn adjust_upvar_borrow_kind_for_unique(rcx: &Rcx, cmt: mc::cmt) { } } +/// Indicates that the borrow_kind of `outer_upvar_id` must +/// permit a reborrowing with the borrow_kind of `inner_upvar_id`. +/// This occurs in nested closures, see comment above at the call to +/// this function. fn link_upvar_borrow_kind_for_nested_closures(rcx: &mut Rcx, inner_upvar_id: ty::UpvarId, outer_upvar_id: ty::UpvarId) { - /*! - * Indicates that the borrow_kind of `outer_upvar_id` must - * permit a reborrowing with the borrow_kind of `inner_upvar_id`. - * This occurs in nested closures, see comment above at the call to - * this function. - */ - debug!("link_upvar_borrow_kind: inner_upvar_id={:?} outer_upvar_id={:?}", inner_upvar_id, outer_upvar_id); @@ -1460,17 +1408,14 @@ fn adjust_upvar_borrow_kind_for_loan(upvar_id: ty::UpvarId, adjust_upvar_borrow_kind(upvar_id, upvar_borrow, kind) } +/// We infer the borrow_kind with which to borrow upvars in a stack +/// closure. The borrow_kind basically follows a lattice of +/// `imm < unique-imm < mut`, moving from left to right as needed (but never +/// right to left). Here the argument `mutbl` is the borrow_kind that +/// is required by some particular use. fn adjust_upvar_borrow_kind(upvar_id: ty::UpvarId, upvar_borrow: &mut ty::UpvarBorrow, kind: ty::BorrowKind) { - /*! - * We infer the borrow_kind with which to borrow upvars in a stack - * closure. The borrow_kind basically follows a lattice of - * `imm < unique-imm < mut`, moving from left to right as needed (but never - * right to left). Here the argument `mutbl` is the borrow_kind that - * is required by some particular use. 
debug!("adjust_upvar_borrow_kind: id={:?} kind=({:?} -> {:?})", upvar_id, upvar_borrow.kind, kind); diff --git a/src/librustc/middle/typeck/check/regionmanip.rs b/src/librustc/middle/typeck/check/regionmanip.rs index 146b42a00ffaf..338e7a185e0ad 100644 --- a/src/librustc/middle/typeck/check/regionmanip.rs +++ b/src/librustc/middle/typeck/check/regionmanip.rs @@ -44,36 +44,33 @@ pub fn replace_late_bound_regions_in_fn_sig( (map, fn_sig) } +/// This rather specialized function walks each region `r` that appears +/// in `ty` and invokes `relate_op(r_encl, r)` for each one. `r_encl` +/// here is the region of any enclosing `&'r T` pointer. If there is +/// no enclosing pointer, and `opt_region` is Some, then `opt_region.get()` +/// is used instead. Otherwise, no callback occurs at all. +/// +/// Here are some examples to give you an intuition: +/// +/// - `relate_nested_regions(Some('r1), &'r2 uint)` invokes +/// - `relate_op('r1, 'r2)` +/// - `relate_nested_regions(Some('r1), &'r2 &'r3 uint)` invokes +/// - `relate_op('r1, 'r2)` +/// - `relate_op('r2, 'r3)` +/// - `relate_nested_regions(None, &'r2 &'r3 uint)` invokes +/// - `relate_op('r2, 'r3)` +/// - `relate_nested_regions(None, &'r2 &'r3 &'r4 uint)` invokes +/// - `relate_op('r2, 'r3)` +/// - `relate_op('r2, 'r4)` +/// - `relate_op('r3, 'r4)` +/// +/// This function is used in various pieces of code because we enforce the +/// constraint that a region pointer cannot outlive the things it points at. +/// Hence, in the second example above, `'r2` must be a subregion of `'r3`. pub fn relate_nested_regions(tcx: &ty::ctxt, opt_region: Option, ty: ty::t, relate_op: |ty::Region, ty::Region|) { - /*! - * This rather specialized function walks each region `r` that appear - * in `ty` and invokes `relate_op(r_encl, r)` for each one. `r_encl` - * here is the region of any enclosing `&'r T` pointer. If there is - * no enclosing pointer, and `opt_region` is Some, then `opt_region.get()` - * is used instead.
Otherwise, no callback occurs at all). - * - * Here are some examples to give you an intution: - * - * - `relate_nested_regions(Some('r1), &'r2 uint)` invokes - * - `relate_op('r1, 'r2)` - * - `relate_nested_regions(Some('r1), &'r2 &'r3 uint)` invokes - * - `relate_op('r1, 'r2)` - * - `relate_op('r2, 'r3)` - * - `relate_nested_regions(None, &'r2 &'r3 uint)` invokes - * - `relate_op('r2, 'r3)` - * - `relate_nested_regions(None, &'r2 &'r3 &'r4 uint)` invokes - * - `relate_op('r2, 'r3)` - * - `relate_op('r2, 'r4)` - * - `relate_op('r3, 'r4)` - * - * This function is used in various pieces of code because we enforce the - * constraint that a region pointer cannot outlive the things it points at. - * Hence, in the second example above, `'r2` must be a subregion of `'r3`. - */ - let mut rr = RegionRelator { tcx: tcx, stack: Vec::new(), relate_op: relate_op }; @@ -132,19 +129,16 @@ pub fn relate_nested_regions(tcx: &ty::ctxt, } } +/// This function populates the region map's `free_region_map`. +/// It walks over the transformed self type and argument types +/// for each function just before we check the body of that +/// function, looking for types where you have a borrowed +/// pointer to other borrowed data (e.g., `&'a &'b [uint]`. +/// We do not allow references to outlive the things they +/// point at, so we can assume that `'a <= 'b`. +/// +/// Tests: `src/test/compile-fail/regions-free-region-ordering-*.rs` pub fn relate_free_regions(tcx: &ty::ctxt, fn_sig: &ty::FnSig) { - /*! - * This function populates the region map's `free_region_map`. - * It walks over the transformed self type and argument types - * for each function just before we check the body of that - * function, looking for types where you have a borrowed - * pointer to other borrowed data (e.g., `&'a &'b [uint]`. - * We do not allow references to outlive the things they - * point at, so we can assume that `'a <= 'b`. 
- * - * Tests: `src/test/compile-fail/regions-free-region-ordering-*.rs` - */ - debug!("relate_free_regions >>"); let mut all_tys = Vec::new(); diff --git a/src/librustc/middle/typeck/check/vtable.rs b/src/librustc/middle/typeck/check/vtable.rs index bda47d99ed714..c601b4dd32aac 100644 --- a/src/librustc/middle/typeck/check/vtable.rs +++ b/src/librustc/middle/typeck/check/vtable.rs @@ -172,17 +172,13 @@ fn lookup_vtables_for_param(vcx: &VtableContext, param_result } +/// Checks that an implementation of `act_trait_ref` is suitable +/// for use where `exp_trait_ref` is required and reports an +/// error otherwise. fn relate_trait_refs(vcx: &VtableContext, span: Span, act_trait_ref: Rc, exp_trait_ref: Rc) { - /*! - * - * Checks that an implementation of `act_trait_ref` is suitable - * for use where `exp_trait_ref` is required and reports an - * error otherwise. - */ - match infer::mk_sub_trait_refs(vcx.infcx, false, infer::RelateTraitRefs(span), @@ -758,34 +754,31 @@ pub fn early_resolve_expr(ex: &ast::Expr, fcx: &FnCtxt, is_early: bool) { } } +/// The situation is as follows. We have some trait like: +/// +/// trait Foo : Bar { +/// fn method() { ... } +/// } +/// +/// and an impl like: +/// +/// impl Foo for int { ... } +/// +/// We want to validate that the various requirements of the trait +/// are met: +/// +/// A:Clone, Self:Bar +/// +/// But of course after substituting the types from the impl: +/// +/// B:Clone, int:Bar +/// +/// We store these results away as the "impl_res" for use by the +/// default methods. pub fn resolve_impl(tcx: &ty::ctxt, impl_item: &ast::Item, impl_generics: &ty::Generics, impl_trait_ref: &ty::TraitRef) { - /*! - * The situation is as follows. We have some trait like: - * - * trait Foo : Bar { - * fn method() { ... } - * } - * - * and an impl like: - * - * impl Foo for int { ... 
} - * - * We want to validate that the various requirements of the trait - * are met: - * - * A:Clone, Self:Bar - * - * But of course after substituting the types from the impl: - * - * B:Clone, int:Bar - * - * We store these results away as the "impl_res" for use by the - * default methods. - */ - debug!("resolve_impl(impl_item.id={})", impl_item.id); diff --git a/src/librustc/middle/typeck/coherence.rs b/src/librustc/middle/typeck/coherence.rs index b9bf8e37dead8..9789e0c2e1f51 100644 --- a/src/librustc/middle/typeck/coherence.rs +++ b/src/librustc/middle/typeck/coherence.rs @@ -98,15 +98,11 @@ fn get_base_type(inference_context: &InferCtxt, } } +/// For coherence, when we have `impl Trait for Type`, we need to +/// guarantee that `Type` is "local" to the +/// crate. For our purposes, this means that it must contain +/// some nominal type defined in this crate. fn type_is_defined_in_local_crate(tcx: &ty::ctxt, original_type: t) -> bool { - /*! - * - * For coherence, when we have `impl Trait for Type`, we need to - * guarantee that `Type` is "local" to the - * crate. For our purposes, this means that it must contain - * some nominal type defined in this crate. - */ - let mut found_nominal = false; ty::walk_ty(original_type, |t| { match get(t).sty { @@ -723,17 +719,13 @@ impl<'a> CoherenceChecker<'a> { } } +/// Substitutes the values for the receiver's type parameters +/// that are found in method, leaving the method's type parameters +/// intact. pub fn make_substs_for_receiver_types(tcx: &ty::ctxt, trait_ref: &ty::TraitRef, method: &ty::Method) - -> subst::Substs -{ - /*! - * Substitutes the values for the receiver's type parameters - * that are found in method, leaving the method's type parameters - * intact. 
- */ - + -> subst::Substs { let meth_tps: Vec = method.generics.types.get_slice(subst::FnSpace) .iter() diff --git a/src/librustc/middle/typeck/collect.rs b/src/librustc/middle/typeck/collect.rs index bf88ec5c438f2..fe1cd8a71b2d7 100644 --- a/src/librustc/middle/typeck/collect.rs +++ b/src/librustc/middle/typeck/collect.rs @@ -8,28 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/* - -# Collect phase - -The collect phase of type check has the job of visiting all items, -determining their type, and writing that type into the `tcx.tcache` -table. Despite its name, this table does not really operate as a -*cache*, at least not for the types of items defined within the -current crate: we assume that after the collect phase, the types of -all local items will be present in the table. - -Unlike most of the types that are present in Rust, the types computed -for each item are in fact polytypes. In "layman's terms", this means -that they are generic types that may have type parameters (more -mathematically phrased, they are universally quantified over a set of -type parameters). Polytypes are represented by an instance of -`ty::Polytype`. This combines the core type along with a list of the -bounds for each parameter. Type parameters themselves are represented -as `ty_param()` instances. - -*/ - +//! # Collect phase +//! +//! The collect phase of type check has the job of visiting all items, +//! determining their type, and writing that type into the `tcx.tcache` +//! table. Despite its name, this table does not really operate as a +//! *cache*, at least not for the types of items defined within the +//! current crate: we assume that after the collect phase, the types of +//! all local items will be present in the table. +//! +//! Unlike most of the types that are present in Rust, the types computed +//! for each item are in fact polytypes. In "layman's terms", this means +//! 
that they are generic types that may have type parameters (more +//! mathematically phrased, they are universally quantified over a set of +//! type parameters). Polytypes are represented by an instance of +//! `ty::Polytype`. This combines the core type along with a list of the +//! bounds for each parameter. Type parameters themselves are represented +//! as `ty_param()` instances. use metadata::csearch; use middle::def; @@ -293,7 +288,7 @@ pub fn convert_field(ccx: &CrateCtxt, origin: ast::DefId) -> ty::field_ty { let tt = ccx.to_ty(&ExplicitRscope, &*v.node.ty); write_ty_to_tcx(ccx.tcx, v.node.id, tt); - /* add the field to the tcache */ + // add the field to the tcache ccx.tcx.tcache.borrow_mut().insert(local_def(v.node.id), ty::Polytype { generics: struct_generics.clone(), @@ -639,15 +634,12 @@ pub fn convert_foreign(ccx: &CrateCtxt, i: &ast::ForeignItem) { ccx.tcx.tcache.borrow_mut().insert(local_def(i.id), pty); } +/// Instantiates the path for the given trait reference, assuming that +/// it's bound to a valid trait type. Returns the def_id for the defining +/// trait. Fails if the type is a type other than a trait type. pub fn instantiate_trait_ref(ccx: &CrateCtxt, ast_trait_ref: &ast::TraitRef, self_ty: ty::t) -> Rc { - /*! - * Instantiates the path for the given trait reference, assuming that - * it's bound to a valid trait type. Returns the def_id for the defining - * trait. Fails if the type is a type other than a trait type. - */ - // FIXME(#5121) -- distinguish early vs late lifetime params let rscope = ExplicitRscope; @@ -1052,20 +1044,17 @@ fn ty_generics(ccx: &CrateCtxt, def } + /// Translate the AST's notion of ty param bounds (which are an + /// enum consisting of a newtyped Ty or a region) to ty's + /// notion of ty param bounds, which can either be user-defined + /// traits, or the built-in trait (formerly known as kind): Send. 
fn compute_bounds( ccx: &CrateCtxt, param_ty: ty::ParamTy, ast_bounds: &OwnedSlice, sized: ast::Sized, ident: ast::Ident, - span: Span) -> ty::ParamBounds - { - /*! - * Translate the AST's notion of ty param bounds (which are an - * enum consisting of a newtyped Ty or a region) to ty's - * notion of ty param bounds, which can either be user-defined - * traits, or the built-in trait (formerly known as kind): Send. - */ + span: Span) -> ty::ParamBounds { let mut param_bounds = ty::ParamBounds { builtin_bounds: ty::empty_builtin_bounds(), diff --git a/src/librustc/middle/typeck/infer/coercion.rs b/src/librustc/middle/typeck/infer/coercion.rs index f8efb3c38c200..35dbc73b1683d 100644 --- a/src/librustc/middle/typeck/infer/coercion.rs +++ b/src/librustc/middle/typeck/infer/coercion.rs @@ -8,61 +8,57 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -# Type Coercion - -Under certain circumstances we will coerce from one type to another, -for example by auto-borrowing. This occurs in situations where the -compiler has a firm 'expected type' that was supplied from the user, -and where the actual type is similar to that expected type in purpose -but not in representation (so actual subtyping is inappropriate). - -## Reborrowing - -Note that if we are expecting a reference, we will *reborrow* -even if the argument provided was already a reference. This is -useful for freezing mut/const things (that is, when the expected is &T -but you have &const T or &mut T) and also for avoiding the linearity -of mut things (when the expected is &mut T and you have &mut T). See -the various `src/test/run-pass/coerce-reborrow-*.rs` tests for -examples of where this is useful. - -## Subtle note - -When deciding what type coercions to consider, we do not attempt to -resolve any type variables we may encounter. 
This is because `b` -represents the expected type "as the user wrote it", meaning that if -the user defined a generic function like - - fn foo(a: A, b: A) { ... } - -and then we wrote `foo(&1, @2)`, we will not auto-borrow -either argument. In older code we went to some lengths to -resolve the `b` variable, which could mean that we'd -auto-borrow later arguments but not earlier ones, which -seems very confusing. - -## Subtler note - -However, right now, if the user manually specifies the -values for the type variables, as so: - - foo::<&int>(@1, @2) - -then we *will* auto-borrow, because we can't distinguish this from a -function that declared `&int`. This is inconsistent but it's easiest -at the moment. The right thing to do, I think, is to consider the -*unsubstituted* type when deciding whether to auto-borrow, but the -*substituted* type when considering the bounds and so forth. But most -of our methods don't give access to the unsubstituted type, and -rightly so because they'd be error-prone. So maybe the thing to do is -to actually determine the kind of coercions that should occur -separately and pass them in. Or maybe it's ok as is. Anyway, it's -sort of a minor point so I've opted to leave it for later---after all -we may want to adjust precisely when coercions occur. - -*/ +//! # Type Coercion +//! +//! Under certain circumstances we will coerce from one type to another, +//! for example by auto-borrowing. This occurs in situations where the +//! compiler has a firm 'expected type' that was supplied from the user, +//! and where the actual type is similar to that expected type in purpose +//! but not in representation (so actual subtyping is inappropriate). +//! +//! ## Reborrowing +//! +//! Note that if we are expecting a reference, we will *reborrow* +//! even if the argument provided was already a reference. This is +//! useful for freezing mut/const things (that is, when the expected is &T +//! 
but you have &const T or &mut T) and also for avoiding the linearity +//! of mut things (when the expected is &mut T and you have &mut T). See +//! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for +//! examples of where this is useful. +//! +//! ## Subtle note +//! +//! When deciding what type coercions to consider, we do not attempt to +//! resolve any type variables we may encounter. This is because `b` +//! represents the expected type "as the user wrote it", meaning that if +//! the user defined a generic function like +//! +//! fn foo(a: A, b: A) { ... } +//! +//! and then we wrote `foo(&1, @2)`, we will not auto-borrow +//! either argument. In older code we went to some lengths to +//! resolve the `b` variable, which could mean that we'd +//! auto-borrow later arguments but not earlier ones, which +//! seems very confusing. +//! +//! ## Subtler note +//! +//! However, right now, if the user manually specifies the +//! values for the type variables, as so: +//! +//! foo::<&int>(@1, @2) +//! +//! then we *will* auto-borrow, because we can't distinguish this from a +//! function that declared `&int`. This is inconsistent but it's easiest +//! at the moment. The right thing to do, I think, is to consider the +//! *unsubstituted* type when deciding whether to auto-borrow, but the +//! *substituted* type when considering the bounds and so forth. But most +//! of our methods don't give access to the unsubstituted type, and +//! rightly so because they'd be error-prone. So maybe the thing to do is +//! to actually determine the kind of coercions that should occur +//! separately and pass them in. Or maybe it's ok as is. Anyway, it's +//! sort of a minor point so I've opted to leave it for later---after all +//! we may want to adjust precisely when coercions occur. 
use middle::subst; use middle::ty::{AutoPtr, AutoBorrowVec, AutoBorrowObj, AutoDerefRef}; @@ -395,14 +391,10 @@ impl<'f> Coerce<'f> { } } + /// Attempts to coerce from a bare Rust function (`extern + /// "Rust" fn`) into a closure or a `proc`. fn coerce_from_bare_fn(&self, a: ty::t, fn_ty_a: &ty::BareFnTy, b: ty::t) -> CoerceResult { - /*! - * - * Attempts to coerce from a bare Rust function (`extern - * "Rust" fn`) into a closure or a `proc`. - */ - self.unpack_actual_value(b, |sty_b| { debug!("coerce_from_bare_fn(a={}, b={})", diff --git a/src/librustc/middle/typeck/infer/error_reporting.rs b/src/librustc/middle/typeck/infer/error_reporting.rs index 053a75e72602a..b74a60d0d45b7 100644 --- a/src/librustc/middle/typeck/infer/error_reporting.rs +++ b/src/librustc/middle/typeck/infer/error_reporting.rs @@ -8,56 +8,52 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Error Reporting Code for the inference engine - -Because of the way inference, and in particular region inference, -works, it often happens that errors are not detected until far after -the relevant line of code has been type-checked. Therefore, there is -an elaborate system to track why a particular constraint in the -inference graph arose so that we can explain to the user what gave -rise to a particular error. - -The basis of the system are the "origin" types. An "origin" is the -reason that a constraint or inference variable arose. There are -different "origin" enums for different kinds of constraints/variables -(e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has -a span, but also more information so that we can generate a meaningful -error message. - -Having a catalogue of all the different reasons an error can arise is -also useful for other reasons, like cross-referencing FAQs etc, though -we are not really taking advantage of this yet. 
- -# Region Inference - -Region inference is particularly tricky because it always succeeds "in -the moment" and simply registers a constraint. Then, at the end, we -can compute the full graph and report errors, so we need to be able to -store and later report what gave rise to the conflicting constraints. - -# Subtype Trace - -Determing whether `T1 <: T2` often involves a number of subtypes and -subconstraints along the way. A "TypeTrace" is an extended version -of an origin that traces the types and other values that were being -compared. It is not necessarily comprehensive (in fact, at the time of -this writing it only tracks the root values being compared) but I'd -like to extend it to include significant "waypoints". For example, if -you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2 -<: T4` fails, I'd like the trace to include enough information to say -"in the 2nd element of the tuple". Similarly, failures when comparing -arguments or return types in fn types should be able to cite the -specific position, etc. - -# Reality vs plan - -Of course, there is still a LOT of code in typeck that has yet to be -ported to this system, and which relies on string concatenation at the -time of error detection. - -*/ +//! Error Reporting Code for the inference engine +//! +//! Because of the way inference, and in particular region inference, +//! works, it often happens that errors are not detected until far after +//! the relevant line of code has been type-checked. Therefore, there is +//! an elaborate system to track why a particular constraint in the +//! inference graph arose so that we can explain to the user what gave +//! rise to a particular error. +//! +//! The basis of the system are the "origin" types. An "origin" is the +//! reason that a constraint or inference variable arose. There are +//! different "origin" enums for different kinds of constraints/variables +//! (e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has +//! 
a span, but also more information so that we can generate a meaningful +//! error message. +//! +//! Having a catalogue of all the different reasons an error can arise is +//! also useful for other reasons, like cross-referencing FAQs etc, though +//! we are not really taking advantage of this yet. +//! +//! # Region Inference +//! +//! Region inference is particularly tricky because it always succeeds "in +//! the moment" and simply registers a constraint. Then, at the end, we +//! can compute the full graph and report errors, so we need to be able to +//! store and later report what gave rise to the conflicting constraints. +//! +//! # Subtype Trace +//! +//! Determining whether `T1 <: T2` often involves a number of subtypes and +//! subconstraints along the way. A "TypeTrace" is an extended version +//! of an origin that traces the types and other values that were being +//! compared. It is not necessarily comprehensive (in fact, at the time of +//! this writing it only tracks the root values being compared) but I'd +//! like to extend it to include significant "waypoints". For example, if +//! you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2 +//! <: T4` fails, I'd like the trace to include enough information to say +//! "in the 2nd element of the tuple". Similarly, failures when comparing +//! arguments or return types in fn types should be able to cite the +//! specific position, etc. +//! +//! # Reality vs plan +//! +//! Of course, there is still a LOT of code in typeck that has yet to be +//! ported to this system, and which relies on string concatenation at the +//! time of error detection.
use std::collections::HashSet; use std::gc::GC; @@ -339,7 +335,7 @@ impl<'a> ErrorReporting for InferCtxt<'a> { let expected_found_str = match self.values_str(&trace.values) { Some(v) => v, None => { - return; /* derived error */ + return; // derived error } }; @@ -374,11 +370,9 @@ impl<'a> ErrorReporting for InferCtxt<'a> { ty::note_and_explain_type_err(self.tcx, terr); } + /// Returns a string of the form "expected `{}` but found `{}`", + /// or None if this is a derived error. fn values_str(&self, values: &ValuePairs) -> Option { - /*! - * Returns a string of the form "expected `{}` but found `{}`", - * or None if this is a derived error. - */ match *values { infer::Types(ref exp_found) => { self.expected_found_str(exp_found) diff --git a/src/librustc/middle/typeck/infer/lattice.rs b/src/librustc/middle/typeck/infer/lattice.rs index 1b3d96e474e4b..8bcebc2c98276 100644 --- a/src/librustc/middle/typeck/infer/lattice.rs +++ b/src/librustc/middle/typeck/infer/lattice.rs @@ -8,29 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * - * # Lattice Variables - * - * This file contains generic code for operating on inference variables - * that are characterized by an upper- and lower-bound. The logic and - * reasoning is explained in detail in the large comment in `infer.rs`. - * - * The code in here is defined quite generically so that it can be - * applied both to type variables, which represent types being inferred, - * and fn variables, which represent function types being inferred. - * It may eventually be applied to their types as well, who knows. - * In some cases, the functions are also generic with respect to the - * operation on the lattice (GLB vs LUB). - * - * Although all the functions are generic, we generally write the - * comments in a way that is specific to type variables and the LUB - * operation. It's just easier that way. 
- * - * In general all of the functions are defined parametrically - * over a `LatticeValue`, which is a value defined with respect to - * a lattice. - */ +//! # Lattice Variables +//! +//! This file contains generic code for operating on inference variables +//! that are characterized by an upper- and lower-bound. The logic and +//! reasoning is explained in detail in the large comment in `infer.rs`. +//! +//! The code in here is defined quite generically so that it can be +//! applied both to type variables, which represent types being inferred, +//! and fn variables, which represent function types being inferred. +//! It may eventually be applied to their types as well, who knows. +//! In some cases, the functions are also generic with respect to the +//! operation on the lattice (GLB vs LUB). +//! +//! Although all the functions are generic, we generally write the +//! comments in a way that is specific to type variables and the LUB +//! operation. It's just easier that way. +//! +//! In general all of the functions are defined parametrically +//! over a `LatticeValue`, which is a value defined with respect to +//! a lattice. use middle::ty::{RegionVid, TyVar}; use middle::ty; @@ -108,17 +105,13 @@ pub trait CombineFieldsLatticeMethods2 { impl<'f,T:LatticeValue, K:UnifyKey>> CombineFieldsLatticeMethods for CombineFields<'f> { + /// Make one variable a subtype of another variable. This is a + /// subtle and tricky process, as described in detail at the + /// top of infer.rs. fn var_sub_var(&self, a_id: K, b_id: K) - -> ures - { - /*! - * Make one variable a subtype of another variable. This is a - * subtle and tricky process, as described in detail at the - * top of infer.rs. 
- */ - + -> ures { let tcx = self.infcx.tcx; let table = UnifyKey::unification_table(self.infcx); @@ -146,10 +139,10 @@ impl<'f,T:LatticeValue, K:UnifyKey>> Ok(()) => { return Ok(()); } - Err(_) => { /*fallthrough */ } + Err(_) => { /* fallthrough */ } } } - _ => { /*fallthrough*/ } + _ => { /* fallthrough */ } } // Otherwise, we need to merge A and B so as to guarantee that @@ -163,16 +156,11 @@ impl<'f,T:LatticeValue, K:UnifyKey>> new_rank) } - /// make variable a subtype of T + /// Make a variable (`a_id`) a subtype of the concrete type `b`. fn var_sub_t(&self, a_id: K, b: T) - -> ures - { - /*! - * Make a variable (`a_id`) a subtype of the concrete type `b`. - */ - + -> ures { let tcx = self.infcx.tcx; let table = UnifyKey::unification_table(self.infcx); let node_a = table.borrow_mut().get(tcx, a_id); @@ -189,15 +177,11 @@ impl<'f,T:LatticeValue, K:UnifyKey>> a_id, a_bounds, b_bounds, node_a.rank) } + /// Make a concrete type (`a`) a subtype of the variable `b_id` fn t_sub_var(&self, a: T, b_id: K) - -> ures - { - /*! - * Make a concrete type (`a`) a subtype of the variable `b_id` - */ - + -> ures { let tcx = self.infcx.tcx; let table = UnifyKey::unification_table(self.infcx); let a_bounds = &Bounds { lb: Some(a.clone()), ub: None }; @@ -214,24 +198,20 @@ impl<'f,T:LatticeValue, K:UnifyKey>> b_id, a_bounds, b_bounds, node_b.rank) } + /// Updates the bounds for the variable `v_id` to be the intersection + /// of `a` and `b`. That is, the new bounds for `v_id` will be + /// a bounds c such that: + /// c.ub <: a.ub + /// c.ub <: b.ub + /// a.lb <: c.lb + /// b.lb <: c.lb + /// If this cannot be achieved, the result is failure. fn set_var_to_merged_bounds(&self, v_id: K, a: &Bounds, b: &Bounds, rank: uint) - -> ures - { - /*! - * Updates the bounds for the variable `v_id` to be the intersection - * of `a` and `b`. 
That is, the new bounds for `v_id` will be - * a bounds c such that: - * c.ub <: a.ub - * c.ub <: b.ub - * a.lb <: c.lb - * b.lb <: c.lb - * If this cannot be achieved, the result is failure. - */ - + -> ures { // Think of the two diamonds, we want to find the // intersection. There are basically four possibilities (you // can swap A/B in these pictures): @@ -281,16 +261,12 @@ impl<'f,T:LatticeValue, K:UnifyKey>> impl<'f,T:LatticeValue> CombineFieldsLatticeMethods2 for CombineFields<'f> { + /// Combines two bounds into a more general bound. fn merge_bnd(&self, a: &Bound, b: &Bound, lattice_op: LatticeOp) - -> cres> - { - /*! - * Combines two bounds into a more general bound. - */ - + -> cres> { debug!("merge_bnd({},{})", a.repr(self.infcx.tcx), b.repr(self.infcx.tcx)); @@ -422,22 +398,20 @@ pub enum LatticeVarResult { ValueResult(T) } -/** - * Computes the LUB or GLB of two bounded variables. These could be any - * sort of variables, but in the comments on this function I'll assume - * we are doing an LUB on two type variables. - * - * This computation can be done in one of two ways: - * - * - If both variables have an upper bound, we may just compute the - * LUB of those bounds and return that, in which case we are - * returning a type. This is indicated with a `ValueResult` return. - * - * - If the variables do not both have an upper bound, we will unify - * the variables and return the unified variable, in which case the - * result is a variable. This is indicated with a `VarResult` - * return. - */ +/// Computes the LUB or GLB of two bounded variables. These could be any +/// sort of variables, but in the comments on this function I'll assume +/// we are doing an LUB on two type variables. +/// +/// This computation can be done in one of two ways: +/// +/// - If both variables have an upper bound, we may just compute the +/// LUB of those bounds and return that, in which case we are +/// returning a type. This is indicated with a `ValueResult` return. 
+/// +/// - If the variables do not both have an upper bound, we will unify +/// the variables and return the unified variable, in which case the +/// result is a variable. This is indicated with a `VarResult` +/// return. pub fn lattice_vars>>( @@ -474,10 +448,10 @@ pub fn lattice_vars { match this.infcx().try(|| lattice_dir_op(a_ty, b_ty) ) { Ok(t) => return Ok(ValueResult(t)), - Err(_) => { /*fallthrough */ } + Err(_) => { /* fallthrough */ } } } - _ => {/*fallthrough*/} + _ => { /* fallthrough */ } } // Otherwise, we need to merge A and B into one variable. We can diff --git a/src/librustc/middle/typeck/infer/mod.rs b/src/librustc/middle/typeck/infer/mod.rs index b505536a59db2..66fd984b23161 100644 --- a/src/librustc/middle/typeck/infer/mod.rs +++ b/src/librustc/middle/typeck/infer/mod.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! See doc.rs for documentation */ +//! See doc.rs for documentation #![allow(non_camel_case_types)] @@ -275,18 +275,14 @@ pub fn new_infer_ctxt<'a>(tcx: &'a ty::ctxt) -> InferCtxt<'a> { } } +/// Computes the least upper-bound of `a` and `b`. If this is +/// not possible, reports an error and returns ty::err. pub fn common_supertype(cx: &InferCtxt, origin: TypeOrigin, a_is_expected: bool, a: ty::t, b: ty::t) - -> ty::t -{ - /*! - * Computes the least upper-bound of `a` and `b`. If this is - * not possible, reports an error and returns ty::err. - */ - + -> ty::t { debug!("common_supertype({}, {})", a.repr(cx.tcx), b.repr(cx.tcx)); @@ -631,16 +627,13 @@ impl<'a> InferCtxt<'a> { .collect() } + /// Given a set of generics defined on a type or impl, returns + /// a substitution mapping each type/region parameter to a + /// fresh inference variable. pub fn fresh_substs_for_type(&self, span: Span, generics: &ty::Generics) - -> subst::Substs - { - /*! 
- * Given a set of generics defined on a type or impl, returns - * a substitution mapping each type/region parameter to a - * fresh inference variable. - */ + -> subst::Substs { assert!(generics.types.len(subst::SelfSpace) == 0); assert!(generics.types.len(subst::FnSpace) == 0); assert!(generics.regions.len(subst::SelfSpace) == 0); diff --git a/src/librustc/middle/typeck/infer/region_inference/mod.rs b/src/librustc/middle/typeck/infer/region_inference/mod.rs index 757b715ec9300..aae878667a0c4 100644 --- a/src/librustc/middle/typeck/infer/region_inference/mod.rs +++ b/src/librustc/middle/typeck/infer/region_inference/mod.rs @@ -8,8 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! See doc.rs */ - +//! See doc.rs use middle::ty; use middle::ty::{BoundRegion, FreeRegion, Region, RegionVid}; @@ -469,14 +468,12 @@ impl<'a> RegionVarBindings<'a> { .collect() } + /// Computes all regions that have been related to `r0` in any + /// way since the mark `mark` was made---`r0` itself will be + /// the first entry. This is used when checking whether + /// skolemized regions are being improperly related to other + /// regions. pub fn tainted(&self, mark: RegionMark, r0: Region) -> Vec { - /*! - * Computes all regions that have been related to `r0` in any - * way since the mark `mark` was made---`r0` itself will be - * the first entry. This is used when checking whether - * skolemized regions are being improperly related to other - * regions. - */ debug!("tainted(mark={}, r0={})", mark, r0.repr(self.tcx)); let _indenter = indenter(); @@ -546,13 +543,11 @@ impl<'a> RegionVarBindings<'a> { } } - /** - This function performs the actual region resolution. It must be - called after all constraints have been added. It performs a - fixed-point iteration to find region values which satisfy all - constraints, assuming such values can be found; if they cannot, - errors are reported. 
- */ + /// This function performs the actual region resolution. It must be + /// called after all constraints have been added. It performs a + /// fixed-point iteration to find region values which satisfy all + /// constraints, assuming such values can be found; if they cannot, + /// errors are reported. pub fn resolve_regions(&self) -> Vec { debug!("RegionVarBindings: resolve_regions()"); let mut errors = vec!(); @@ -636,16 +631,12 @@ impl<'a> RegionVarBindings<'a> { } } + /// Computes a region that encloses both free region arguments. + /// Guarantee that if the same two regions are given as argument, + /// in any order, a consistent result is returned. fn lub_free_regions(&self, a: &FreeRegion, - b: &FreeRegion) -> ty::Region - { - /*! - * Computes a region that encloses both free region arguments. - * Guarantee that if the same two regions are given as argument, - * in any order, a consistent result is returned. - */ - + b: &FreeRegion) -> ty::Region { return match a.cmp(b) { Less => helper(self, a, b), Greater => helper(self, b, a), @@ -736,16 +727,12 @@ impl<'a> RegionVarBindings<'a> { } } + /// Computes a region that is enclosed by both free region arguments, + /// if any. Guarantees that if the same two regions are given as argument, + /// in any order, a consistent result is returned. fn glb_free_regions(&self, a: &FreeRegion, - b: &FreeRegion) -> cres - { - /*! - * Computes a region that is enclosed by both free region arguments, - * if any. Guarantees that if the same two regions are given as argument, - * in any order, a consistent result is returned. 
- */ - + b: &FreeRegion) -> cres { return match a.cmp(b) { Less => helper(self, a, b), Greater => helper(self, b, a), @@ -1047,37 +1034,37 @@ impl<'a> RegionVarBindings<'a> { for idx in range(0u, self.num_vars()) { match var_data[idx].value { Value(_) => { - /* Inference successful */ + // Inference successful } NoValue => { - /* Unconstrained inference: do not report an error - until the value of this variable is requested. - After all, sometimes we make region variables but never - really use their values. */ + // Unconstrained inference: do not report an error + // until the value of this variable is requested. + // After all, sometimes we make region variables but never + // really use their values. } ErrorValue => { - /* Inference impossible, this value contains - inconsistent constraints. - - I think that in this case we should report an - error now---unlike the case above, we can't - wait to see whether the user needs the result - of this variable. The reason is that the mere - existence of this variable implies that the - region graph is inconsistent, whether or not it - is used. - - For example, we may have created a region - variable that is the GLB of two other regions - which do not have a GLB. Even if that variable - is not used, it implies that those two regions - *should* have a GLB. - - At least I think this is true. It may be that - the mere existence of a conflict in a region variable - that is not used is not a problem, so if this rule - starts to create problems we'll have to revisit - this portion of the code and think hard about it. =) */ + // Inference impossible, this value contains + // inconsistent constraints. + // + // I think that in this case we should report an + // error now---unlike the case above, we can't + // wait to see whether the user needs the result + // of this variable. The reason is that the mere + // existence of this variable implies that the + // region graph is inconsistent, whether or not it + // is used. 
+ // + // For example, we may have created a region + // variable that is the GLB of two other regions + // which do not have a GLB. Even if that variable + // is not used, it implies that those two regions + // *should* have a GLB. + // + // At least I think this is true. It may be that + // the mere existence of a conflict in a region variable + // that is not used is not a problem, so if this rule + // starts to create problems we'll have to revisit + // this portion of the code and think hard about it. =) if opt_graph.is_none() { opt_graph = Some(self.construct_graph()); diff --git a/src/librustc/middle/typeck/infer/test.rs b/src/librustc/middle/typeck/infer/test.rs index ff9f855c98757..72e6f6a202efa 100644 --- a/src/librustc/middle/typeck/infer/test.rs +++ b/src/librustc/middle/typeck/infer/test.rs @@ -8,11 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -# Standalone Tests for the Inference Module - -*/ +//! # Standalone Tests for the Inference Module // This is only used by tests, hence allow dead code. #![allow(dead_code)] diff --git a/src/librustc/middle/typeck/infer/unify.rs b/src/librustc/middle/typeck/infer/unify.rs index 44afc04d3f0ef..e6b7c2a7ef31e 100644 --- a/src/librustc/middle/typeck/infer/unify.rs +++ b/src/librustc/middle/typeck/infer/unify.rs @@ -20,81 +20,65 @@ use std::mem; use syntax::ast; use util::ppaux::Repr; -/** - * This trait is implemented by any type that can serve as a type - * variable. We call such variables *unification keys*. For example, - * this trait is implemented by `TyVid`, which represents normal - * type variables, and `IntVid`, which represents integral variables. - * - * Each key type has an associated value type `V`. For example, - * for `TyVid`, this is `Bounds`, representing a pair of - * upper- and lower-bound types. - * - * Implementations of this trait are at the end of this file. 
- */ +/// This trait is implemented by any type that can serve as a type +/// variable. We call such variables *unification keys*. For example, +/// this trait is implemented by `TyVid`, which represents normal +/// type variables, and `IntVid`, which represents integral variables. +/// +/// Each key type has an associated value type `V`. For example, +/// for `TyVid`, this is `Bounds`, representing a pair of +/// upper- and lower-bound types. +/// +/// Implementations of this trait are at the end of this file. pub trait UnifyKey : Clone + Show + PartialEq + Repr { fn index(&self) -> uint; fn from_index(u: uint) -> Self; - /** - * Given an inference context, returns the unification table - * appropriate to this key type. - */ + /// Given an inference context, returns the unification table + /// appropriate to this key type. fn unification_table<'v>(infcx: &'v InferCtxt) -> &'v RefCell>; fn tag(k: Option) -> &'static str; } -/** - * Trait for valid types that a type variable can be set to. Note - * that this is typically not the end type that the value will - * take on, but rather some wrapper: for example, for normal type - * variables, the associated type is not `ty::t` but rather - * `Bounds`. - * - * Implementations of this trait are at the end of this file. - */ +/// Trait for valid types that a type variable can be set to. Note +/// that this is typically not the end type that the value will +/// take on, but rather some wrapper: for example, for normal type +/// variables, the associated type is not `ty::t` but rather +/// `Bounds`. +/// +/// Implementations of this trait are at the end of this file. pub trait UnifyValue : Clone + Repr + PartialEq { } -/** - * Value of a unification key. We implement Tarjan's union-find - * algorithm: when two keys are unified, one of them is converted - * into a "redirect" pointing at the other. 
These redirects form a - * DAG: the roots of the DAG (nodes that are not redirected) are each - * associated with a value of type `V` and a rank. The rank is used - * to keep the DAG relatively balanced, which helps keep the running - * time of the algorithm under control. For more information, see - * . - */ +/// Value of a unification key. We implement Tarjan's union-find +/// algorithm: when two keys are unified, one of them is converted +/// into a "redirect" pointing at the other. These redirects form a +/// DAG: the roots of the DAG (nodes that are not redirected) are each +/// associated with a value of type `V` and a rank. The rank is used +/// to keep the DAG relatively balanced, which helps keep the running +/// time of the algorithm under control. For more information, see +/// . #[deriving(PartialEq,Clone)] pub enum VarValue { Redirect(K), Root(V, uint), } -/** - * Table of unification keys and their values. - */ +/// Table of unification keys and their values. pub struct UnificationTable { - /** - * Indicates the current value of each key. - */ + /// Indicates the current value of each key. values: Vec>, - /** - * When a snapshot is active, logs each change made to the table - * so that they can be unrolled. - */ + /// When a snapshot is active, logs each change made to the table + /// so that they can be unrolled. undo_log: Vec>, } -/** - * At any time, users may snapshot a unification table. The changes - * made during the snapshot may either be *committed* or *rolled back*. - */ +/// At any time, users may snapshot a unification table. The changes +/// made during the snapshot may either be *committed* or *rolled back*. pub struct Snapshot { // Ensure that this snapshot is keyed to the table type. marker1: marker::CovariantType, @@ -121,10 +105,8 @@ enum UndoLog { SetVar(uint, VarValue), } -/** - * Internal type used to represent the result of a `get()` operation. - * Conveys the current root and value of the key. 
- */ +/// Internal type used to represent the result of a `get()` operation. +/// Conveys the current root and value of the key. pub struct Node { pub key: K, pub value: V, @@ -144,16 +126,14 @@ impl> UnificationTable { } } + /// True if a snapshot has been started. pub fn in_snapshot(&self) -> bool { - /*! True if a snapshot has been started. */ self.undo_log.len() > 0 } - /** - * Starts a new snapshot. Each snapshot must be either - * rolled back or committed in a "LIFO" (stack) order. - */ + /// Starts a new snapshot. Each snapshot must be either + /// rolled back or committed in a "LIFO" (stack) order. pub fn snapshot(&mut self) -> Snapshot { let length = self.undo_log.len(); debug!("{}: snapshot at length {}", @@ -173,10 +153,8 @@ impl> UnificationTable { assert!(*self.undo_log.get(snapshot.length) == OpenSnapshot); } - /** - * Reverses all changes since the last snapshot. Also - * removes any keys that have been created since then. - */ + /// Reverses all changes since the last snapshot. Also + /// removes any keys that have been created since then. pub fn rollback_to(&mut self, tcx: &ty::ctxt, snapshot: Snapshot) { debug!("{}: rollback_to({})", UnifyKey::tag(None::), @@ -212,10 +190,8 @@ impl> UnificationTable { assert!(self.undo_log.len() == snapshot.length); } - /** - * Commits all changes since the last snapshot. Of course, they - * can still be undone if there is a snapshot further out. - */ + /// Commits all changes since the last snapshot. Of course, they + /// can still be undone if there is a snapshot further out. pub fn commit(&mut self, snapshot: Snapshot) { debug!("{}: commit({})", UnifyKey::tag(None::), @@ -246,27 +222,20 @@ impl> UnificationTable { k } + /// Primitive operation to swap a value in the var array. + /// Caller should update the undo log if we are in a snapshot. fn swap_value(&mut self, index: uint, new_value: VarValue) - -> VarValue - { - /*! - * Primitive operation to swap a value in the var array. 
- * Caller should update the undo log if we are in a snapshot. - */ - + -> VarValue { let loc = self.values.get_mut(index); mem::replace(loc, new_value) } + /// Find the root node for `vid`. This uses the standard + /// union-find algorithm with path compression: + /// http://en.wikipedia.org/wiki/Disjoint-set_data_structure pub fn get(&mut self, tcx: &ty::ctxt, vid: K) -> Node { - /*! - * Find the root node for `vid`. This uses the standard - * union-find algorithm with path compression: - * http://en.wikipedia.org/wiki/Disjoint-set_data_structure - */ - let index = vid.index(); let value = (*self.values.get(index)).clone(); match value { @@ -299,16 +268,12 @@ impl> UnificationTable { } } + /// Sets the value for `vid` to `new_value`. `vid` MUST be a + /// root node! Also, we must be in the middle of a snapshot. pub fn set(&mut self, tcx: &ty::ctxt, key: K, - new_value: VarValue) - { - /*! - * Sets the value for `vid` to `new_value`. `vid` MUST be a - * root node! Also, we must be in the middle of a snapshot. - */ - + new_value: VarValue) { assert!(self.is_root(&key)); assert!(self.in_snapshot()); @@ -321,19 +286,15 @@ impl> UnificationTable { self.undo_log.push(SetVar(index, old_value)); } + /// Either redirects node_a to node_b or vice versa, depending + /// on the relative rank. Returns the new root and rank. You + /// should then update the value of the new root to something + /// suitable. pub fn unify(&mut self, tcx: &ty::ctxt, node_a: &Node, node_b: &Node) - -> (K, uint) - { - /*! - * Either redirects node_a to node_b or vice versa, depending - * on the relative rank. Returns the new root and rank. You - * should then update the value of the new root to something - * suitable. 
- */ - + -> (K, uint) { debug!("unify(node_a(id={}, rank={}), node_b(id={}, rank={}))", node_a.key.repr(tcx), node_a.rank, @@ -363,10 +324,8 @@ impl> UnificationTable { // Code to handle simple keys like ints, floats---anything that // doesn't have a subtyping relationship we need to worry about. -/** - * Indicates a type that does not have any kind of subtyping - * relationship. - */ +/// Indicates a type that does not have any kind of subtyping +/// relationship. pub trait SimplyUnifiable : Clone + PartialEq + Repr { fn to_type_err(expected_found) -> ty::type_err; } @@ -398,21 +357,16 @@ pub trait InferCtxtMethodsForSimplyUnifiableTypes>> - InferCtxtMethodsForSimplyUnifiableTypes for InferCtxt<'tcx> -{ + InferCtxtMethodsForSimplyUnifiableTypes for InferCtxt<'tcx> { + /// Unifies two simple keys. Because simple keys do + /// not have any subtyping relationships, if both keys + /// have already been associated with a value, then those two + /// values must be the same. fn simple_vars(&self, a_is_expected: bool, a_id: K, b_id: K) - -> ures - { - /*! - * Unifies two simple keys. Because simple keys do - * not have any subtyping relationships, if both keys - * have already been associated with a value, then those two - * values must be the same. - */ - + -> ures { let tcx = self.tcx; let table = UnifyKey::unification_table(self); let node_a = table.borrow_mut().get(tcx, a_id); @@ -446,19 +400,15 @@ impl<'tcx,V:SimplyUnifiable,K:UnifyKey>> return Ok(()) } + /// Sets the value of the key `a_id` to `b`. Because + /// simple keys do not have any subtyping relationships, + /// if `a_id` already has a value, it must be the same as + /// `b`. fn simple_var_t(&self, a_is_expected: bool, a_id: K, b: V) - -> ures - { - /*! - * Sets the value of the key `a_id` to `b`. Because - * simple keys do not have any subtyping relationships, - * if `a_id` already has a value, it must be the same as - * `b`. 
- */ - + -> ures { let tcx = self.tcx; let table = UnifyKey::unification_table(self); let node_a = table.borrow_mut().get(tcx, a_id); diff --git a/src/librustc/middle/typeck/mod.rs b/src/librustc/middle/typeck/mod.rs index 7b6935df42079..092608fa97dc1 100644 --- a/src/librustc/middle/typeck/mod.rs +++ b/src/librustc/middle/typeck/mod.rs @@ -8,56 +8,52 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/* - -typeck.rs, an introduction - -The type checker is responsible for: - -1. Determining the type of each expression -2. Resolving methods and traits -3. Guaranteeing that most type rules are met ("most?", you say, "why most?" - Well, dear reader, read on) - -The main entry point is `check_crate()`. Type checking operates in -several major phases: - -1. The collect phase first passes over all items and determines their - type, without examining their "innards". - -2. Variance inference then runs to compute the variance of each parameter - -3. Coherence checks for overlapping or orphaned impls - -4. Finally, the check phase then checks function bodies and so forth. - Within the check phase, we check each function body one at a time - (bodies of function expressions are checked as part of the - containing function). Inference is used to supply types wherever - they are unknown. The actual checking of a function itself has - several phases (check, regionck, writeback), as discussed in the - documentation for the `check` module. 
- -The type checker is defined into various submodules which are documented -independently: - -- astconv: converts the AST representation of types - into the `ty` representation - -- collect: computes the types of each top-level item and enters them into - the `cx.tcache` table for later use - -- coherence: enforces coherence rules, builds some tables - -- variance: variance inference - -- check: walks over function bodies and type checks them, inferring types for - local variables, type parameters, etc as necessary. - -- infer: finds the types to use for each type variable such that - all subtyping and assignment constraints are met. In essence, the check - module specifies the constraints, and the infer module solves them. - -*/ +//! typeck.rs, an introduction +//! +//! The type checker is responsible for: +//! +//! 1. Determining the type of each expression +//! 2. Resolving methods and traits +//! 3. Guaranteeing that most type rules are met ("most?", you say, "why most?" +//! Well, dear reader, read on) +//! +//! The main entry point is `check_crate()`. Type checking operates in +//! several major phases: +//! +//! 1. The collect phase first passes over all items and determines their +//! type, without examining their "innards". +//! +//! 2. Variance inference then runs to compute the variance of each parameter +//! +//! 3. Coherence checks for overlapping or orphaned impls +//! +//! 4. Finally, the check phase then checks function bodies and so forth. +//! Within the check phase, we check each function body one at a time +//! (bodies of function expressions are checked as part of the +//! containing function). Inference is used to supply types wherever +//! they are unknown. The actual checking of a function itself has +//! several phases (check, regionck, writeback), as discussed in the +//! documentation for the `check` module. +//! +//! The type checker is defined into various submodules which are documented +//! independently: +//! +//! 
- astconv: converts the AST representation of types +//! into the `ty` representation +//! +//! - collect: computes the types of each top-level item and enters them into +//! the `cx.tcache` table for later use +//! +//! - coherence: enforces coherence rules, builds some tables +//! +//! - variance: variance inference +//! +//! - check: walks over function bodies and type checks them, inferring types +//! for local variables, type parameters, etc as necessary. +//! +//! - infer: finds the types to use for each type variable such that +//! all subtyping and assignment constraints are met. In essence, the check +//! module specifies the constraints, and the infer module solves them. #![allow(non_camel_case_types)] @@ -149,20 +145,18 @@ pub struct MethodCallee { pub substs: subst::Substs } -/** - * With method calls, we store some extra information in - * side tables (i.e method_map, vtable_map). We use - * MethodCall as a key to index into these tables instead of - * just directly using the expression's NodeId. The reason - * for this being that we may apply adjustments (coercions) - * with the resulting expression also needing to use the - * side tables. The problem with this is that we don't - * assign a separate NodeId to this new expression - * and so it would clash with the base expression if both - * needed to add to the side tables. Thus to disambiguate - * we also keep track of whether there's an adjustment in - * our key. - */ +/// With method calls, we store some extra information in +/// side tables (i.e method_map, vtable_map). We use +/// MethodCall as a key to index into these tables instead of +/// just directly using the expression's NodeId. The reason +/// for this being that we may apply adjustments (coercions) +/// with the resulting expression also needing to use the +/// side tables. 
The problem with this is that we don't +/// assign a separate NodeId to this new expression +/// and so it would clash with the base expression if both +/// needed to add to the side tables. Thus to disambiguate +/// we also keep track of whether there's an adjustment in +/// our key. #[deriving(Clone, PartialEq, Eq, Hash, Show)] pub struct MethodCall { pub expr_id: ast::NodeId, @@ -215,29 +209,23 @@ pub type vtable_res = VecPerParamSpace; #[deriving(Clone)] pub enum vtable_origin { - /* - Statically known vtable. def_id gives the impl item - from whence comes the vtable, and tys are the type substs. - vtable_res is the vtable itself. - */ + // Statically known vtable. def_id gives the impl item + // from whence comes the vtable, and tys are the type substs. + // vtable_res is the vtable itself. vtable_static(ast::DefId, subst::Substs, vtable_res), - /* - Dynamic vtable, comes from a parameter that has a bound on it: - fn foo(a: T) -- a's vtable would have a - vtable_param origin - - The first argument is the param index (identifying T in the example), - and the second is the bound number (identifying baz) - */ + // Dynamic vtable, comes from a parameter that has a bound on it: + // fn foo(a: T) -- a's vtable would have a + // vtable_param origin + // + // The first argument is the param index (identifying T in the example), + // and the second is the bound number (identifying baz) vtable_param(param_index, uint), - /* - Asked to determine the vtable for ty_err. This is the value used - for the vtables of `Self` in a virtual call like `foo.bar()` - where `foo` is of object type. The same value is also used when - type errors occur. - */ + // Asked to determine the vtable for ty_err. This is the value used + // for the vtables of `Self` in a virtual call like `foo.bar()` + // where `foo` is of object type. The same value is also used when + // type errors occur. 
vtable_error, } diff --git a/src/librustc/middle/typeck/variance.rs b/src/librustc/middle/typeck/variance.rs index fb3ce391d8e34..0014990563339 100644 --- a/src/librustc/middle/typeck/variance.rs +++ b/src/librustc/middle/typeck/variance.rs @@ -8,189 +8,185 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -This file infers the variance of type and lifetime parameters. The -algorithm is taken from Section 4 of the paper "Taming the Wildcards: -Combining Definition- and Use-Site Variance" published in PLDI'11 and -written by Altidor et al., and hereafter referred to as The Paper. - -This inference is explicitly designed *not* to consider the uses of -types within code. To determine the variance of type parameters -defined on type `X`, we only consider the definition of the type `X` -and the definitions of any types it references. - -We only infer variance for type parameters found on *types*: structs, -enums, and traits. We do not infer variance for type parameters found -on fns or impls. This is because those things are not type definitions -and variance doesn't really make sense in that context. - -It is worth covering what variance means in each case. For structs and -enums, I think it is fairly straightforward. The variance of the type -or lifetime parameters defines whether `T` is a subtype of `T` -(resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B` -(resp. `'a` and `'b`). (FIXME #3598 -- we do not currently make use of -the variances we compute for type parameters.) - -### Variance on traits - -The meaning of variance for trait parameters is more subtle and worth -expanding upon. There are in fact two uses of the variance values we -compute. - -#### Trait variance and object types - -The first is for object types. Just as with structs and enums, we can -decide the subtyping relationship between two object types `&Trait` -and `&Trait` based on the relationship of `A` and `B`. 
Note that -for object types we ignore the `Self` type parameter -- it is unknown, -and the nature of dynamic dispatch ensures that we will always call a -function that is expected the appropriate `Self` type. However, we -must be careful with the other type parameters, or else we could end -up calling a function that is expecting one type but provided another. - -To see what I mean, consider a trait like so: - - trait ConvertTo { - fn convertTo(&self) -> A; - } - -Intuitively, If we had one object `O=&ConvertTo` and another -`S=&ConvertTo`, then `S <: O` because `String <: Object` -(presuming Java-like "string" and "object" types, my go to examples -for subtyping). The actual algorithm would be to compare the -(explicit) type parameters pairwise respecting their variance: here, -the type parameter A is covariant (it appears only in a return -position), and hence we require that `String <: Object`. - -You'll note though that we did not consider the binding for the -(implicit) `Self` type parameter: in fact, it is unknown, so that's -good. The reason we can ignore that parameter is precisely because we -don't need to know its value until a call occurs, and at that time (as -you said) the dynamic nature of virtual dispatch means the code we run -will be correct for whatever value `Self` happens to be bound to for -the particular object whose method we called. `Self` is thus different -from `A`, because the caller requires that `A` be known in order to -know the return type of the method `convertTo()`. (As an aside, we -have rules preventing methods where `Self` appears outside of the -receiver position from being called via an object.) - -#### Trait variance and vtable resolution - -But traits aren't only used with objects. They're also used when -deciding whether a given impl satisfies a given trait bound. To set the -scene here, imagine I had a function: - - fn convertAll>(v: &[T]) { - ... 
- } - -Now imagine that I have an implementation of `ConvertTo` for `Object`: - - impl ConvertTo for Object { ... } - -And I want to call `convertAll` on an array of strings. Suppose -further that for whatever reason I specifically supply the value of -`String` for the type parameter `T`: - - let mut vector = ~["string", ...]; - convertAll::(v); - -Is this legal? To put another way, can we apply the `impl` for -`Object` to the type `String`? The answer is yes, but to see why -we have to expand out what will happen: - -- `convertAll` will create a pointer to one of the entries in the - vector, which will have type `&String` -- It will then call the impl of `convertTo()` that is intended - for use with objects. This has the type: - - fn(self: &Object) -> int - - It is ok to provide a value for `self` of type `&String` because - `&String <: &Object`. - -OK, so intuitively we want this to be legal, so let's bring this back -to variance and see whether we are computing the correct result. We -must first figure out how to phrase the question "is an impl for -`Object,int` usable where an impl for `String,int` is expected?" - -Maybe it's helpful to think of a dictionary-passing implementation of -type classes. In that case, `convertAll()` takes an implicit parameter -representing the impl. In short, we *have* an impl of type: - - V_O = ConvertTo for Object - -and the function prototype expects an impl of type: - - V_S = ConvertTo for String - -As with any argument, this is legal if the type of the value given -(`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`? -The answer will depend on the variance of the various parameters. In -this case, because the `Self` parameter is contravariant and `A` is -covariant, it means that: - - V_O <: V_S iff - int <: int - String <: Object - -These conditions are satisfied and so we are happy. - -### The algorithm - -The basic idea is quite straightforward. 
We iterate over the types -defined and, for each use of a type parameter X, accumulate a -constraint indicating that the variance of X must be valid for the -variance of that use site. We then iteratively refine the variance of -X until all constraints are met. There is *always* a sol'n, because at -the limit we can declare all type parameters to be invariant and all -constraints will be satisfied. - -As a simple example, consider: - - enum Option { Some(A), None } - enum OptionalFn { Some(|B|), None } - enum OptionalMap { Some(|C| -> C), None } - -Here, we will generate the constraints: - - 1. V(A) <= + - 2. V(B) <= - - 3. V(C) <= + - 4. V(C) <= - - -These indicate that (1) the variance of A must be at most covariant; -(2) the variance of B must be at most contravariant; and (3, 4) the -variance of C must be at most covariant *and* contravariant. All of these -results are based on a variance lattice defined as follows: - - * Top (bivariant) - - + - o Bottom (invariant) - -Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the -optimal solution. Note that there is always a naive solution which -just declares all variables to be invariant. - -You may be wondering why fixed-point iteration is required. The reason -is that the variance of a use site may itself be a function of the -variance of other type parameters. In full generality, our constraints -take the form: - - V(X) <= Term - Term := + | - | * | o | V(X) | Term x Term - -Here the notation V(X) indicates the variance of a type/region -parameter `X` with respect to its defining class. `Term x Term` -represents the "variance transform" as defined in the paper: - - If the variance of a type variable `X` in type expression `E` is `V2` - and the definition-site variance of the [corresponding] type parameter - of a class `C` is `V1`, then the variance of `X` in the type expression - `C` is `V3 = V1.xform(V2)`. - -*/ +//! This file infers the variance of type and lifetime parameters. The +//! 
algorithm is taken from Section 4 of the paper "Taming the Wildcards: +//! Combining Definition- and Use-Site Variance" published in PLDI'11 and +//! written by Altidor et al., and hereafter referred to as The Paper. +//! +//! This inference is explicitly designed *not* to consider the uses of +//! types within code. To determine the variance of type parameters +//! defined on type `X`, we only consider the definition of the type `X` +//! and the definitions of any types it references. +//! +//! We only infer variance for type parameters found on *types*: structs, +//! enums, and traits. We do not infer variance for type parameters found +//! on fns or impls. This is because those things are not type definitions +//! and variance doesn't really make sense in that context. +//! +//! It is worth covering what variance means in each case. For structs and +//! enums, I think it is fairly straightforward. The variance of the type +//! or lifetime parameters defines whether `T` is a subtype of `T` +//! (resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B` +//! (resp. `'a` and `'b`). (FIXME #3598 -- we do not currently make use of +//! the variances we compute for type parameters.) +//! +//! ### Variance on traits +//! +//! The meaning of variance for trait parameters is more subtle and worth +//! expanding upon. There are in fact two uses of the variance values we +//! compute. +//! +//! #### Trait variance and object types +//! +//! The first is for object types. Just as with structs and enums, we can +//! decide the subtyping relationship between two object types `&Trait` +//! and `&Trait` based on the relationship of `A` and `B`. Note that +//! for object types we ignore the `Self` type parameter -- it is unknown, +//! and the nature of dynamic dispatch ensures that we will always call a +//! function that is expected the appropriate `Self` type. However, we +//! must be careful with the other type parameters, or else we could end +//! 
up calling a function that is expecting one type but provided another. +//! +//! To see what I mean, consider a trait like so: +//! +//! trait ConvertTo { +//! fn convertTo(&self) -> A; +//! } +//! +//! Intuitively, If we had one object `O=&ConvertTo` and another +//! `S=&ConvertTo`, then `S <: O` because `String <: Object` +//! (presuming Java-like "string" and "object" types, my go to examples +//! for subtyping). The actual algorithm would be to compare the +//! (explicit) type parameters pairwise respecting their variance: here, +//! the type parameter A is covariant (it appears only in a return +//! position), and hence we require that `String <: Object`. +//! +//! You'll note though that we did not consider the binding for the +//! (implicit) `Self` type parameter: in fact, it is unknown, so that's +//! good. The reason we can ignore that parameter is precisely because we +//! don't need to know its value until a call occurs, and at that time (as +//! you said) the dynamic nature of virtual dispatch means the code we run +//! will be correct for whatever value `Self` happens to be bound to for +//! the particular object whose method we called. `Self` is thus different +//! from `A`, because the caller requires that `A` be known in order to +//! know the return type of the method `convertTo()`. (As an aside, we +//! have rules preventing methods where `Self` appears outside of the +//! receiver position from being called via an object.) +//! +//! #### Trait variance and vtable resolution +//! +//! But traits aren't only used with objects. They're also used when +//! deciding whether a given impl satisfies a given trait bound. To set the +//! scene here, imagine I had a function: +//! +//! fn convertAll>(v: &[T]) { +//! ... +//! } +//! +//! Now imagine that I have an implementation of `ConvertTo` for `Object`: +//! +//! impl ConvertTo for Object { ... } +//! +//! And I want to call `convertAll` on an array of strings. Suppose +//! 
further that for whatever reason I specifically supply the value of +//! `String` for the type parameter `T`: +//! +//! let mut vector = ~["string", ...]; +//! convertAll::(v); +//! +//! Is this legal? To put another way, can we apply the `impl` for +//! `Object` to the type `String`? The answer is yes, but to see why +//! we have to expand out what will happen: +//! +//! - `convertAll` will create a pointer to one of the entries in the +//! vector, which will have type `&String` +//! - It will then call the impl of `convertTo()` that is intended +//! for use with objects. This has the type: +//! +//! fn(self: &Object) -> int +//! +//! It is ok to provide a value for `self` of type `&String` because +//! `&String <: &Object`. +//! +//! OK, so intuitively we want this to be legal, so let's bring this back +//! to variance and see whether we are computing the correct result. We +//! must first figure out how to phrase the question "is an impl for +//! `Object,int` usable where an impl for `String,int` is expected?" +//! +//! Maybe it's helpful to think of a dictionary-passing implementation of +//! type classes. In that case, `convertAll()` takes an implicit parameter +//! representing the impl. In short, we *have* an impl of type: +//! +//! V_O = ConvertTo for Object +//! +//! and the function prototype expects an impl of type: +//! +//! V_S = ConvertTo for String +//! +//! As with any argument, this is legal if the type of the value given +//! (`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`? +//! The answer will depend on the variance of the various parameters. In +//! this case, because the `Self` parameter is contravariant and `A` is +//! covariant, it means that: +//! +//! V_O <: V_S iff +//! int <: int +//! String <: Object +//! +//! These conditions are satisfied and so we are happy. +//! +//! ### The algorithm +//! +//! The basic idea is quite straightforward. We iterate over the types +//! 
defined and, for each use of a type parameter X, accumulate a +//! constraint indicating that the variance of X must be valid for the +//! variance of that use site. We then iteratively refine the variance of +//! X until all constraints are met. There is *always* a sol'n, because at +//! the limit we can declare all type parameters to be invariant and all +//! constraints will be satisfied. +//! +//! As a simple example, consider: +//! +//! enum Option { Some(A), None } +//! enum OptionalFn { Some(|B|), None } +//! enum OptionalMap { Some(|C| -> C), None } +//! +//! Here, we will generate the constraints: +//! +//! 1. V(A) <= + +//! 2. V(B) <= - +//! 3. V(C) <= + +//! 4. V(C) <= - +//! +//! These indicate that (1) the variance of A must be at most covariant; +//! (2) the variance of B must be at most contravariant; and (3, 4) the +//! variance of C must be at most covariant *and* contravariant. All of these +//! results are based on a variance lattice defined as follows: +//! +//! * Top (bivariant) +//! - + +//! o Bottom (invariant) +//! +//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the +//! optimal solution. Note that there is always a naive solution which +//! just declares all variables to be invariant. +//! +//! You may be wondering why fixed-point iteration is required. The reason +//! is that the variance of a use site may itself be a function of the +//! variance of other type parameters. In full generality, our constraints +//! take the form: +//! +//! V(X) <= Term +//! Term := + | - | * | o | V(X) | Term x Term +//! +//! Here the notation V(X) indicates the variance of a type/region +//! parameter `X` with respect to its defining class. `Term x Term` +//! represents the "variance transform" as defined in the paper: +//! +//! If the variance of a type variable `X` in type expression `E` is `V2` +//! and the definition-site variance of the [corresponding] type parameter +//! 
of a class `C` is `V1`, then the variance of `X` in the type expression +//! `C` is `V3 = V1.xform(V2)`. use std::collections::HashMap; use arena; @@ -216,18 +212,16 @@ pub fn infer_variance(tcx: &ty::ctxt, solve_constraints(constraints_cx); } -/************************************************************************** - * Representing terms - * - * Terms are structured as a straightforward tree. Rather than rely on - * GC, we allocate terms out of a bounded arena (the lifetime of this - * arena is the lifetime 'a that is threaded around). - * - * We assign a unique index to each type/region parameter whose variance - * is to be inferred. We refer to such variables as "inferreds". An - * `InferredIndex` is a newtype'd int representing the index of such - * a variable. - */ +// Representing terms +// +// Terms are structured as a straightforward tree. Rather than rely on +// GC, we allocate terms out of a bounded arena (the lifetime of this +// arena is the lifetime 'a that is threaded around). +// +// We assign a unique index to each type/region parameter whose variance +// is to be inferred. We refer to such variables as "inferreds". An +// `InferredIndex` is a newtype'd int representing the index of such +// a variable. type VarianceTermPtr<'a> = &'a VarianceTerm<'a>; @@ -249,9 +243,7 @@ impl<'a> fmt::Show for VarianceTerm<'a> { } } -/************************************************************************** - * The first pass over the crate simply builds up the set of inferreds. - */ +// The first pass over the crate simply builds up the set of inferreds. struct TermsContext<'a> { tcx: &'a ty::ctxt, @@ -393,12 +385,10 @@ impl<'a> Visitor<()> for TermsContext<'a> { } } -/************************************************************************** - * Constraint construction and representation - * - * The second pass over the AST determines the set of constraints. - * We walk the set of items and, for each member, generate new constraints. 
- */ +// Constraint construction and representation +// +// The second pass over the AST determines the set of constraints. +// We walk the set of items and, for each member, generate new constraints. struct ConstraintContext<'a> { terms_cx: TermsContext<'a>, @@ -492,7 +482,7 @@ impl<'a> Visitor<()> for ConstraintContext<'a> { let variant = ty::VariantInfo::from_ast_variant(tcx, &**ast_variant, - /*discriminant*/ 0); + /* discriminant */ 0); for arg_ty in variant.args.iter() { self.add_constraints_from_ty(*arg_ty, self.covariant); } @@ -616,6 +606,8 @@ impl<'a> ConstraintContext<'a> { return result; } + /// Returns a variance term representing the declared variance of + /// the type/region parameter with the given id. fn declared_variance(&self, param_def_id: ast::DefId, item_def_id: ast::DefId, @@ -623,11 +615,6 @@ impl<'a> ConstraintContext<'a> { space: ParamSpace, index: uint) -> VarianceTermPtr<'a> { - /*! - * Returns a variance term representing the declared variance of - * the type/region parameter with the given id. - */ - assert_eq!(param_def_id.krate, item_def_id.krate); if self.invariant_lang_items[kind as uint] == Some(item_def_id) { @@ -715,7 +702,7 @@ impl<'a> ConstraintContext<'a> { ty::ty_nil | ty::ty_bot | ty::ty_bool | ty::ty_char | ty::ty_int(_) | ty::ty_uint(_) | ty::ty_float(_) | ty::ty_str => { - /* leaf type -- noop */ + // leaf type -- noop } ty::ty_rptr(region, ref mt) => { @@ -920,14 +907,12 @@ impl<'a> ConstraintContext<'a> { } } -/************************************************************************** - * Constraint solving - * - * The final phase iterates over the constraints, refining the variance - * for each inferred until a fixed point is reached. This will be the - * optimal solution to the constraints. The final variance for each - * inferred is then written into the `variance_map` in the tcx. 
- */ +// Constraint solving +// +// The final phase iterates over the constraints, refining the variance +// for each inferred until a fixed point is reached. This will be the +// optimal solution to the constraints. The final variance for each +// inferred is then written into the `variance_map` in the tcx. struct SolveContext<'a> { terms_cx: TermsContext<'a>, @@ -1063,9 +1048,7 @@ impl<'a> SolveContext<'a> { } } -/************************************************************************** - * Miscellany transformations on variance - */ +// Miscellany transformations on variance trait Xform { fn xform(self, v: Self) -> Self; diff --git a/src/librustc/plugin/mod.rs b/src/librustc/plugin/mod.rs index fa70ffc7392f3..5e02e71c8def1 100644 --- a/src/librustc/plugin/mod.rs +++ b/src/librustc/plugin/mod.rs @@ -8,54 +8,52 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Infrastructure for compiler plugins. - * - * Plugins are Rust libraries which extend the behavior of `rustc` - * in various ways. - * - * Plugin authors will use the `Registry` type re-exported by - * this module, along with its methods. The rest of the module - * is for use by `rustc` itself. - * - * To define a plugin, build a dylib crate with a - * `#[plugin_registrar]` function: - * - * ```rust,ignore - * #![crate_id = "myplugin"] - * #![crate_type = "dylib"] - * #![feature(plugin_registrar)] - * - * extern crate rustc; - * - * use rustc::plugin::Registry; - * - * #[plugin_registrar] - * pub fn plugin_registrar(reg: &mut Registry) { - * reg.register_macro("mymacro", expand_mymacro); - * } - * - * fn expand_mymacro(...) { // details elided - * ``` - * - * WARNING: We currently don't check that the registrar function - * has the appropriate type! 
- * - * To use a plugin while compiling another crate: - * - * ```rust - * #![feature(phase)] - * - * #[phase(plugin)] - * extern crate myplugin; - * ``` - * - * If you also need the plugin crate available at runtime, use - * `phase(plugin, link)`. - * - * See `src/test/auxiliary/macro_crate_test.rs` and `src/libfourcc` - * for examples of syntax extension plugins. - */ +//! Infrastructure for compiler plugins. +//! +//! Plugins are Rust libraries which extend the behavior of `rustc` +//! in various ways. +//! +//! Plugin authors will use the `Registry` type re-exported by +//! this module, along with its methods. The rest of the module +//! is for use by `rustc` itself. +//! +//! To define a plugin, build a dylib crate with a +//! `#[plugin_registrar]` function: +//! +//! ```rust,ignore +//! #![crate_id = "myplugin"] +//! #![crate_type = "dylib"] +//! #![feature(plugin_registrar)] +//! +//! extern crate rustc; +//! +//! use rustc::plugin::Registry; +//! +//! #[plugin_registrar] +//! pub fn plugin_registrar(reg: &mut Registry) { +//! reg.register_macro("mymacro", expand_mymacro); +//! } +//! +//! fn expand_mymacro(...) { // details elided +//! ``` +//! +//! WARNING: We currently don't check that the registrar function +//! has the appropriate type! +//! +//! To use a plugin while compiling another crate: +//! +//! ```rust +//! #![feature(phase)] +//! +//! #[phase(plugin)] +//! extern crate myplugin; +//! ``` +//! +//! If you also need the plugin crate available at runtime, use +//! `phase(plugin, link)`. +//! +//! See `src/test/auxiliary/macro_crate_test.rs` and `src/libfourcc` +//! for examples of syntax extension plugins. pub use self::registry::Registry; diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index 85edd4ea481ae..b4d58ed274a10 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
- use middle::def; use middle::subst; use middle::subst::{VecPerParamSpace,Subst}; @@ -339,10 +338,10 @@ pub fn ty_to_str(cx: &ctxt, typ: t) -> String { } // if there is an id, print that instead of the structural type: - /*for def_id in ty::type_def_id(typ).iter() { - // note that this typedef cannot have type parameters - return ty::item_path_str(cx, *def_id); - }*/ + // for def_id in ty::type_def_id(typ).iter() { + // // note that this typedef cannot have type parameters + // return ty::item_path_str(cx, *def_id); + // } // pretty print the structural type representation: return match ty::get(typ).sty { diff --git a/src/librustrt/c_str.rs b/src/librustrt/c_str.rs index 161d3ed5e65e1..e5dff849a191d 100644 --- a/src/librustrt/c_str.rs +++ b/src/librustrt/c_str.rs @@ -8,62 +8,59 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -C-string manipulation and management - -This modules provides the basic methods for creating and manipulating -null-terminated strings for use with FFI calls (back to C). Most C APIs require -that the string being passed to them is null-terminated, and by default rust's -string types are *not* null terminated. - -The other problem with translating Rust strings to C strings is that Rust -strings can validly contain a null-byte in the middle of the string (0 is a -valid unicode codepoint). This means that not all Rust strings can actually be -translated to C strings. - -# Creation of a C string - -A C string is managed through the `CString` type defined in this module. It -"owns" the internal buffer of characters and will automatically deallocate the -buffer when the string is dropped. The `ToCStr` trait is implemented for `&str` -and `&[u8]`, but the conversions can fail due to some of the limitations -explained above. 
- -This also means that currently whenever a C string is created, an allocation -must be performed to place the data elsewhere (the lifetime of the C string is -not tied to the lifetime of the original string/data buffer). If C strings are -heavily used in applications, then caching may be advisable to prevent -unnecessary amounts of allocations. - -An example of creating and using a C string would be: - -```rust -extern crate libc; - -extern { - fn puts(s: *const libc::c_char); -} - -fn main() { - let my_string = "Hello, world!"; - - // Allocate the C string with an explicit local that owns the string. The - // `c_buffer` pointer will be deallocated when `my_c_string` goes out of scope. - let my_c_string = my_string.to_c_str(); - unsafe { - puts(my_c_string.as_ptr()); - } - - // Don't save/return the pointer to the C string, the `c_buffer` will be - // deallocated when this block returns! - my_string.with_c_str(|c_buffer| { - unsafe { puts(c_buffer); } - }); -} -``` - -*/ +//! C-string manipulation and management +//! +//! This modules provides the basic methods for creating and manipulating +//! null-terminated strings for use with FFI calls (back to C). Most C APIs +//! require that the string being passed to them is null-terminated, and by +//! default rust's string types are *not* null terminated. +//! +//! The other problem with translating Rust strings to C strings is that Rust +//! strings can validly contain a null-byte in the middle of the string (0 is a +//! valid unicode codepoint). This means that not all Rust strings can actually +//! be translated to C strings. +//! +//! # Creation of a C string +//! +//! A C string is managed through the `CString` type defined in this module. It +//! "owns" the internal buffer of characters and will automatically deallocate +//! the buffer when the string is dropped. The `ToCStr` trait is implemented for +//! `&str` and `&[u8]`, but the conversions can fail due to some of the +//! limitations explained above. +//! 
+//! This also means that currently whenever a C string is created, an allocation +//! must be performed to place the data elsewhere (the lifetime of the C string +//! is not tied to the lifetime of the original string/data buffer). If C +//! strings are heavily used in applications, then caching may be advisable to +//! prevent unnecessary amounts of allocations. +//! +//! An example of creating and using a C string would be: +//! +//! ```rust +//! extern crate libc; +//! +//! extern { +//! fn puts(s: *const libc::c_char); +//! } +//! +//! fn main() { +//! let my_string = "Hello, world!"; +//! +//! // Allocate the C string with an explicit local that owns the string. +//! // The `c_buffer` pointer will be deallocated when `my_c_string` goes +//! // out of scope. +//! let my_c_string = my_string.to_c_str(); +//! unsafe { +//! puts(my_c_string.as_ptr()); +//! } +//! +//! // Don't save/return the pointer to the C string, the `c_buffer` will be +//! // deallocated when this block returns! +//! my_string.with_c_str(|c_buffer| { +//! unsafe { puts(c_buffer); } +//! }); +//! } +//! ``` use core::prelude::*; diff --git a/src/librustrt/local_data.rs b/src/librustrt/local_data.rs index d4c87e9fc05c1..87e428f431591 100644 --- a/src/librustrt/local_data.rs +++ b/src/librustrt/local_data.rs @@ -8,32 +8,28 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Task local data management - -Allows storing arbitrary types inside task-local-storage (TLS), to be accessed -anywhere within a task, keyed by a global pointer parameterized over the type of -the TLS slot. Useful for dynamic variables, singletons, and interfacing with -foreign code with bad callback interfaces. - -To declare a new key for storing local data of a particular type, use the -`local_data_key!` macro. This macro will expand to a `static` item appropriately -named and annotated. 
This name is then passed to the functions in this module to -modify/read the slot specified by the key. - -```rust -local_data_key!(key_int: int) -local_data_key!(key_vector: Vec) - -key_int.replace(Some(3)); -assert_eq!(*key_int.get().unwrap(), 3); - -key_vector.replace(Some(vec![4])); -assert_eq!(*key_vector.get().unwrap(), vec![4]); -``` - -*/ +//! Task local data management +//! +//! Allows storing arbitrary types inside task-local-storage (TLS), to be +//! accessed anywhere within a task, keyed by a global pointer parameterized +//! over the type of the TLS slot. Useful for dynamic variables, singletons, +//! and interfacing with foreign code with bad callback interfaces. +//! +//! To declare a new key for storing local data of a particular type, use the +//! `local_data_key!` macro. This macro will expand to a `static` item +//! appropriately named and annotated. This name is then passed to the functions +//! in this module to modify/read the slot specified by the key. +//! +//! ```rust +//! local_data_key!(key_int: int) +//! local_data_key!(key_vector: Vec) +//! +//! key_int.replace(Some(3)); +//! assert_eq!(*key_int.get().unwrap(), 3); +//! +//! key_vector.replace(Some(vec![4])); +//! assert_eq!(*key_vector.get().unwrap(), vec![4]); +//! ``` // Casting 'Arcane Sight' reveals an overwhelming aura of Transmutation // magic. @@ -49,17 +45,15 @@ use core::raw; use local::Local; use task::{Task, LocalStorage}; -/** - * Indexes a task-local data slot. This pointer is used for comparison to - * differentiate keys from one another. The actual type `T` is not used anywhere - * as a member of this type, except that it is parameterized with it to define - * the type of each key's value. - * - * The value of each Key is of the singleton enum KeyValue. These also have the - * same name as `Key` and their purpose is to take up space in the programs data - * sections to ensure that each value of the `Key` type points to a unique - * location. 
- */ +/// Indexes a task-local data slot. This pointer is used for comparison to +/// differentiate keys from one another. The actual type `T` is not used anywhere +/// as a member of this type, except that it is parameterized with it to define +/// the type of each key's value. +/// +/// The value of each Key is of the singleton enum KeyValue. These also have the +/// same name as `Key` and their purpose is to take up space in the programs data +/// sections to ensure that each value of the `Key` type points to a unique +/// location. pub type Key = &'static KeyValue; #[allow(missing_doc)] diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index a9e6a6a4a9a87..f8b1e745218f2 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -8,31 +8,27 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Bindings to libuv, along with the default implementation of `std::rt::rtio`. - -UV types consist of the event loop (Loop), Watchers, Requests and -Callbacks. - -Watchers and Requests encapsulate pointers to uv *handles*, which have -subtyping relationships with each other. This subtyping is reflected -in the bindings with explicit or implicit coercions. For example, an -upcast from TcpWatcher to StreamWatcher is done with -`tcp_watcher.as_stream()`. In other cases a callback on a specific -type of watcher will be passed a watcher of a supertype. - -Currently all use of Request types (connect/write requests) are -encapsulated in the bindings and don't need to be dealt with by the -caller. - -# Safety note - -Due to the complex lifecycle of uv handles, as well as compiler bugs, -this module is not memory safe and requires explicit memory management, -via `close` and `delete` methods. - -*/ +//! Bindings to libuv, along with the default implementation of `std::rt::rtio`. +//! +//! UV types consist of the event loop (Loop), Watchers, Requests and +//! Callbacks. +//! +//! 
Watchers and Requests encapsulate pointers to uv *handles*, which have +//! subtyping relationships with each other. This subtyping is reflected +//! in the bindings with explicit or implicit coercions. For example, an +//! upcast from TcpWatcher to StreamWatcher is done with +//! `tcp_watcher.as_stream()`. In other cases a callback on a specific +//! type of watcher will be passed a watcher of a supertype. +//! +//! Currently all use of Request types (connect/write requests) are +//! encapsulated in the bindings and don't need to be dealt with by the +//! caller. +//! +//! # Safety note +//! +//! Due to the complex lifecycle of uv handles, as well as compiler bugs, +//! this module is not memory safe and requires explicit memory management, +//! via `close` and `delete` methods. #![crate_id = "rustuv#0.11.0"] // NOTE: remove after stage0 #![crate_name = "rustuv"] diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 863536a411101..806e6f3002c86 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -8,24 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Low-level bindings to the libuv library. - * - * This module contains a set of direct, 'bare-metal' wrappers around - * the libuv C-API. - * - * We're not bothering yet to redefine uv's structs as Rust structs - * because they are quite large and change often between versions. - * The maintenance burden is just too high. Instead we use the uv's - * `uv_handle_size` and `uv_req_size` to find the correct size of the - * structs and allocate them on the heap. This can be revisited later. - * - * There are also a collection of helper functions to ease interacting - * with the low-level API. - * - * As new functionality, existent in uv.h, is added to the rust stdlib, - * the mappings should be added in this module. - */ +//! Low-level bindings to the libuv library. +//! +//! 
This module contains a set of direct, 'bare-metal' wrappers around +//! the libuv C-API. +//! +//! We're not bothering yet to redefine uv's structs as Rust structs +//! because they are quite large and change often between versions. +//! The maintenance burden is just too high. Instead we use the uv's +//! `uv_handle_size` and `uv_req_size` to find the correct size of the +//! structs and allocate them on the heap. This can be revisited later. +//! +//! There are also a collection of helper functions to ease interacting +//! with the low-level API. +//! +//! As new functionality, existent in uv.h, is added to the rust stdlib, +//! the mappings should be added in this module. #![allow(non_camel_case_types)] // C types diff --git a/src/libserialize/base64.rs b/src/libserialize/base64.rs index 63cfbd6d9aa17..c731e95d35520 100644 --- a/src/libserialize/base64.rs +++ b/src/libserialize/base64.rs @@ -58,21 +58,19 @@ pub trait ToBase64 { } impl<'a> ToBase64 for &'a [u8] { - /** - * Turn a vector of `u8` bytes into a base64 string. - * - * # Example - * - * ```rust - * extern crate serialize; - * use serialize::base64::{ToBase64, STANDARD}; - * - * fn main () { - * let str = [52,32].to_base64(STANDARD); - * println!("base 64 output: {}", str); - * } - * ``` - */ + /// Turn a vector of `u8` bytes into a base64 string. + /// + /// # Example + /// + /// ```rust + /// extern crate serialize; + /// use serialize::base64::{ToBase64, STANDARD}; + /// + /// fn main () { + /// let str = [52,32].to_base64(STANDARD); + /// println!("base 64 output: {}", str); + /// } + /// ``` fn to_base64(&self, config: Config) -> String { let bytes = match config.char_set { Standard => STANDARD_CHARS, @@ -177,34 +175,32 @@ impl fmt::Show for FromBase64Error { } impl<'a> FromBase64 for &'a str { - /** - * Convert any base64 encoded string (literal, `@`, `&`, or `~`) - * to the byte values it encodes. 
- * - * You can use the `String::from_utf8` function in `std::string` to turn a - * `Vec` into a string with characters corresponding to those values. - * - * # Example - * - * This converts a string literal to base64 and back. - * - * ```rust - * extern crate serialize; - * use serialize::base64::{ToBase64, FromBase64, STANDARD}; - * - * fn main () { - * let hello_str = b"Hello, World".to_base64(STANDARD); - * println!("base64 output: {}", hello_str); - * let res = hello_str.as_slice().from_base64(); - * if res.is_ok() { - * let opt_bytes = String::from_utf8(res.unwrap()); - * if opt_bytes.is_ok() { - * println!("decoded from base64: {}", opt_bytes.unwrap()); - * } - * } - * } - * ``` - */ + /// Convert any base64 encoded string (literal, `@`, `&`, or `~`) + /// to the byte values it encodes. + /// + /// You can use the `String::from_utf8` function in `std::string` to turn a + /// `Vec` into a string with characters corresponding to those values. + /// + /// # Example + /// + /// This converts a string literal to base64 and back. + /// + /// ```rust + /// extern crate serialize; + /// use serialize::base64::{ToBase64, FromBase64, STANDARD}; + /// + /// fn main () { + /// let hello_str = b"Hello, World".to_base64(STANDARD); + /// println!("base64 output: {}", hello_str); + /// let res = hello_str.as_slice().from_base64(); + /// if res.is_ok() { + /// let opt_bytes = String::from_utf8(res.unwrap()); + /// if opt_bytes.is_ok() { + /// println!("decoded from base64: {}", opt_bytes.unwrap()); + /// } + /// } + /// } + /// ``` fn from_base64(&self) -> Result, FromBase64Error> { let mut r = Vec::new(); let mut buf: u32 = 0; diff --git a/src/libserialize/hex.rs b/src/libserialize/hex.rs index 51fab7b135458..e53c9bd8324a2 100644 --- a/src/libserialize/hex.rs +++ b/src/libserialize/hex.rs @@ -22,21 +22,19 @@ pub trait ToHex { static CHARS: &'static[u8] = b"0123456789abcdef"; impl<'a> ToHex for &'a [u8] { - /** - * Turn a vector of `u8` bytes into a hexadecimal string. 
- * - * # Example - * - * ```rust - * extern crate serialize; - * use serialize::hex::ToHex; - * - * fn main () { - * let str = [52,32].to_hex(); - * println!("{}", str); - * } - * ``` - */ + /// Turn a vector of `u8` bytes into a hexadecimal string. + /// + /// # Example + /// + /// ```rust + /// extern crate serialize; + /// use serialize::hex::ToHex; + /// + /// fn main () { + /// let str = [52,32].to_hex(); + /// println!("{}", str); + /// } + /// ``` fn to_hex(&self) -> String { let mut v = Vec::with_capacity(self.len() * 2); for &byte in self.iter() { @@ -76,31 +74,29 @@ impl fmt::Show for FromHexError { } impl<'a> FromHex for &'a str { - /** - * Convert any hexadecimal encoded string (literal, `@`, `&`, or `~`) - * to the byte values it encodes. - * - * You can use the `String::from_utf8` function in `std::string` to turn a - * `Vec` into a string with characters corresponding to those values. - * - * # Example - * - * This converts a string literal to hexadecimal and back. - * - * ```rust - * extern crate serialize; - * use serialize::hex::{FromHex, ToHex}; - * - * fn main () { - * let hello_str = "Hello, World".as_bytes().to_hex(); - * println!("{}", hello_str); - * let bytes = hello_str.as_slice().from_hex().unwrap(); - * println!("{}", bytes); - * let result_str = String::from_utf8(bytes).unwrap(); - * println!("{}", result_str); - * } - * ``` - */ + /// Convert any hexadecimal encoded string (literal, `@`, `&`, or `~`) + /// to the byte values it encodes. + /// + /// You can use the `String::from_utf8` function in `std::string` to turn a + /// `Vec` into a string with characters corresponding to those values. + /// + /// # Example + /// + /// This converts a string literal to hexadecimal and back. 
+ /// + /// ```rust + /// extern crate serialize; + /// use serialize::hex::{FromHex, ToHex}; + /// + /// fn main () { + /// let hello_str = "Hello, World".as_bytes().to_hex(); + /// println!("{}", hello_str); + /// let bytes = hello_str.as_slice().from_hex().unwrap(); + /// println!("{}", bytes); + /// let result_str = String::from_utf8(bytes).unwrap(); + /// println!("{}", result_str); + /// } + /// ``` fn from_hex(&self) -> Result, FromHexError> { // This may be an overestimate if there is any whitespace let mut b = Vec::with_capacity(self.len() / 2); diff --git a/src/libserialize/json.rs b/src/libserialize/json.rs index f4e5c27a14b33..46e419b8d5b85 100644 --- a/src/libserialize/json.rs +++ b/src/libserialize/json.rs @@ -14,130 +14,132 @@ #![forbid(non_camel_case_types)] #![allow(missing_doc)] -/*! -JSON parsing and serialization - -# What is JSON? - -JSON (JavaScript Object Notation) is a way to write data in Javascript. -Like XML, it allows to encode structured data in a text format that can be easily read by humans. -Its simple syntax and native compatibility with JavaScript have made it a widely used format. - -Data types that can be encoded are JavaScript types (see the `Json` enum for more details): - -* `Boolean`: equivalent to rust's `bool` -* `Number`: equivalent to rust's `f64` -* `String`: equivalent to rust's `String` -* `Array`: equivalent to rust's `Vec`, but also allowing objects of different types in the same -array -* `Object`: equivalent to rust's `Treemap` -* `Null` - -An object is a series of string keys mapping to values, in `"key": value` format. -Arrays are enclosed in square brackets ([ ... ]) and objects in curly brackets ({ ... }). 
-A simple JSON document encoding a person, his/her age, address and phone numbers could look like: - -```ignore -{ - "FirstName": "John", - "LastName": "Doe", - "Age": 43, - "Address": { - "Street": "Downing Street 10", - "City": "London", - "Country": "Great Britain" - }, - "PhoneNumbers": [ - "+44 1234567", - "+44 2345678" - ] -} -``` - -# Rust Type-based Encoding and Decoding - -Rust provides a mechanism for low boilerplate encoding & decoding of values to and from JSON via -the serialization API. -To be able to encode a piece of data, it must implement the `serialize::Encodable` trait. -To be able to decode a piece of data, it must implement the `serialize::Decodable` trait. -The Rust compiler provides an annotation to automatically generate the code for these traits: -`#[deriving(Decodable, Encodable)]` - -The JSON API provides an enum `json::Json` and a trait `ToJson` to encode objects. -The `ToJson` trait provides a `to_json` method to convert an object into a `json::Json` value. -A `json::Json` value can be encoded as a string or buffer using the functions described above. -You can also use the `json::Encoder` object, which implements the `Encoder` trait. - -When using `ToJson` the `Encodable` trait implementation is not mandatory. - -# Examples of use - -## Using Autoserialization - -Create a struct called TestStruct1 and serialize and deserialize it to and from JSON -using the serialization API, using the derived serialization code. - -```rust -extern crate serialize; -use serialize::json; - -#[deriving(Decodable, Encodable)] //generate Decodable, Encodable impl. 
-pub struct TestStruct1 { - data_int: u8, - data_str: String, - data_vector: Vec, -} - -fn main() { - let object = TestStruct1 - {data_int: 1, data_str:"toto".to_string(), data_vector:vec![2,3,4,5]}; - - // Serialize using `json::encode` - let encoded = json::encode(&object); - - // Deserialize using `json::decode` - let decoded: TestStruct1 = json::decode(encoded.as_slice()).unwrap(); -} -``` - -## Using `ToJson` - -This example uses the `ToJson` trait to generate the JSON string. - -```rust -use std::collections::TreeMap; -use serialize::json::ToJson; -use serialize::json; - -#[deriving(Decodable)] -pub struct TestStruct1 { - data_int: u8, - data_str: String, - data_vector: Vec, -} - -impl ToJson for TestStruct1 { - fn to_json( &self ) -> json::Json { - let mut d = TreeMap::new(); - d.insert("data_int".to_string(), self.data_int.to_json()); - d.insert("data_str".to_string(), self.data_str.to_json()); - d.insert("data_vector".to_string(), self.data_vector.to_json()); - json::Object(d) - } -} - -fn main() { - // Serialize using `ToJson` - let test2 = TestStruct1 {data_int: 1, data_str:"toto".to_string(), data_vector:vec![2,3,4,5]}; - let tjson: json::Json = test2.to_json(); - let json_str: String = tjson.to_str(); - - // Deserialize like before - let decoded: TestStruct1 = json::decode(json_str.as_slice()).unwrap(); -} -``` - -*/ +//! JSON parsing and serialization +//! +//! # What is JSON? +//! +//! JSON (JavaScript Object Notation) is a way to write data in Javascript. Like +//! XML, it allows to encode structured data in a text format that can be easily +//! read by humans. Its simple syntax and native compatibility with JavaScript +//! have made it a widely used format. +//! +//! Data types that can be encoded are JavaScript types (see the `Json` enum for +//! more details): +//! +//! * `Boolean`: equivalent to rust's `bool` +//! * `Number`: equivalent to rust's `f64` +//! * `String`: equivalent to rust's `String` +//! 
* `Array`: equivalent to rust's `Vec`, but also allowing objects of +//! different types in the same array +//! * `Object`: equivalent to rust's `Treemap` +//! * `Null` +//! +//! An object is a series of string keys mapping to values, in `"key": value` +//! format. Arrays are enclosed in square brackets ([ ... ]) and objects in +//! curly brackets ({ ... }). A simple JSON document encoding a person, his/her +//! age, address and phone numbers could look like: +//! +//! ```ignore +//! { +//! "FirstName": "John", +//! "LastName": "Doe", +//! "Age": 43, +//! "Address": { +//! "Street": "Downing Street 10", +//! "City": "London", +//! "Country": "Great Britain" +//! }, +//! "PhoneNumbers": [ +//! "+44 1234567", +//! "+44 2345678" +//! ] +//! } +//! ``` +//! +//! # Rust Type-based Encoding and Decoding +//! +//! Rust provides a mechanism for low boilerplate encoding & decoding of values +//! to and from JSON via the serialization API. To be able to encode a piece of +//! data, it must implement the `serialize::Encodable` trait. To be able to +//! decode a piece of data, it must implement the `serialize::Decodable` trait. +//! The Rust compiler provides an annotation to automatically generate the code +//! for these traits: `#[deriving(Decodable, Encodable)]` +//! +//! The JSON API provides an enum `json::Json` and a trait `ToJson` to encode +//! objects. The `ToJson` trait provides a `to_json` method to convert an object +//! into a `json::Json` value. A `json::Json` value can be encoded as a string +//! or buffer using the functions described above. You can also use the +//! `json::Encoder` object, which implements the `Encoder` trait. +//! +//! When using `ToJson` the `Encodable` trait implementation is not mandatory. +//! +//! # Examples of use +//! +//! ## Using Autoserialization +//! +//! Create a struct called TestStruct1 and serialize and deserialize it to and +//! from JSON using the serialization API, using the derived serialization code. +//! +//! ```rust +//! 
extern crate serialize; +//! use serialize::json; +//! +//! #[deriving(Decodable, Encodable)] //generate Decodable, Encodable impl. +//! pub struct TestStruct1 { +//! data_int: u8, +//! data_str: String, +//! data_vector: Vec, +//! } +//! +//! fn main() { +//! let object = TestStruct1 +//! {data_int: 1, data_str:"toto".to_string(), data_vector:vec![2,3,4,5]}; +//! +//! // Serialize using `json::encode` +//! let encoded = json::encode(&object); +//! +//! // Deserialize using `json::decode` +//! let decoded: TestStruct1 = json::decode(encoded.as_slice()).unwrap(); +//! } +//! ``` +//! +//! ## Using `ToJson` +//! +//! This example uses the `ToJson` trait to generate the JSON string. +//! +//! ```rust +//! use std::collections::TreeMap; +//! use serialize::json::ToJson; +//! use serialize::json; +//! +//! #[deriving(Decodable)] +//! pub struct TestStruct1 { +//! data_int: u8, +//! data_str: String, +//! data_vector: Vec, +//! } +//! +//! impl ToJson for TestStruct1 { +//! fn to_json( &self ) -> json::Json { +//! let mut d = TreeMap::new(); +//! d.insert("data_int".to_string(), self.data_int.to_json()); +//! d.insert("data_str".to_string(), self.data_str.to_json()); +//! d.insert("data_vector".to_string(), self.data_vector.to_json()); +//! json::Object(d) +//! } +//! } +//! +//! fn main() { +//! // Serialize using `ToJson` +//! let test2 = +//! TestStruct1 {data_int: 1, data_str:"toto".to_string(), data_vector:vec![2,3,4,5]}; +//! let tjson: json::Json = test2.to_json(); +//! let json_str: String = tjson.to_str(); +//! +//! // Deserialize like before +//! let decoded: TestStruct1 = json::decode(json_str.as_slice()).unwrap(); +//! } +//! ``` use std; use std::collections::{HashMap, TreeMap}; diff --git a/src/libserialize/lib.rs b/src/libserialize/lib.rs index f635c3f415099..5a751bb3f02d4 100644 --- a/src/libserialize/lib.rs +++ b/src/libserialize/lib.rs @@ -10,9 +10,7 @@ //! Support code for encoding and decoding types. -/* -Core encoding and decoding interfaces. 
-*/ +// Core encoding and decoding interfaces. #![crate_id = "serialize#0.11.0"] // NOTE: remove after stage0 #![crate_name = "serialize"] diff --git a/src/libserialize/serialize.rs b/src/libserialize/serialize.rs index 03d9445b9b94b..92f0bb4f59462 100644 --- a/src/libserialize/serialize.rs +++ b/src/libserialize/serialize.rs @@ -10,9 +10,7 @@ //! Support code for encoding and decoding types. -/* -Core encoding and decoding interfaces. -*/ +// Core encoding and decoding interfaces. use std::path; use std::rc::Rc; diff --git a/src/libstd/collections/mod.rs b/src/libstd/collections/mod.rs index ccef1c0fd2adb..716d1f6783964 100644 --- a/src/libstd/collections/mod.rs +++ b/src/libstd/collections/mod.rs @@ -8,9 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Collection types. - */ +//! Collection types. #![experimental] diff --git a/src/libstd/dynamic_lib.rs b/src/libstd/dynamic_lib.rs index 728875ce26010..de41e6c35cadf 100644 --- a/src/libstd/dynamic_lib.rs +++ b/src/libstd/dynamic_lib.rs @@ -8,13 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Dynamic library facilities. - -A simple wrapper over the platform's dynamic library facilities - -*/ +//! Dynamic library facilities. +//! +//! A simple wrapper over the platform's dynamic library facilities #![experimental] #![allow(missing_doc)] diff --git a/src/libstd/fmt.rs b/src/libstd/fmt.rs index 5834e576b0814..49250ba1a772b 100644 --- a/src/libstd/fmt.rs +++ b/src/libstd/fmt.rs @@ -8,409 +8,411 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Utilities for formatting and printing strings - -This module contains the runtime support for the `format!` syntax extension. -This macro is implemented in the compiler to emit calls to this module in order -to format arguments at runtime into strings and streams. 
- -The functions contained in this module should not normally be used in everyday -use cases of `format!`. The assumptions made by these functions are unsafe for -all inputs, and the compiler performs a large amount of validation on the -arguments to `format!` in order to ensure safety at runtime. While it is -possible to call these functions directly, it is not recommended to do so in the -general case. - -## Usage - -The `format!` macro is intended to be familiar to those coming from C's -printf/fprintf functions or Python's `str.format` function. In its current -revision, the `format!` macro returns a `String` type which is the result of -the formatting. In the future it will also be able to pass in a stream to -format arguments directly while performing minimal allocations. - -Some examples of the `format!` extension are: - -```rust -# extern crate debug; -# fn main() { -format!("Hello"); // => "Hello" -format!("Hello, {:s}!", "world"); // => "Hello, world!" -format!("The number is {:d}", 1i); // => "The number is 1" -format!("{:?}", (3i, 4i)); // => "(3, 4)" -format!("{value}", value=4i); // => "4" -format!("{} {}", 1i, 2i); // => "1 2" -# } -``` - -From these, you can see that the first argument is a format string. It is -required by the compiler for this to be a string literal; it cannot be a -variable passed in (in order to perform validity checking). The compiler will -then parse the format string and determine if the list of arguments provided is -suitable to pass to this format string. - -### Positional parameters - -Each formatting argument is allowed to specify which value argument it's -referencing, and if omitted it is assumed to be "the next argument". For -example, the format string `{} {} {}` would take three parameters, and they -would be formatted in the same order as they're given. The format string -`{2} {1} {0}`, however, would format arguments in reverse order. 
- -Things can get a little tricky once you start intermingling the two types of -positional specifiers. The "next argument" specifier can be thought of as an -iterator over the argument. Each time a "next argument" specifier is seen, the -iterator advances. This leads to behavior like this: - -```rust -format!("{1} {} {0} {}", 1i, 2i); // => "2 1 1 2" -``` - -The internal iterator over the argument has not been advanced by the time the -first `{}` is seen, so it prints the first argument. Then upon reaching the -second `{}`, the iterator has advanced forward to the second argument. -Essentially, parameters which explicitly name their argument do not affect -parameters which do not name an argument in terms of positional specifiers. - -A format string is required to use all of its arguments, otherwise it is a -compile-time error. You may refer to the same argument more than once in the -format string, although it must always be referred to with the same type. - -### Named parameters - -Rust itself does not have a Python-like equivalent of named parameters to a -function, but the `format!` macro is a syntax extension which allows it to -leverage named parameters. Named parameters are listed at the end of the -argument list and have the syntax: - -```text -identifier '=' expression -``` - -For example, the following `format!` expressions all use named argument: - -```rust -# extern crate debug; -# fn main() { -format!("{argument}", argument = "test"); // => "test" -format!("{name} {}", 1i, name = 2i); // => "2 1" -format!("{a:s} {c:d} {b:?}", a="a", b=(), c=3i); // => "a 3 ()" -# } -``` - -It is illegal to put positional parameters (those without names) after arguments -which have names. Like with positional parameters, it is illegal to provide -named parameters that are unused by the format string. - -### Argument types - -Each argument's type is dictated by the format string. It is a requirement that -every argument is only ever referred to by one type. 
For example, this is an -invalid format string: - -```text -{0:d} {0:s} -``` - -This is invalid because the first argument is both referred to as an integer as -well as a string. - -Because formatting is done via traits, there is no requirement that the -`d` format actually takes an `int`, but rather it simply requires a type which -ascribes to the `Signed` formatting trait. There are various parameters which do -require a particular type, however. Namely if the syntax `{:.*s}` is used, then -the number of characters to print from the string precedes the actual string and -must have the type `uint`. Although a `uint` can be printed with `{:u}`, it is -illegal to reference an argument as such. For example, this is another invalid -format string: - -```text -{:.*s} {0:u} -``` - -### Formatting traits - -When requesting that an argument be formatted with a particular type, you are -actually requesting that an argument ascribes to a particular trait. This allows -multiple actual types to be formatted via `{:d}` (like `i8` as well as `int`). -The current mapping of types to traits is: - -* *nothing* ⇒ `Show` -* `d` ⇒ `Signed` -* `i` ⇒ `Signed` -* `u` ⇒ `Unsigned` -* `b` ⇒ `Bool` -* `c` ⇒ `Char` -* `o` ⇒ `Octal` -* `x` ⇒ `LowerHex` -* `X` ⇒ `UpperHex` -* `s` ⇒ `String` -* `p` ⇒ `Pointer` -* `t` ⇒ `Binary` -* `f` ⇒ `Float` -* `e` ⇒ `LowerExp` -* `E` ⇒ `UpperExp` -* `?` ⇒ `Poly` - -> **Note**: The `Poly` formatting trait is provided by [libdebug](../../debug/) -> and is an experimental implementation that should not be relied upon. In order -> to use the `?` modifier, the libdebug crate must be linked against. - -What this means is that any type of argument which implements the -`std::fmt::Binary` trait can then be formatted with `{:t}`. Implementations are -provided for these traits for a number of primitive types by the standard -library as well. If no format is specified (as in `{}` or `{:6}`), then the -format trait used is the `Show` trait. 
This is one of the more commonly -implemented traits when formatting a custom type. - -When implementing a format trait for your own type, you will have to implement a -method of the signature: - -```rust -# use std; -# mod fmt { pub type Result = (); } -# struct T; -# trait SomeName { -fn fmt(&self, f: &mut std::fmt::Formatter) -> fmt::Result; -# } -``` - -Your type will be passed as `self` by-reference, and then the function should -emit output into the `f.buf` stream. It is up to each format trait -implementation to correctly adhere to the requested formatting parameters. The -values of these parameters will be listed in the fields of the `Formatter` -struct. In order to help with this, the `Formatter` struct also provides some -helper methods. - -Additionally, the return value of this function is `fmt::Result` which is a -typedef to `Result<(), IoError>` (also known as `IoResult<()>`). Formatting -implementations should ensure that they return errors from `write!` correctly -(propagating errors upward). - -An example of implementing the formatting traits would look -like: - -```rust -use std::fmt; -use std::f64; - -struct Vector2D { - x: int, - y: int, -} - -impl fmt::Show for Vector2D { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // The `f` value implements the `Writer` trait, which is what the - // write! macro is expecting. Note that this formatting ignores the - // various flags provided to format strings. - write!(f, "({}, {})", self.x, self.y) - } -} - -// Different traits allow different forms of output of a type. The meaning of -// this format is to print the magnitude of a vector. -impl fmt::Binary for Vector2D { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let magnitude = (self.x * self.x + self.y * self.y) as f64; - let magnitude = magnitude.sqrt(); - - // Respect the formatting flags by using the helper method - // `pad_integral` on the Formatter object. 
See the method documentation - // for details, and the function `pad` can be used to pad strings. - let decimals = f.precision.unwrap_or(3); - let string = f64::to_str_exact(magnitude, decimals); - f.pad_integral(true, "", string.as_bytes()) - } -} - -fn main() { - let myvector = Vector2D { x: 3, y: 4 }; - - println!("{}", myvector); // => "(3, 4)" - println!("{:10.3t}", myvector); // => " 5.000" -} -``` - -### Related macros - -There are a number of related macros in the `format!` family. The ones that are -currently implemented are: - -```ignore -format! // described above -write! // first argument is a &mut io::Writer, the destination -writeln! // same as write but appends a newline -print! // the format string is printed to the standard output -println! // same as print but appends a newline -format_args! // described below. -``` - - -#### `write!` - -This and `writeln` are two macros which are used to emit the format string to a -specified stream. This is used to prevent intermediate allocations of format -strings and instead directly write the output. Under the hood, this function is -actually invoking the `write` function defined in this module. Example usage is: - -```rust -# #![allow(unused_must_use)] -use std::io; - -let mut w = io::MemWriter::new(); -write!(&mut w as &mut io::Writer, "Hello {}!", "world"); -``` - -#### `print!` - -This and `println` emit their output to stdout. Similarly to the `write!` macro, -the goal of these macros is to avoid intermediate allocations when printing -output. Example usage is: - -```rust -print!("Hello {}!", "world"); -println!("I have a newline {}", "character at the end"); -``` - -#### `format_args!` -This is a curious macro which is used to safely pass around -an opaque object describing the format string. This object -does not require any heap allocations to create, and it only -references information on the stack. Under the hood, all of -the related macros are implemented in terms of this. 
First -off, some example usage is: - -``` -use std::fmt; -use std::io; - -# #[allow(unused_must_use)] -# fn main() { -format_args!(fmt::format, "this returns {}", "String"); - -let some_writer: &mut io::Writer = &mut io::stdout(); -format_args!(|args| { write!(some_writer, "{}", args) }, "print with a {}", "closure"); - -fn my_fmt_fn(args: &fmt::Arguments) { - write!(&mut io::stdout(), "{}", args); -} -format_args!(my_fmt_fn, "or a {} too", "function"); -# } -``` - -The first argument of the `format_args!` macro is a function (or closure) which -takes one argument of type `&fmt::Arguments`. This structure can then be -passed to the `write` and `format` functions inside this module in order to -process the format string. The goal of this macro is to even further prevent -intermediate allocations when dealing formatting strings. - -For example, a logging library could use the standard formatting syntax, but it -would internally pass around this structure until it has been determined where -output should go to. - -It is unsafe to programmatically create an instance of `fmt::Arguments` because -the operations performed when executing a format string require the compile-time -checks provided by the compiler. The `format_args!` macro is the only method of -safely creating these structures, but they can be unsafely created with the -constructor provided. - -## Syntax - -The syntax for the formatting language used is drawn from other languages, so it -should not be too alien. Arguments are formatted with python-like syntax, -meaning that arguments are surrounded by `{}` instead of the C-like `%`. The -actual grammar for the formatting syntax is: - -```text -format_string := [ format ] * -format := '{' [ argument ] [ ':' format_spec ] '}' -argument := integer | identifier - -format_spec := [[fill]align][sign]['#'][0][width]['.' 
precision][type] -fill := character -align := '<' | '>' -sign := '+' | '-' -width := count -precision := count | '*' -type := identifier | '' -count := parameter | integer -parameter := integer '$' -``` - -## Formatting Parameters - -Each argument being formatted can be transformed by a number of formatting -parameters (corresponding to `format_spec` in the syntax above). These -parameters affect the string representation of what's being formatted. This -syntax draws heavily from Python's, so it may seem a bit familiar. - -### Fill/Alignment - -The fill character is provided normally in conjunction with the `width` -parameter. This indicates that if the value being formatted is smaller than -`width` some extra characters will be printed around it. The extra characters -are specified by `fill`, and the alignment can be one of two options: - -* `<` - the argument is left-aligned in `width` columns -* `>` - the argument is right-aligned in `width` columns - -### Sign/#/0 - -These can all be interpreted as flags for a particular formatter. - -* '+' - This is intended for numeric types and indicates that the sign should - always be printed. Positive signs are never printed by default, and the - negative sign is only printed by default for the `Signed` trait. This - flag indicates that the correct sign (+ or -) should always be printed. -* '-' - Currently not used -* '#' - This flag is indicates that the "alternate" form of printing should be - used. By default, this only applies to the integer formatting traits and - performs like: - * `x` - precedes the argument with a "0x" - * `X` - precedes the argument with a "0x" - * `t` - precedes the argument with a "0b" - * `o` - precedes the argument with a "0o" -* '0' - This is used to indicate for integer formats that the padding should - both be done with a `0` character as well as be sign-aware. 
A format - like `{:08d}` would yield `00000001` for the integer `1`, while the same - format would yield `-0000001` for the integer `-1`. Notice that the - negative version has one fewer zero than the positive version. - -### Width - -This is a parameter for the "minimum width" that the format should take up. If -the value's string does not fill up this many characters, then the padding -specified by fill/alignment will be used to take up the required space. - -The default fill/alignment for non-numerics is a space and left-aligned. The -defaults for numeric formatters is also a space but with right-alignment. If the -'0' flag is specified for numerics, then the implicit fill character is '0'. - -The value for the width can also be provided as a `uint` in the list of -parameters by using the `2$` syntax indicating that the second argument is a -`uint` specifying the width. - -### Precision - -For non-numeric types, this can be considered a "maximum width". If the -resulting string is longer than this width, then it is truncated down to this -many characters and only those are emitted. - -For integral types, this has no meaning currently. - -For floating-point types, this indicates how many digits after the decimal point -should be printed. - -## Escaping - -The literal characters `{` and `}` may be included in a string by preceding them -with the same character. For example, the `{` character is escaped with `{{` and -the `}` character is escaped with `}}`. - -*/ +//! Utilities for formatting and printing strings +//! +//! This module contains the runtime support for the `format!` syntax extension. +//! This macro is implemented in the compiler to emit calls to this module in +//! order to format arguments at runtime into strings and streams. +//! +//! The functions contained in this module should not normally be used in +//! everyday use cases of `format!`. The assumptions made by these functions are +//! 
unsafe for all inputs, and the compiler performs a large amount of +//! validation on the arguments to `format!` in order to ensure safety at +//! runtime. While it is possible to call these functions directly, it is not +//! recommended to do so in the general case. +//! +//! ## Usage +//! +//! The `format!` macro is intended to be familiar to those coming from C's +//! printf/fprintf functions or Python's `str.format` function. In its current +//! revision, the `format!` macro returns a `String` type which is the result of +//! the formatting. In the future it will also be able to pass in a stream to +//! format arguments directly while performing minimal allocations. +//! +//! Some examples of the `format!` extension are: +//! +//! ```rust +//! # extern crate debug; +//! # fn main() { +//! format!("Hello"); // => "Hello" +//! format!("Hello, {:s}!", "world"); // => "Hello, world!" +//! format!("The number is {:d}", 1i); // => "The number is 1" +//! format!("{:?}", (3i, 4i)); // => "(3, 4)" +//! format!("{value}", value=4i); // => "4" +//! format!("{} {}", 1i, 2i); // => "1 2" +//! # } +//! ``` +//! +//! From these, you can see that the first argument is a format string. It is +//! required by the compiler for this to be a string literal; it cannot be a +//! variable passed in (in order to perform validity checking). The compiler +//! will then parse the format string and determine if the list of arguments +//! provided is suitable to pass to this format string. +//! +//! ### Positional parameters +//! +//! Each formatting argument is allowed to specify which value argument it's +//! referencing, and if omitted it is assumed to be "the next argument". For +//! example, the format string `{} {} {}` would take three parameters, and they +//! would be formatted in the same order as they're given. The format string +//! `{2} {1} {0}`, however, would format arguments in reverse order. +//! +//! 
Things can get a little tricky once you start intermingling the two types of
+//! positional specifiers. The "next argument" specifier can be thought of as an
+//! iterator over the argument. Each time a "next argument" specifier is seen,
+//! the iterator advances. This leads to behavior like this:
+//!
+//! ```rust
+//! format!("{1} {} {0} {}", 1i, 2i); // => "2 1 1 2"
+//! ```
+//!
+//! The internal iterator over the argument has not been advanced by the time
+//! the first `{}` is seen, so it prints the first argument. Then upon reaching
+//! the second `{}`, the iterator has advanced forward to the second argument.
+//! Essentially, parameters which explicitly name their argument do not affect
+//! parameters which do not name an argument in terms of positional specifiers.
+//!
+//! A format string is required to use all of its arguments, otherwise it is a
+//! compile-time error. You may refer to the same argument more than once in the
+//! format string, although it must always be referred to with the same type.
+//!
+//! ### Named parameters
+//!
+//! Rust itself does not have a Python-like equivalent of named parameters to a
+//! function, but the `format!` macro is a syntax extension which allows it to
+//! leverage named parameters. Named parameters are listed at the end of the
+//! argument list and have the syntax:
+//!
+//! ```text
+//! identifier '=' expression
+//! ```
+//!
+//! For example, the following `format!` expressions all use named arguments:
+//!
+//! ```rust
+//! # extern crate debug;
+//! # fn main() {
+//! format!("{argument}", argument = "test"); // => "test"
+//! format!("{name} {}", 1i, name = 2i); // => "2 1"
+//! format!("{a:s} {c:d} {b:?}", a="a", b=(), c=3i); // => "a 3 ()"
+//! # }
+//! ```
+//!
+//! It is illegal to put positional parameters (those without names) after
+//! arguments which have names. Like with positional parameters, it is illegal
+//! to provide named parameters that are unused by the format string.
+//!
+//! 
### Argument types +//! +//! Each argument's type is dictated by the format string. It is a requirement +//! that every argument is only ever referred to by one type. For example, this +//! is an invalid format string: +//! +//! ```text +//! {0:d} {0:s} +//! ``` +//! +//! This is invalid because the first argument is both referred to as an integer +//! as well as a string. +//! +//! Because formatting is done via traits, there is no requirement that the +//! `d` format actually takes an `int`, but rather it simply requires a type +//! which ascribes to the `Signed` formatting trait. There are various +//! parameters which do require a particular type, however. Namely if the syntax +//! `{:.*s}` is used, then the number of characters to print from the string +//! precedes the actual string and must have the type `uint`. Although a `uint` +//! can be printed with `{:u}`, it is illegal to reference an argument as such. +//! For example, this is another invalid format string: +//! +//! ```text +//! {:.*s} {0:u} +//! ``` +//! +//! ### Formatting traits +//! +//! When requesting that an argument be formatted with a particular type, you +//! are actually requesting that an argument ascribes to a particular trait. +//! This allows multiple actual types to be formatted via `{:d}` (like `i8` as +//! well as `int`). The current mapping of types to traits is: +//! +//! * *nothing* ⇒ `Show` +//! * `d` ⇒ `Signed` +//! * `i` ⇒ `Signed` +//! * `u` ⇒ `Unsigned` +//! * `b` ⇒ `Bool` +//! * `c` ⇒ `Char` +//! * `o` ⇒ `Octal` +//! * `x` ⇒ `LowerHex` +//! * `X` ⇒ `UpperHex` +//! * `s` ⇒ `String` +//! * `p` ⇒ `Pointer` +//! * `t` ⇒ `Binary` +//! * `f` ⇒ `Float` +//! * `e` ⇒ `LowerExp` +//! * `E` ⇒ `UpperExp` +//! * `?` ⇒ `Poly` +//! +//! > **Note**: The `Poly` formatting trait is provided by [libdebug](../../debug/) +//! > and is an experimental implementation that should not be relied upon. In +//! > order to use the `?` modifier, the libdebug crate must be linked against. +//! +//! 
What this means is that any type of argument which implements the +//! `std::fmt::Binary` trait can then be formatted with `{:t}`. Implementations +//! are provided for these traits for a number of primitive types by the +//! standard library as well. If no format is specified (as in `{}` or `{:6}`), +//! then the format trait used is the `Show` trait. This is one of the more +//! commonly implemented traits when formatting a custom type. +//! +//! When implementing a format trait for your own type, you will have to +//! implement a method of the signature: +//! +//! ```rust +//! # use std; +//! # mod fmt { pub type Result = (); } +//! # struct T; +//! # trait SomeName { +//! fn fmt(&self, f: &mut std::fmt::Formatter) -> fmt::Result; +//! # } +//! ``` +//! +//! Your type will be passed as `self` by-reference, and then the function +//! should emit output into the `f.buf` stream. It is up to each format trait +//! implementation to correctly adhere to the requested formatting parameters. +//! The values of these parameters will be listed in the fields of the +//! `Formatter` struct. In order to help with this, the `Formatter` struct also +//! provides some helper methods. +//! +//! Additionally, the return value of this function is `fmt::Result` which is a +//! typedef to `Result<(), IoError>` (also known as `IoResult<()>`). Formatting +//! implementations should ensure that they return errors from `write!` +//! correctly (propagating errors upward). +//! +//! An example of implementing the formatting traits would look +//! like: +//! +//! ```rust +//! use std::fmt; +//! use std::f64; +//! +//! struct Vector2D { +//! x: int, +//! y: int, +//! } +//! +//! impl fmt::Show for Vector2D { +//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +//! // The `f` value implements the `Writer` trait, which is what the +//! // write! macro is expecting. Note that this formatting ignores the +//! // various flags provided to format strings. +//! 
write!(f, "({}, {})", self.x, self.y) +//! } +//! } +//! +//! // Different traits allow different forms of output of a type. The meaning +//! // of this format is to print the magnitude of a vector. +//! impl fmt::Binary for Vector2D { +//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +//! let magnitude = (self.x * self.x + self.y * self.y) as f64; +//! let magnitude = magnitude.sqrt(); +//! +//! // Respect the formatting flags by using the helper method +//! // `pad_integral` on the Formatter object. See the method +//! // documentation for details, and the function `pad` can be used to +//! // pad strings. +//! let decimals = f.precision.unwrap_or(3); +//! let string = f64::to_str_exact(magnitude, decimals); +//! f.pad_integral(true, "", string.as_bytes()) +//! } +//! } +//! +//! fn main() { +//! let myvector = Vector2D { x: 3, y: 4 }; +//! +//! println!("{}", myvector); // => "(3, 4)" +//! println!("{:10.3t}", myvector); // => " 5.000" +//! } +//! ``` +//! +//! ### Related macros +//! +//! There are a number of related macros in the `format!` family. The ones that +//! are currently implemented are: +//! +//! ```ignore +//! format! // described above +//! write! // first argument is a &mut io::Writer, the destination +//! writeln! // same as write but appends a newline +//! print! // the format string is printed to the standard output +//! println! // same as print but appends a newline +//! format_args! // described below. +//! ``` +//! +//! +//! #### `write!` +//! +//! This and `writeln` are two macros which are used to emit the format string +//! to a specified stream. This is used to prevent intermediate allocations of +//! format strings and instead directly write the output. Under the hood, this +//! function is actually invoking the `write` function defined in this module. +//! Example usage is: +//! +//! ```rust +//! # #![allow(unused_must_use)] +//! use std::io; +//! +//! let mut w = io::MemWriter::new(); +//! 
write!(&mut w as &mut io::Writer, "Hello {}!", "world"); +//! ``` +//! +//! #### `print!` +//! +//! This and `println` emit their output to stdout. Similarly to the `write!` +//! macro, the goal of these macros is to avoid intermediate allocations when +//! printing output. Example usage is: +//! +//! ```rust +//! print!("Hello {}!", "world"); +//! println!("I have a newline {}", "character at the end"); +//! ``` +//! +//! #### `format_args!` +//! This is a curious macro which is used to safely pass around +//! an opaque object describing the format string. This object +//! does not require any heap allocations to create, and it only +//! references information on the stack. Under the hood, all of +//! the related macros are implemented in terms of this. First +//! off, some example usage is: +//! +//! ``` +//! use std::fmt; +//! use std::io; +//! +//! # #[allow(unused_must_use)] +//! # fn main() { +//! format_args!(fmt::format, "this returns {}", "String"); +//! +//! let some_writer: &mut io::Writer = &mut io::stdout(); +//! format_args!(|args| { write!(some_writer, "{}", args) }, "print with a {}", "closure"); +//! +//! fn my_fmt_fn(args: &fmt::Arguments) { +//! write!(&mut io::stdout(), "{}", args); +//! } +//! format_args!(my_fmt_fn, "or a {} too", "function"); +//! # } +//! ``` +//! +//! The first argument of the `format_args!` macro is a function (or closure) +//! which takes one argument of type `&fmt::Arguments`. This structure can then +//! be passed to the `write` and `format` functions inside this module in order +//! to process the format string. The goal of this macro is to even further +//! prevent intermediate allocations when dealing formatting strings. +//! +//! For example, a logging library could use the standard formatting syntax, but +//! it would internally pass around this structure until it has been determined +//! where output should go to. +//! +//! It is unsafe to programmatically create an instance of `fmt::Arguments` +//! 
because the operations performed when executing a format string require the +//! compile-time checks provided by the compiler. The `format_args!` macro is +//! the only method of safely creating these structures, but they can be +//! unsafely created with the constructor provided. +//! +//! ## Syntax +//! +//! The syntax for the formatting language used is drawn from other languages, +//! so it should not be too alien. Arguments are formatted with python-like +//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like +//! `%`. The actual grammar for the formatting syntax is: +//! +//! ```text +//! format_string := [ format ] * +//! format := '{' [ argument ] [ ':' format_spec ] '}' +//! argument := integer | identifier +//! +//! format_spec := [[fill]align][sign]['#'][0][width]['.' precision][type] +//! fill := character +//! align := '<' | '>' +//! sign := '+' | '-' +//! width := count +//! precision := count | '*' +//! type := identifier | '' +//! count := parameter | integer +//! parameter := integer '$' +//! ``` +//! +//! ## Formatting Parameters +//! +//! Each argument being formatted can be transformed by a number of formatting +//! parameters (corresponding to `format_spec` in the syntax above). These +//! parameters affect the string representation of what's being formatted. This +//! syntax draws heavily from Python's, so it may seem a bit familiar. +//! +//! ### Fill/Alignment +//! +//! The fill character is provided normally in conjunction with the `width` +//! parameter. This indicates that if the value being formatted is smaller than +//! `width` some extra characters will be printed around it. The extra +//! characters are specified by `fill`, and the alignment can be one of two +//! options: +//! +//! * `<` - the argument is left-aligned in `width` columns +//! * `>` - the argument is right-aligned in `width` columns +//! +//! ### Sign/#/0 +//! +//! These can all be interpreted as flags for a particular formatter. +//! +//! 
* '+' - This is intended for numeric types and indicates that the sign
+//! should always be printed. Positive signs are never printed by
+//! default, and the negative sign is only printed by default for the
+//! `Signed` trait. This flag indicates that the correct sign (+ or -)
+//! should always be printed.
+//! * '-' - Currently not used
+//! * '#' - This flag indicates that the "alternate" form of printing should
+//! be used. By default, this only applies to the integer formatting
+//! traits and performs like:
+//! * `x` - precedes the argument with a "0x"
+//! * `X` - precedes the argument with a "0x"
+//! * `t` - precedes the argument with a "0b"
+//! * `o` - precedes the argument with a "0o"
+//! * '0' - This is used to indicate for integer formats that the padding should
+//! both be done with a `0` character as well as be sign-aware. A format
+//! like `{:08d}` would yield `00000001` for the integer `1`, while the
+//! same format would yield `-0000001` for the integer `-1`. Notice that
+//! the negative version has one fewer zero than the positive version.
+//!
+//! ### Width
+//!
+//! This is a parameter for the "minimum width" that the format should take up.
+//! If the value's string does not fill up this many characters, then the
+//! padding specified by fill/alignment will be used to take up the required
+//! space.
+//!
+//! The default fill/alignment for non-numerics is a space and left-aligned. The
+//! defaults for numeric formatters are also a space but with right-alignment. If
+//! the '0' flag is specified for numerics, then the implicit fill character
+//! is '0'.
+//!
+//! The value for the width can also be provided as a `uint` in the list of
+//! parameters by using the `2$` syntax indicating that the second argument is a
+//! `uint` specifying the width.
+//!
+//! ### Precision
+//!
+//! For non-numeric types, this can be considered a "maximum width". If the
+//! 
resulting string is longer than this width, then it is truncated down to +//! this many characters and only those are emitted. +//! +//! For integral types, this has no meaning currently. +//! +//! For floating-point types, this indicates how many digits after the decimal +//! point should be printed. +//! +//! ## Escaping +//! +//! The literal characters `{` and `}` may be included in a string by preceding +//! them with the same character. For example, the `{` character is escaped with +//! `{{` and the `}` character is escaped with `}}`. #![experimental] diff --git a/src/libstd/gc.rs b/src/libstd/gc.rs index 47b7426633c7b..14f497628c62d 100644 --- a/src/libstd/gc.rs +++ b/src/libstd/gc.rs @@ -8,13 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! Task-local garbage-collected boxes - -The `Gc` type provides shared ownership of an immutable value. Destruction is not deterministic, and -will occur some time between every `Gc` handle being gone and the end of the task. The garbage -collector is task-local so `Gc` is not sendable. - -*/ +//! Task-local garbage-collected boxes +//! +//! The `Gc` type provides shared ownership of an immutable value. Destruction +//! is not deterministic, and will occur some time between every `Gc` handle +//! being gone and the end of the task. The garbage collector is task-local so +//! `Gc` is not sendable. #![experimental] #![allow(experimental)] diff --git a/src/libstd/hash.rs b/src/libstd/hash.rs index 2cc7e70747a79..069d33c1bb4ce 100644 --- a/src/libstd/hash.rs +++ b/src/libstd/hash.rs @@ -8,58 +8,56 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Generic hashing support. - * - * This module provides a generic way to compute the hash of a value. 
The - * simplest way to make a type hashable is to use `#[deriving(Hash)]`: - * - * # Example - * - * ```rust - * use std::hash; - * use std::hash::Hash; - * - * #[deriving(Hash)] - * struct Person { - * id: uint, - * name: String, - * phone: u64, - * } - * - * let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 }; - * let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 }; - * - * assert!(hash::hash(&person1) != hash::hash(&person2)); - * ``` - * - * If you need more control over how a value is hashed, you need to implement - * the trait `Hash`: - * - * ```rust - * use std::hash; - * use std::hash::Hash; - * use std::hash::sip::SipState; - * - * struct Person { - * id: uint, - * name: String, - * phone: u64, - * } - * - * impl Hash for Person { - * fn hash(&self, state: &mut SipState) { - * self.id.hash(state); - * self.phone.hash(state); - * } - * } - * - * let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 }; - * let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 }; - * - * assert!(hash::hash(&person1) == hash::hash(&person2)); - * ``` - */ +//! Generic hashing support. +//! +//! This module provides a generic way to compute the hash of a value. The +//! simplest way to make a type hashable is to use `#[deriving(Hash)]`: +//! +//! # Example +//! +//! ```rust +//! use std::hash; +//! use std::hash::Hash; +//! +//! #[deriving(Hash)] +//! struct Person { +//! id: uint, +//! name: String, +//! phone: u64, +//! } +//! +//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 }; +//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 }; +//! +//! assert!(hash::hash(&person1) != hash::hash(&person2)); +//! ``` +//! +//! If you need more control over how a value is hashed, you need to implement +//! the trait `Hash`: +//! +//! ```rust +//! use std::hash; +//! use std::hash::Hash; +//! use std::hash::sip::SipState; +//! +//! 
struct Person { +//! id: uint, +//! name: String, +//! phone: u64, +//! } +//! +//! impl Hash for Person { +//! fn hash(&self, state: &mut SipState) { +//! self.id.hash(state); +//! self.phone.hash(state); +//! } +//! } +//! +//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 }; +//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 }; +//! +//! assert!(hash::hash(&person1) == hash::hash(&person2)); +//! ``` pub use core_collections::hash::{Hash, Hasher, Writer, hash, sip}; diff --git a/src/libstd/io/fs.rs b/src/libstd/io/fs.rs index e7f26c7bd910e..39dc4a672061a 100644 --- a/src/libstd/io/fs.rs +++ b/src/libstd/io/fs.rs @@ -8,46 +8,45 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! Synchronous File I/O - -This module provides a set of functions and traits for working -with regular files & directories on a filesystem. - -At the top-level of the module are a set of freestanding functions, associated -with various filesystem operations. They all operate on `Path` objects. - -All operations in this module, including those as part of `File` et al -block the task during execution. In the event of failure, all functions/methods -will return an `IoResult` type with an `Err` value. - -Also included in this module is an implementation block on the `Path` object -defined in `std::path::Path`. The impl adds useful methods about inspecting the -metadata of a file. This includes getting the `stat` information, reading off -particular bits of it, etc. 
- -# Example - -```rust -# #![allow(unused_must_use)] -use std::io::{File, fs}; - -let path = Path::new("foo.txt"); - -// create the file, whether it exists or not -let mut file = File::create(&path); -file.write(b"foobar"); -# drop(file); - -// open the file in read-only mode -let mut file = File::open(&path); -file.read_to_end(); - -println!("{}", path.stat().unwrap().size); -# drop(file); -fs::unlink(&path); -``` - -*/ +//! Synchronous File I/O +//! +//! This module provides a set of functions and traits for working +//! with regular files & directories on a filesystem. +//! +//! At the top-level of the module are a set of freestanding functions, +//! associated with various filesystem operations. They all operate on `Path` +//! objects. +//! +//! All operations in this module, including those as part of `File` et al +//! block the task during execution. In the event of failure, all +//! functions/methods will return an `IoResult` type with an `Err` value. +//! +//! Also included in this module is an implementation block on the `Path` object +//! defined in `std::path::Path`. The impl adds useful methods about inspecting +//! the metadata of a file. This includes getting the `stat` information, +//! reading off particular bits of it, etc. +//! +//! # Example +//! +//! ```rust +//! # #![allow(unused_must_use)] +//! use std::io::{File, fs}; +//! +//! let path = Path::new("foo.txt"); +//! +//! // create the file, whether it exists or not +//! let mut file = File::create(&path); +//! file.write(b"foobar"); +//! # drop(file); +//! +//! // open the file in read-only mode +//! let mut file = File::open(&path); +//! file.read_to_end(); +//! +//! println!("{}", path.stat().unwrap().size); +//! # drop(file); +//! fs::unlink(&path); +//! 
``` use c_str::ToCStr; use clone::Clone; diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs index 1d339b03af671..d6cc94e4258ee 100644 --- a/src/libstd/io/mod.rs +++ b/src/libstd/io/mod.rs @@ -13,208 +13,205 @@ // stdio, print!, println!, file access, process spawning, // error handling - -/*! I/O, including files, networking, timers, and processes - -`std::io` provides Rust's basic I/O types, -for reading and writing to files, TCP, UDP, -and other types of sockets and pipes, -manipulating the file system, spawning processes and signal handling. - -# Examples - -Some examples of obvious things you might want to do - -* Read lines from stdin - - ```rust - use std::io; - - for line in io::stdin().lines() { - print!("{}", line.unwrap()); - } - ``` - -* Read a complete file - - ```rust - use std::io::File; - - let contents = File::open(&Path::new("message.txt")).read_to_end(); - ``` - -* Write a line to a file - - ```rust - # #![allow(unused_must_use)] - use std::io::File; - - let mut file = File::create(&Path::new("message.txt")); - file.write(b"hello, file!\n"); - # drop(file); - # ::std::io::fs::unlink(&Path::new("message.txt")); - ``` - -* Iterate over the lines of a file - - ```rust,no_run - use std::io::BufferedReader; - use std::io::File; - - let path = Path::new("message.txt"); - let mut file = BufferedReader::new(File::open(&path)); - for line in file.lines() { - print!("{}", line.unwrap()); - } - ``` - -* Pull the lines of a file into a vector of strings - - ```rust,no_run - use std::io::BufferedReader; - use std::io::File; - - let path = Path::new("message.txt"); - let mut file = BufferedReader::new(File::open(&path)); - let lines: Vec = file.lines().map(|x| x.unwrap()).collect(); - ``` - -* Make a simple TCP client connection and request - - ```rust - # #![allow(unused_must_use)] - use std::io::TcpStream; - - # // connection doesn't fail if a server is running on 8080 - # // locally, we still want to be type checking this code, so lets - # // just stop 
it running (#11576) - # if false { - let mut socket = TcpStream::connect("127.0.0.1", 8080).unwrap(); - socket.write(b"GET / HTTP/1.0\n\n"); - let response = socket.read_to_end(); - # } - ``` - -* Make a simple TCP server - - ```rust - # fn main() { } - # fn foo() { - # #![allow(dead_code)] - use std::io::{TcpListener, TcpStream}; - use std::io::{Acceptor, Listener}; - - let listener = TcpListener::bind("127.0.0.1", 80); - - // bind the listener to the specified address - let mut acceptor = listener.listen(); - - fn handle_client(mut stream: TcpStream) { - // ... - # &mut stream; // silence unused mutability/variable warning - } - // accept connections and process them, spawning a new tasks for each one - for stream in acceptor.incoming() { - match stream { - Err(e) => { /* connection failed */ } - Ok(stream) => spawn(proc() { - // connection succeeded - handle_client(stream) - }) - } - } - - // close the socket server - drop(acceptor); - # } - ``` - - -# Error Handling - -I/O is an area where nearly every operation can result in unexpected -errors. Errors should be painfully visible when they happen, and handling them -should be easy to work with. It should be convenient to handle specific I/O -errors, and it should also be convenient to not deal with I/O errors. - -Rust's I/O employs a combination of techniques to reduce boilerplate -while still providing feedback about errors. The basic strategy: - -* All I/O operations return `IoResult` which is equivalent to - `Result`. The `Result` type is defined in the `std::result` - module. -* If the `Result` type goes unused, then the compiler will by default emit a - warning about the unused result. This is because `Result` has the - `#[must_use]` attribute. -* Common traits are implemented for `IoResult`, e.g. - `impl Reader for IoResult`, so that error values do not have - to be 'unwrapped' before use. 
- -These features combine in the API to allow for expressions like -`File::create(&Path::new("diary.txt")).write(b"Met a girl.\n")` -without having to worry about whether "diary.txt" exists or whether -the write succeeds. As written, if either `new` or `write_line` -encounters an error then the result of the entire expression will -be an error. - -If you wanted to handle the error though you might write: - -```rust -# #![allow(unused_must_use)] -use std::io::File; - -match File::create(&Path::new("diary.txt")).write(b"Met a girl.\n") { - Ok(()) => (), // succeeded - Err(e) => println!("failed to write to my diary: {}", e), -} - -# ::std::io::fs::unlink(&Path::new("diary.txt")); -``` - -So what actually happens if `create` encounters an error? -It's important to know that what `new` returns is not a `File` -but an `IoResult`. If the file does not open, then `new` will simply -return `Err(..)`. Because there is an implementation of `Writer` (the trait -required ultimately required for types to implement `write_line`) there is no -need to inspect or unwrap the `IoResult` and we simply call `write_line` -on it. If `new` returned an `Err(..)` then the followup call to `write_line` -will also return an error. - -## `try!` - -Explicit pattern matching on `IoResult`s can get quite verbose, especially -when performing many I/O operations. Some examples (like those above) are -alleviated with extra methods implemented on `IoResult`, but others have more -complex interdependencies among each I/O operation. - -The `try!` macro from `std::macros` is provided as a method of early-return -inside `Result`-returning functions. It expands to an early-return on `Err` -and otherwise unwraps the contained `Ok` value. 
- -If you wanted to read several `u32`s from a file and return their product: - -```rust -use std::io::{File, IoResult}; - -fn file_product(p: &Path) -> IoResult { - let mut f = File::open(p); - let x1 = try!(f.read_le_u32()); - let x2 = try!(f.read_le_u32()); - - Ok(x1 * x2) -} - -match file_product(&Path::new("numbers.bin")) { - Ok(x) => println!("{}", x), - Err(e) => println!("Failed to read numbers!") -} -``` - -With `try!` in `file_product`, each `read_le_u32` need not be directly -concerned with error handling; instead its caller is responsible for -responding to errors that may occur while attempting to read the numbers. - -*/ +//! I/O, including files, networking, timers, and processes +//! +//! `std::io` provides Rust's basic I/O types, +//! for reading and writing to files, TCP, UDP, +//! and other types of sockets and pipes, +//! manipulating the file system, spawning processes and signal handling. +//! +//! # Examples +//! +//! Some examples of obvious things you might want to do +//! +//! * Read lines from stdin +//! +//! ```rust +//! use std::io; +//! +//! for line in io::stdin().lines() { +//! print!("{}", line.unwrap()); +//! } +//! ``` +//! +//! * Read a complete file +//! +//! ```rust +//! use std::io::File; +//! +//! let contents = File::open(&Path::new("message.txt")).read_to_end(); +//! ``` +//! +//! * Write a line to a file +//! +//! ```rust +//! # #![allow(unused_must_use)] +//! use std::io::File; +//! +//! let mut file = File::create(&Path::new("message.txt")); +//! file.write(b"hello, file!\n"); +//! # drop(file); +//! # ::std::io::fs::unlink(&Path::new("message.txt")); +//! ``` +//! +//! * Iterate over the lines of a file +//! +//! ```rust,no_run +//! use std::io::BufferedReader; +//! use std::io::File; +//! +//! let path = Path::new("message.txt"); +//! let mut file = BufferedReader::new(File::open(&path)); +//! for line in file.lines() { +//! print!("{}", line.unwrap()); +//! } +//! ``` +//! +//! 
* Pull the lines of a file into a vector of strings +//! +//! ```rust,no_run +//! use std::io::BufferedReader; +//! use std::io::File; +//! +//! let path = Path::new("message.txt"); +//! let mut file = BufferedReader::new(File::open(&path)); +//! let lines: Vec<String> = file.lines().map(|x| x.unwrap()).collect(); +//! ``` +//! +//! * Make a simple TCP client connection and request +//! +//! ```rust +//! # #![allow(unused_must_use)] +//! use std::io::TcpStream; +//! +//! # // connection doesn't fail if a server is running on 8080 +//! # // locally, we still want to be type checking this code, so lets +//! # // just stop it running (#11576) +//! # if false { +//! let mut socket = TcpStream::connect("127.0.0.1", 8080).unwrap(); +//! socket.write(b"GET / HTTP/1.0\n\n"); +//! let response = socket.read_to_end(); +//! # } +//! ``` +//! +//! * Make a simple TCP server +//! +//! ```rust +//! # fn main() { } +//! # fn foo() { +//! # #![allow(dead_code)] +//! use std::io::{TcpListener, TcpStream}; +//! use std::io::{Acceptor, Listener}; +//! +//! let listener = TcpListener::bind("127.0.0.1", 80); +//! +//! // bind the listener to the specified address +//! let mut acceptor = listener.listen(); +//! +//! fn handle_client(mut stream: TcpStream) { +//! // ... +//! # &mut stream; // silence unused mutability/variable warning +//! } +//! // accept connections and process them, spawning a new task for each one +//! for stream in acceptor.incoming() { +//! match stream { +//! Err(e) => { /* connection failed */ } +//! Ok(stream) => spawn(proc() { +//! // connection succeeded +//! handle_client(stream) +//! }) +//! } +//! } +//! +//! // close the socket server +//! drop(acceptor); +//! # } +//! ``` +//! +//! +//! # Error Handling +//! +//! I/O is an area where nearly every operation can result in unexpected +//! errors. Errors should be painfully visible when they happen, and handling +//! them should be easy to work with. It should be convenient to handle specific +//! 
I/O errors, and it should also be convenient to not deal with I/O errors. +//! +//! Rust's I/O employs a combination of techniques to reduce boilerplate +//! while still providing feedback about errors. The basic strategy: +//! +//! * All I/O operations return `IoResult<T>` which is equivalent to +//! `Result<T, IoError>`. The `Result` type is defined in the `std::result` +//! module. +//! * If the `Result` type goes unused, then the compiler will by default emit a +//! warning about the unused result. This is because `Result` has the +//! `#[must_use]` attribute. +//! * Common traits are implemented for `IoResult`, e.g. +//! `impl Reader for IoResult<R>`, so that error values do not have +//! to be 'unwrapped' before use. +//! +//! These features combine in the API to allow for expressions like +//! `File::create(&Path::new("diary.txt")).write(b"Met a girl.\n")` +//! without having to worry about whether "diary.txt" exists or whether +//! the write succeeds. As written, if either `new` or `write_line` +//! encounters an error then the result of the entire expression will +//! be an error. +//! +//! If you wanted to handle the error though you might write: +//! +//! ```rust +//! # #![allow(unused_must_use)] +//! use std::io::File; +//! +//! match File::create(&Path::new("diary.txt")).write(b"Met a girl.\n") { +//! Ok(()) => (), // succeeded +//! Err(e) => println!("failed to write to my diary: {}", e), +//! } +//! +//! # ::std::io::fs::unlink(&Path::new("diary.txt")); +//! ``` +//! +//! So what actually happens if `create` encounters an error? +//! It's important to know that what `new` returns is not a `File` +//! but an `IoResult<File>`. If the file does not open, then `new` will simply +//! return `Err(..)`. Because there is an implementation of `Writer` (the trait +//! ultimately required for types to implement `write_line`) there is +//! no need to inspect or unwrap the `IoResult` and we simply call +//! `write_line` on it. 
If `new` returned an `Err(..)` then the followup call to +//! `write_line` will also return an error. +//! +//! ## `try!` +//! +//! Explicit pattern matching on `IoResult`s can get quite verbose, especially +//! when performing many I/O operations. Some examples (like those above) are +//! alleviated with extra methods implemented on `IoResult`, but others have +//! more complex interdependencies among each I/O operation. +//! +//! The `try!` macro from `std::macros` is provided as a method of early-return +//! inside `Result`-returning functions. It expands to an early-return on `Err` +//! and otherwise unwraps the contained `Ok` value. +//! +//! If you wanted to read several `u32`s from a file and return their product: +//! +//! ```rust +//! use std::io::{File, IoResult};
+//! +//! fn file_product(p: &Path) -> IoResult<u32> { +//! let mut f = File::open(p); +//! let x1 = try!(f.read_le_u32()); +//! let x2 = try!(f.read_le_u32()); +//! +//! Ok(x1 * x2) +//! } +//! +//! match file_product(&Path::new("numbers.bin")) { +//! Ok(x) => println!("{}", x), +//! Err(e) => println!("Failed to read numbers!") +//! } +//! ``` +//! +//! With `try!` in `file_product`, each `read_le_u32` need not be directly +//! concerned with error handling; instead its caller is responsible for +//! responding to errors that may occur while attempting to read the numbers. #![experimental] #![deny(unused_must_use)] diff --git a/src/libstd/io/net/addrinfo.rs b/src/libstd/io/net/addrinfo.rs index 8d5fd2b99fd7b..4c137825b04ba 100644 --- a/src/libstd/io/net/addrinfo.rs +++ b/src/libstd/io/net/addrinfo.rs @@ -8,14 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Synchronous DNS Resolution - -Contains the functionality to perform DNS resolution in a style related to -getaddrinfo() - -*/ +//! Synchronous DNS Resolution +//! +//! Contains the functionality to perform DNS resolution in a style related to +//! 
getaddrinfo() #![allow(missing_doc)] diff --git a/src/libstd/io/net/unix.rs b/src/libstd/io/net/unix.rs index c5ddda9945de1..dd556d0adb4f1 100644 --- a/src/libstd/io/net/unix.rs +++ b/src/libstd/io/net/unix.rs @@ -8,19 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Named pipes - -This module contains the ability to communicate over named pipes with -synchronous I/O. On windows, this corresponds to talking over a Named Pipe, -while on Unix it corresponds to UNIX domain sockets. - -These pipes are similar to TCP in the sense that you can have both a stream to a -server and a server itself. The server provided accepts other `UnixStream` -instances as clients. - -*/ +//! Named pipes +//! +//! This module contains the ability to communicate over named pipes with +//! synchronous I/O. On windows, this corresponds to talking over a Named Pipe, +//! while on Unix it corresponds to UNIX domain sockets. +//! +//! These pipes are similar to TCP in the sense that you can have both a stream +//! to a server and a server itself. The server provided accepts other +//! `UnixStream` instances as clients. #![allow(missing_doc)] diff --git a/src/libstd/io/signal.rs b/src/libstd/io/signal.rs index 4a7655a63ce8c..ca1e2884baeea 100644 --- a/src/libstd/io/signal.rs +++ b/src/libstd/io/signal.rs @@ -8,16 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Signal handling - -This modules provides bindings to receive signals safely, built on top of the -local I/O factory. There are a number of defined signals which can be caught, -but not all signals will work across all platforms (windows doesn't have -definitions for a number of signals. - -*/ +//! Signal handling +//! +//! This module provides bindings to receive signals safely, built on top of +//! the local I/O factory. There are a number of defined signals which can be +//! 
caught, but not all signals will work across all platforms (windows doesn't +//! have definitions for a number of signals). use clone::Clone; use comm::{Sender, Receiver, channel}; diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index e5a64f785ce96..33913cbba0e1b 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -8,24 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! Non-blocking access to stdin, stdout, and stderr. - -This module provides bindings to the local event loop's TTY interface, using it -to offer synchronous but non-blocking versions of stdio. These handles can be -inspected for information about terminal dimensions or for related information -about the stream or terminal to which it is attached. - -# Example - -```rust -# #![allow(unused_must_use)] -use std::io; - -let mut out = io::stdout(); -out.write(b"Hello, world!"); -``` - -*/ +//! Non-blocking access to stdin, stdout, and stderr. +//! +//! This module provides bindings to the local event loop's TTY interface, using +//! it to offer synchronous but non-blocking versions of stdio. These handles +//! can be inspected for information about terminal dimensions or for related +//! information about the stream or terminal to which it is attached. +//! +//! # Example +//! +//! ```rust +//! # #![allow(unused_must_use)] +//! use std::io; +//! +//! let mut out = io::stdout(); +//! out.write(b"Hello, world!"); +//! ``` use failure::local_stderr; use fmt; diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs index 26e854d9d999f..1a23808736a80 100644 --- a/src/libstd/io/test.rs +++ b/src/libstd/io/test.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! Various utility functions useful for writing I/O tests */ +//! 
Various utility functions useful for writing I/O tests #![macro_escape] @@ -88,13 +88,11 @@ pub fn next_test_ip6() -> SocketAddr { SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() } } -/* -XXX: Welcome to MegaHack City. - -The bots run multiple builds at the same time, and these builds -all want to use ports. This function figures out which workspace -it is running in and assigns a port range based on it. -*/ +// FIXME: Welcome to MegaHack City. +// +// The bots run multiple builds at the same time, and these builds +// all want to use ports. This function figures out which workspace +// it is running in and assigns a port range based on it. fn base_port() -> u16 { let base = 9600u16; @@ -132,16 +130,15 @@ pub fn raise_fd_limit() { unsafe { darwin_fd_limit::raise_fd_limit() } } +/// darwin_fd_limit exists to work around an issue where launchctl on Mac OS X +/// defaults the rlimit maxfiles to 256/unlimited. The default soft limit of 256 +/// ends up being far too low for our multithreaded scheduler testing, depending +/// on the number of cores available. +/// +/// This fixes issue #7772. #[cfg(target_os="macos")] #[allow(non_camel_case_types)] mod darwin_fd_limit { - /*! - * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the - * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low - * for our multithreaded scheduler testing, depending on the number of cores available. - * - * This fixes issue #7772. - */ use libc; type rlim_t = libc::uint64_t; diff --git a/src/libstd/io/timer.rs b/src/libstd/io/timer.rs index 432461c460634..b2c412144bd90 100644 --- a/src/libstd/io/timer.rs +++ b/src/libstd/io/timer.rs @@ -8,14 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! 
- -Synchronous Timers - -This module exposes the functionality to create timers, block the current task, -and create receivers which will receive notifications after a period of time. - -*/ +//! Synchronous Timers +//! +//! This module exposes the functionality to create timers, block the current +//! task, and create receivers which will receive notifications after a period +//! of time. use comm::{Receiver, Sender, channel}; use io::{IoResult, IoError}; diff --git a/src/libstd/io/util.rs b/src/libstd/io/util.rs index 83a01feee9017..f62f2f55295f9 100644 --- a/src/libstd/io/util.rs +++ b/src/libstd/io/util.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! Utility implementations of Reader and Writer */ +//! Utility implementations of Reader and Writer use prelude::*; use cmp; diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index 928a1088d0efa..a697401a6030b 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -192,19 +192,19 @@ fn start(argc: int, argv: *const *const u8) -> int { green::start(argc, argv, rustuv::event_loop, __test::main) } -/* Exported macros */ +// Exported macros pub mod macros; pub mod bitflags; mod rtdeps; -/* The Prelude. */ +// The Prelude. 
pub mod prelude; -/* Primitive types */ +// Primitive types #[path = "num/float_macros.rs"] mod float_macros; #[path = "num/int_macros.rs"] mod int_macros; @@ -232,23 +232,23 @@ pub mod ascii; #[cfg(not(test))] pub mod gc; -/* Common traits */ +// Common traits pub mod from_str; pub mod num; pub mod to_str; -/* Common data structures */ +// Common data structures pub mod collections; pub mod hash; -/* Tasks and communication */ +// Tasks and communication pub mod task; pub mod sync; -/* Runtime and platform support */ +// Runtime and platform support pub mod c_vec; pub mod dynamic_lib; diff --git a/src/libstd/macros.rs b/src/libstd/macros.rs index 8b79af8c9310d..ef2d477412296 100644 --- a/src/libstd/macros.rs +++ b/src/libstd/macros.rs @@ -411,7 +411,7 @@ pub mod builtin { /// ``` #[macro_export] macro_rules! format_args( ($closure:expr, $fmt:expr $($args:tt)*) => ({ - /* compiler built-in */ + // compiler built-in }) ) /// Inspect an environment variable at compile time. diff --git a/src/libstd/num/strconv.rs b/src/libstd/num/strconv.rs index 5028987f44fdd..64e3ae600dce9 100644 --- a/src/libstd/num/strconv.rs +++ b/src/libstd/num/strconv.rs @@ -143,31 +143,29 @@ static NEG_INF_BUF: [u8, ..4] = ['-' as u8, 'i' as u8, 'n' as u8, 'f' as u8]; static NAN_BUF: [u8, ..3] = ['N' as u8, 'a' as u8, 'N' as u8]; -/** - * Converts an integral number to its string representation as a byte vector. - * This is meant to be a common base implementation for all integral string - * conversion functions like `to_str()` or `to_str_radix()`. - * - * # Arguments - * - `num` - The number to convert. Accepts any number that - * implements the numeric traits. - * - `radix` - Base to use. Accepts only the values 2-36. - * - `sign` - How to emit the sign. Options are: - * - `SignNone`: No sign at all. Basically emits `abs(num)`. - * - `SignNeg`: Only `-` on negative values. - * - `SignAll`: Both `+` on positive, and `-` on negative numbers. 
- * - `f` - a callback which will be invoked for each ascii character - * which composes the string representation of this integer - * - * # Return value - * A tuple containing the byte vector, and a boolean flag indicating - * whether it represents a special value like `inf`, `-inf`, `NaN` or not. - * It returns a tuple because there can be ambiguity between a special value - * and a number representation at higher bases. - * - * # Failure - * - Fails if `radix` < 2 or `radix` > 36. - */ +/// Converts an integral number to its string representation as a byte vector. +/// This is meant to be a common base implementation for all integral string +/// conversion functions like `to_str()` or `to_str_radix()`. +/// +/// # Arguments +/// - `num` - The number to convert. Accepts any number that +/// implements the numeric traits. +/// - `radix` - Base to use. Accepts only the values 2-36. +/// - `sign` - How to emit the sign. Options are: +/// - `SignNone`: No sign at all. Basically emits `abs(num)`. +/// - `SignNeg`: Only `-` on negative values. +/// - `SignAll`: Both `+` on positive, and `-` on negative numbers. +/// - `f` - a callback which will be invoked for each ascii character +/// which composes the string representation of this integer +/// +/// # Return value +/// A tuple containing the byte vector, and a boolean flag indicating +/// whether it represents a special value like `inf`, `-inf`, `NaN` or not. +/// It returns a tuple because there can be ambiguity between a special value +/// and a number representation at higher bases. +/// +/// # Failure +/// - Fails if `radix` < 2 or `radix` > 36. #[deprecated = "format!() and friends should be favored instead"] pub fn int_to_str_bytes_common(num: T, radix: uint, sign: SignFormat, f: |u8|) { assert!(2 <= radix && radix <= 36); @@ -223,40 +221,38 @@ pub fn int_to_str_bytes_common(num: T, radix: uint, sign: SignFormat, f: } } -/** - * Converts a number to its string representation as a byte vector. 
- * This is meant to be a common base implementation for all numeric string - * conversion functions like `to_str()` or `to_str_radix()`. - * - * # Arguments - * - `num` - The number to convert. Accepts any number that - * implements the numeric traits. - * - `radix` - Base to use. Accepts only the values 2-36. If the exponential notation - * is used, then this base is only used for the significand. The exponent - * itself always printed using a base of 10. - * - `negative_zero` - Whether to treat the special value `-0` as - * `-0` or as `+0`. - * - `sign` - How to emit the sign. See `SignFormat`. - * - `digits` - The amount of digits to use for emitting the fractional - * part, if any. See `SignificantDigits`. - * - `exp_format` - Whether or not to use the exponential (scientific) notation. - * See `ExponentFormat`. - * - `exp_capital` - Whether or not to use a capital letter for the exponent sign, if - * exponential notation is desired. - * - * # Return value - * A tuple containing the byte vector, and a boolean flag indicating - * whether it represents a special value like `inf`, `-inf`, `NaN` or not. - * It returns a tuple because there can be ambiguity between a special value - * and a number representation at higher bases. - * - * # Failure - * - Fails if `radix` < 2 or `radix` > 36. - * - Fails if `radix` > 14 and `exp_format` is `ExpDec` due to conflict - * between digit and exponent sign `'e'`. - * - Fails if `radix` > 25 and `exp_format` is `ExpBin` due to conflict - * between digit and exponent sign `'p'`. - */ +/// Converts a number to its string representation as a byte vector. +/// This is meant to be a common base implementation for all numeric string +/// conversion functions like `to_str()` or `to_str_radix()`. +/// +/// # Arguments +/// - `num` - The number to convert. Accepts any number that +/// implements the numeric traits. +/// - `radix` - Base to use. Accepts only the values 2-36. 
If the exponential notation +/// is used, then this base is only used for the significand. The exponent +/// itself always printed using a base of 10. +/// - `negative_zero` - Whether to treat the special value `-0` as +/// `-0` or as `+0`. +/// - `sign` - How to emit the sign. See `SignFormat`. +/// - `digits` - The amount of digits to use for emitting the fractional +/// part, if any. See `SignificantDigits`. +/// - `exp_format` - Whether or not to use the exponential (scientific) notation. +/// See `ExponentFormat`. +/// - `exp_capital` - Whether or not to use a capital letter for the exponent sign, if +/// exponential notation is desired. +/// +/// # Return value +/// A tuple containing the byte vector, and a boolean flag indicating +/// whether it represents a special value like `inf`, `-inf`, `NaN` or not. +/// It returns a tuple because there can be ambiguity between a special value +/// and a number representation at higher bases. +/// +/// # Failure +/// - Fails if `radix` < 2 or `radix` > 36. +/// - Fails if `radix` > 14 and `exp_format` is `ExpDec` due to conflict +/// between digit and exponent sign `'e'`. +/// - Fails if `radix` > 25 and `exp_format` is `ExpBin` due to conflict +/// between digit and exponent sign `'p'`. #[allow(deprecated)] pub fn float_to_str_bytes_common+Neg+Rem+Mul>( @@ -486,10 +482,8 @@ pub fn float_to_str_bytes_common+Neg+Rem+Mul>( @@ -507,45 +501,43 @@ static DIGIT_P_RADIX: uint = ('p' as uint) - ('a' as uint) + 11u; static DIGIT_I_RADIX: uint = ('i' as uint) - ('a' as uint) + 11u; static DIGIT_E_RADIX: uint = ('e' as uint) - ('a' as uint) + 11u; -/** - * Parses a byte slice as a number. This is meant to - * be a common base implementation for all numeric string conversion - * functions like `from_str()` or `from_str_radix()`. - * - * # Arguments - * - `buf` - The byte slice to parse. - * - `radix` - Which base to parse the number as. Accepts 2-36. - * - `negative` - Whether to accept negative numbers. 
- * - `fractional` - Whether to accept numbers with fractional parts. - * - `special` - Whether to accept special values like `inf` - * and `NaN`. Can conflict with `radix`, see Failure. - * - `exponent` - Which exponent format to accept. Options are: - * - `ExpNone`: No Exponent, accepts just plain numbers like `42` or - * `-8.2`. - * - `ExpDec`: Accepts numbers with a decimal exponent like `42e5` or - * `8.2E-2`. The exponent string itself is always base 10. - * Can conflict with `radix`, see Failure. - * - `ExpBin`: Accepts numbers with a binary exponent like `42P-8` or - * `FFp128`. The exponent string itself is always base 10. - * Can conflict with `radix`, see Failure. - * - `empty_zero` - Whether to accept an empty `buf` as a 0 or not. - * - `ignore_underscores` - Whether all underscores within the string should - * be ignored. - * - * # Return value - * Returns `Some(n)` if `buf` parses to a number n without overflowing, and - * `None` otherwise, depending on the constraints set by the remaining - * arguments. - * - * # Failure - * - Fails if `radix` < 2 or `radix` > 36. - * - Fails if `radix` > 14 and `exponent` is `ExpDec` due to conflict - * between digit and exponent sign `'e'`. - * - Fails if `radix` > 25 and `exponent` is `ExpBin` due to conflict - * between digit and exponent sign `'p'`. - * - Fails if `radix` > 18 and `special == true` due to conflict - * between digit and lowest first character in `inf` and `NaN`, the `'i'`. - */ +/// Parses a byte slice as a number. This is meant to +/// be a common base implementation for all numeric string conversion +/// functions like `from_str()` or `from_str_radix()`. +/// +/// # Arguments +/// - `buf` - The byte slice to parse. +/// - `radix` - Which base to parse the number as. Accepts 2-36. +/// - `negative` - Whether to accept negative numbers. +/// - `fractional` - Whether to accept numbers with fractional parts. +/// - `special` - Whether to accept special values like `inf` +/// and `NaN`. 
Can conflict with `radix`, see Failure. +/// - `exponent` - Which exponent format to accept. Options are: +/// - `ExpNone`: No Exponent, accepts just plain numbers like `42` or +/// `-8.2`. +/// - `ExpDec`: Accepts numbers with a decimal exponent like `42e5` or +/// `8.2E-2`. The exponent string itself is always base 10. +/// Can conflict with `radix`, see Failure. +/// - `ExpBin`: Accepts numbers with a binary exponent like `42P-8` or +/// `FFp128`. The exponent string itself is always base 10. +/// Can conflict with `radix`, see Failure. +/// - `empty_zero` - Whether to accept an empty `buf` as a 0 or not. +/// - `ignore_underscores` - Whether all underscores within the string should +/// be ignored. +/// +/// # Return value +/// Returns `Some(n)` if `buf` parses to a number n without overflowing, and +/// `None` otherwise, depending on the constraints set by the remaining +/// arguments. +/// +/// # Failure +/// - Fails if `radix` < 2 or `radix` > 36. +/// - Fails if `radix` > 14 and `exponent` is `ExpDec` due to conflict +/// between digit and exponent sign `'e'`. +/// - Fails if `radix` > 25 and `exponent` is `ExpBin` due to conflict +/// between digit and exponent sign `'p'`. +/// - Fails if `radix` > 18 and `special == true` due to conflict +/// between digit and lowest first character in `inf` and `NaN`, the `'i'`. pub fn from_str_bytes_common+ Mul+Sub+Neg+Add+ NumStrConv+Clone>( @@ -748,10 +740,8 @@ pub fn from_str_bytes_common+ Some(accum * multiplier) } -/** - * Parses a string as a number. This is a wrapper for - * `from_str_bytes_common()`, for details see there. - */ +/// Parses a string as a number. This is a wrapper for +/// `from_str_bytes_common()`, for details see there. #[inline] pub fn from_str_common+Mul+ Sub+Neg+Add+NumStrConv+Clone>( diff --git a/src/libstd/os.rs b/src/libstd/os.rs index b3f25914c8f56..958479afcc3c1 100644 --- a/src/libstd/os.rs +++ b/src/libstd/os.rs @@ -8,23 +8,21 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -/*! - * Higher-level interfaces to libc::* functions and operating system services. - * - * In general these take and return rust types, use rust idioms (enums, - * closures, vectors) rather than C idioms, and do more extensive safety - * checks. - * - * This module is not meant to only contain 1:1 mappings to libc entries; any - * os-interface code that is reasonably useful and broadly applicable can go - * here. Including utility routines that merely build on other os code. - * - * We assume the general case is that users do not care, and do not want to - * be made to care, which operating system they are on. While they may want - * to special case various special cases -- and so we will not _hide_ the - * facts of which OS the user is on -- they should be given the opportunity - * to write OS-ignorant code by default. - */ +//! Higher-level interfaces to libc::* functions and operating system services. +//! +//! In general these take and return rust types, use rust idioms (enums, +//! closures, vectors) rather than C idioms, and do more extensive safety +//! checks. +//! +//! This module is not meant to only contain 1:1 mappings to libc entries; any +//! os-interface code that is reasonably useful and broadly applicable can go +//! here. Including utility routines that merely build on other os code. +//! +//! We assume the general case is that users do not care, and do not want to +//! be made to care, which operating system they are on. While they may want +//! to special case various special cases -- and so we will not _hide_ the +//! facts of which OS the user is on -- they should be given the opportunity +//! to write OS-ignorant code by default. #![experimental] @@ -190,10 +188,8 @@ pub mod win32 { } } -/* -Accessing environment variables is not generally threadsafe. -Serialize access through a global lock. 
-*/ +// Accessing environment variables is not generally threadsafe. +// Serialize access through a global lock. fn with_env_lock(f: || -> T) -> T { use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; @@ -790,18 +786,16 @@ pub fn homedir() -> Option { _homedir() } -/** - * Returns the path to a temporary directory. - * - * On Unix, returns the value of the 'TMPDIR' environment variable if it is - * set, otherwise for non-Android it returns '/tmp'. If Android, since there - * is no global temporary folder (it is usually allocated per-app), we return - * '/data/local/tmp'. - * - * On Windows, returns the value of, in order, the 'TMP', 'TEMP', - * 'USERPROFILE' environment variable if any are set and not the empty - * string. Otherwise, tmpdir returns the path to the Windows directory. - */ +/// Returns the path to a temporary directory. +/// +/// On Unix, returns the value of the 'TMPDIR' environment variable if it is +/// set, otherwise for non-Android it returns '/tmp'. If Android, since there +/// is no global temporary folder (it is usually allocated per-app), we return +/// '/data/local/tmp'. +/// +/// On Windows, returns the value of, in order, the 'TMP', 'TEMP', +/// 'USERPROFILE' environment variable if any are set and not the empty +/// string. Otherwise, tmpdir returns the path to the Windows directory. pub fn tmpdir() -> Path { return lookup(); @@ -837,13 +831,11 @@ pub fn tmpdir() -> Path { } } -/** - * Convert a relative path to an absolute path - * - * If the given path is relative, return it prepended with the current working - * directory. If the given path is already an absolute path, return it - * as is. - */ +/// Convert a relative path to an absolute path +/// +/// If the given path is relative, return it prepended with the current working +/// directory. If the given path is already an absolute path, return it +/// as is. 
// NB: this is here rather than in path because it is a form of environment // querying; what it does depends on the process working directory, not just // the input paths. @@ -1038,16 +1030,14 @@ pub fn last_os_error() -> String { static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT; -/** - * Sets the process exit code - * - * Sets the exit code returned by the process if all supervised tasks - * terminate successfully (without failing). If the current root task fails - * and is supervised by the scheduler then any user-specified exit status is - * ignored and the process exits with the default failure status. - * - * Note that this is not synchronized against modifications of other threads. - */ +/// Sets the process exit code +/// +/// Sets the exit code returned by the process if all supervised tasks +/// terminate successfully (without failing). If the current root task fails +/// and is supervised by the scheduler then any user-specified exit status is +/// ignored and the process exits with the default failure status. +/// +/// Note that this is not synchronized against modifications of other threads. pub fn set_exit_status(code: int) { unsafe { EXIT_STATUS.store(code, SeqCst) } } @@ -1069,11 +1059,9 @@ unsafe fn load_argc_and_argv(argc: int, }) } -/** - * Returns the command line arguments - * - * Returns a list of the command line arguments. - */ +/// Returns the command line arguments +/// +/// Returns a list of the command line arguments. #[cfg(target_os = "macos")] fn real_args_as_bytes() -> Vec> { unsafe { diff --git a/src/libstd/path/mod.rs b/src/libstd/path/mod.rs index 7d814df8ebf95..b8198f367ef56 100644 --- a/src/libstd/path/mod.rs +++ b/src/libstd/path/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -8,60 +8,58 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Cross-platform path support - -This module implements support for two flavors of paths. `PosixPath` represents -a path on any unix-like system, whereas `WindowsPath` represents a path on -Windows. This module also exposes a typedef `Path` which is equal to the -appropriate platform-specific path variant. - -Both `PosixPath` and `WindowsPath` implement a trait `GenericPath`, which -contains the set of methods that behave the same for both paths. They each also -implement some methods that could not be expressed in `GenericPath`, yet behave -identically for both path flavors, such as `.components()`. - -The three main design goals of this module are 1) to avoid unnecessary -allocation, 2) to behave the same regardless of which flavor of path is being -used, and 3) to support paths that cannot be represented in UTF-8 (as Linux has -no restriction on paths beyond disallowing NUL). - -## Usage - -Usage of this module is fairly straightforward. Unless writing platform-specific -code, `Path` should be used to refer to the platform-native path. - -Creation of a path is typically done with either `Path::new(some_str)` or -`Path::new(some_vec)`. This path can be modified with `.push()` and -`.pop()` (and other setters). The resulting Path can either be passed to another -API that expects a path, or can be turned into a `&[u8]` with `.as_vec()` or a -`Option<&str>` with `.as_str()`. Similarly, attributes of the path can be queried -with methods such as `.filename()`. There are also methods that return a new -path instead of modifying the receiver, such as `.join()` or `.dir_path()`. - -Paths are always kept in normalized form. This means that creating the path -`Path::new("a/b/../c")` will return the path `a/c`. Similarly any attempt -to mutate the path will always leave it in normalized form. 
- -When rendering a path to some form of output, there is a method `.display()` -which is compatible with the `format!()` parameter `{}`. This will render the -path as a string, replacing all non-utf8 sequences with the Replacement -Character (U+FFFD). As such it is not suitable for passing to any API that -actually operates on the path; it is only intended for display. - -## Example - -```rust -let mut path = Path::new("/tmp/path"); -println!("path: {}", path.display()); -path.set_filename("foo"); -path.push("bar"); -println!("new path: {}", path.display()); -println!("path exists: {}", path.exists()); -``` - -*/ +//! Cross-platform path support +//! +//! This module implements support for two flavors of paths. `PosixPath` +//! represents a path on any unix-like system, whereas `WindowsPath` represents +//! a path on Windows. This module also exposes a typedef `Path` which is equal +//! to the appropriate platform-specific path variant. +//! +//! Both `PosixPath` and `WindowsPath` implement a trait `GenericPath`, which +//! contains the set of methods that behave the same for both paths. They each +//! also implement some methods that could not be expressed in `GenericPath`, +//! yet behave identically for both path flavors, such as `.components()`. +//! +//! The three main design goals of this module are 1) to avoid unnecessary +//! allocation, 2) to behave the same regardless of which flavor of path is +//! being used, and 3) to support paths that cannot be represented in UTF-8 (as +//! Linux has no restriction on paths beyond disallowing NUL). +//! +//! ## Usage +//! +//! Usage of this module is fairly straightforward. Unless writing +//! platform-specific code, `Path` should be used to refer to the +//! platform-native path. +//! +//! Creation of a path is typically done with either `Path::new(some_str)` or +//! `Path::new(some_vec)`. This path can be modified with `.push()` and +//! `.pop()` (and other setters). The resulting Path can either be passed to +//! 
another API that expects a path, or can be turned into a `&[u8]` with +//! `.as_vec()` or a `Option<&str>` with `.as_str()`. Similarly, attributes of +//! the path can be queried with methods such as `.filename()`. There are also +//! methods that return a new path instead of modifying the receiver, such as +//! `.join()` or `.dir_path()`. +//! +//! Paths are always kept in normalized form. This means that creating the path +//! `Path::new("a/b/../c")` will return the path `a/c`. Similarly any attempt +//! to mutate the path will always leave it in normalized form. +//! +//! When rendering a path to some form of output, there is a method `.display()` +//! which is compatible with the `format!()` parameter `{}`. This will render +//! the path as a string, replacing all non-utf8 sequences with the Replacement +//! Character (U+FFFD). As such it is not suitable for passing to any API that +//! actually operates on the path; it is only intended for display. +//! +//! ## Example +//! +//! ```rust +//! let mut path = Path::new("/tmp/path"); +//! println!("path: {}", path.display()); +//! path.set_filename("foo"); +//! path.push("bar"); +//! println!("new path: {}", path.display()); +//! println!("path exists: {}", path.exists()); +//! ``` #![experimental] diff --git a/src/libstd/rand/mod.rs b/src/libstd/rand/mod.rs index 0ffaadef0a130..b7436d69d8977 100644 --- a/src/libstd/rand/mod.rs +++ b/src/libstd/rand/mod.rs @@ -8,70 +8,71 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Utilities for random number generation - -The key functions are `random()` and `Rng::gen()`. These are polymorphic -and so can be used to generate any type that implements `Rand`. Type inference -means that often a simple call to `rand::random()` or `rng.gen()` will -suffice, but sometimes an annotation is required, e.g. `rand::random::()`. 
- -See the `distributions` submodule for sampling random numbers from -distributions like normal and exponential. - -# Task-local RNG - -There is built-in support for a RNG associated with each task stored -in task-local storage. This RNG can be accessed via `task_rng`, or -used implicitly via `random`. This RNG is normally randomly seeded -from an operating-system source of randomness, e.g. `/dev/urandom` on -Unix systems, and will automatically reseed itself from this source -after generating 32 KiB of random data. - -# Cryptographic security - -An application that requires an entropy source for cryptographic purposes -must use `OsRng`, which reads randomness from the source that the operating -system provides (e.g. `/dev/urandom` on Unixes or `CryptGenRandom()` on Windows). -The other random number generators provided by this module are not suitable -for such purposes. - -*Note*: many Unix systems provide `/dev/random` as well as `/dev/urandom`. -This module uses `/dev/urandom` for the following reasons: - -- On Linux, `/dev/random` may block if entropy pool is empty; `/dev/urandom` will not block. - This does not mean that `/dev/random` provides better output than - `/dev/urandom`; the kernel internally runs a cryptographically secure pseudorandom - number generator (CSPRNG) based on entropy pool for random number generation, - so the "quality" of `/dev/random` is not better than `/dev/urandom` in most cases. - However, this means that `/dev/urandom` can yield somewhat predictable randomness - if the entropy pool is very small, such as immediately after first booting. - If an application likely to be run soon after first booting, or on a system with very - few entropy sources, one should consider using `/dev/random` via `ReaderRng`. -- On some systems (e.g. FreeBSD, OpenBSD and Mac OS X) there is no difference - between the two sources. (Also note that, on some systems e.g. 
FreeBSD, both `/dev/random` - and `/dev/urandom` may block once if the CSPRNG has not seeded yet.) - -# Examples - -```rust -use std::rand; -use std::rand::Rng; - -let mut rng = rand::task_rng(); -if rng.gen() { // random bool - println!("int: {}, uint: {}", rng.gen::(), rng.gen::()) -} -``` - -```rust -use std::rand; - -let tuple = rand::random::<(f64, char)>(); -println!("{}", tuple) -``` -*/ +//! Utilities for random number generation +//! +//! The key functions are `random()` and `Rng::gen()`. These are polymorphic +//! and so can be used to generate any type that implements `Rand`. Type +//! inference means that often a simple call to `rand::random()` or `rng.gen()` +//! will suffice, but sometimes an annotation is required, e.g. +//! `rand::random::()`. +//! +//! See the `distributions` submodule for sampling random numbers from +//! distributions like normal and exponential. +//! +//! # Task-local RNG +//! +//! There is built-in support for a RNG associated with each task stored +//! in task-local storage. This RNG can be accessed via `task_rng`, or +//! used implicitly via `random`. This RNG is normally randomly seeded +//! from an operating-system source of randomness, e.g. `/dev/urandom` on +//! Unix systems, and will automatically reseed itself from this source +//! after generating 32 KiB of random data. +//! +//! # Cryptographic security +//! +//! An application that requires an entropy source for cryptographic purposes +//! must use `OsRng`, which reads randomness from the source that the operating +//! system provides (e.g. `/dev/urandom` on Unixes or `CryptGenRandom()` on +//! Windows). The other random number generators provided by this module are not +//! suitable for such purposes. +//! +//! *Note*: many Unix systems provide `/dev/random` as well as `/dev/urandom`. +//! This module uses `/dev/urandom` for the following reasons: +//! +//! - On Linux, `/dev/random` may block if entropy pool is empty; +//! `/dev/urandom` will not block. 
This does not mean that `/dev/random` +//! provides better output than `/dev/urandom`; the kernel internally runs a +//! cryptographically secure pseudorandom number generator (CSPRNG) based on +//! entropy pool for random number generation, so the "quality" of +//! `/dev/random` is not better than `/dev/urandom` in most cases. However, +//! this means that `/dev/urandom` can yield somewhat predictable randomness +//! if the entropy pool is very small, such as immediately after first +//! booting. If an application is likely to be run soon after first booting, or +//! on a system with very few entropy sources, one should consider using +//! `/dev/random` via `ReaderRng`. +//! - On some systems (e.g. FreeBSD, OpenBSD and Mac OS X) there is no +//! difference between the two sources. (Also note that, on some systems +//! e.g. FreeBSD, both `/dev/random` and `/dev/urandom` may block once if +//! the CSPRNG has not seeded yet.) +//! +//! # Examples +//! +//! ```rust +//! use std::rand; +//! use std::rand::Rng; +//! +//! let mut rng = rand::task_rng(); +//! if rng.gen() { // random bool +//! println!("int: {}, uint: {}", rng.gen::(), rng.gen::()) +//! } +//! ``` +//! +//! ```rust +//! use std::rand; +//! +//! let tuple = rand::random::<(f64, char)>(); +//! println!("{}", tuple) +//! ``` #![experimental] diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 4490977bde619..742e52a63675a 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -8,48 +8,46 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! Runtime services, including the task scheduler and I/O dispatcher - -The `rt` module provides the private runtime infrastructure necessary -to support core language features like the exchange and local heap, -the garbage collector, logging, local data and unwinding. It also -implements the default task scheduler and task model. 
Initialization -routines are provided for setting up runtime resources in common -configurations, including that used by `rustc` when generating -executables. - -It is intended that the features provided by `rt` can be factored in a -way such that the core library can be built with different 'profiles' -for different use cases, e.g. excluding the task scheduler. A number -of runtime features though are critical to the functioning of the -language and an implementation must be provided regardless of the -execution environment. - -Of foremost importance is the global exchange heap, in the module -`heap`. Very little practical Rust code can be written without -access to the global heap. Unlike most of `rt` the global heap is -truly a global resource and generally operates independently of the -rest of the runtime. - -All other runtime features are task-local, including the local heap, -the garbage collector, local storage, logging and the stack unwinder. - -The relationship between `rt` and the rest of the core library is -not entirely clear yet and some modules will be moving into or -out of `rt` as development proceeds. - -Several modules in `core` are clients of `rt`: - -* `std::task` - The user-facing interface to the Rust task model. -* `std::local_data` - The interface to local data. -* `std::gc` - The garbage collector. -* `std::unstable::lang` - Miscellaneous lang items, some of which rely on `std::rt`. -* `std::cleanup` - Local heap destruction. -* `std::io` - In the future `std::io` will use an `rt` implementation. -* `std::logging` -* `std::comm` - -*/ +//! Runtime services, including the task scheduler and I/O dispatcher +//! +//! The `rt` module provides the private runtime infrastructure necessary +//! to support core language features like the exchange and local heap, +//! the garbage collector, logging, local data and unwinding. It also +//! implements the default task scheduler and task model. Initialization +//! 
routines are provided for setting up runtime resources in common +//! configurations, including that used by `rustc` when generating +//! executables. +//! +//! It is intended that the features provided by `rt` can be factored in a +//! way such that the core library can be built with different 'profiles' +//! for different use cases, e.g. excluding the task scheduler. A number +//! of runtime features though are critical to the functioning of the +//! language and an implementation must be provided regardless of the +//! execution environment. +//! +//! Of foremost importance is the global exchange heap, in the module +//! `heap`. Very little practical Rust code can be written without +//! access to the global heap. Unlike most of `rt` the global heap is +//! truly a global resource and generally operates independently of the +//! rest of the runtime. +//! +//! All other runtime features are task-local, including the local heap, +//! the garbage collector, local storage, logging and the stack unwinder. +//! +//! The relationship between `rt` and the rest of the core library is +//! not entirely clear yet and some modules will be moving into or +//! out of `rt` as development proceeds. +//! +//! Several modules in `core` are clients of `rt`: +//! +//! * `std::task` - The user-facing interface to the Rust task model. +//! * `std::local_data` - The interface to local data. +//! * `std::gc` - The garbage collector. +//! * `std::unstable::lang` - Miscellaneous lang items, some of which rely on `std::rt`. +//! * `std::cleanup` - Local heap destruction. +//! * `std::io` - In the future `std::io` will use an `rt` implementation. +//! * `std::logging` +//! * `std::comm` #![experimental] diff --git a/src/libstd/sync/future.rs b/src/libstd/sync/future.rs index 78da605143dc5..68fa1407b5a0a 100644 --- a/src/libstd/sync/future.rs +++ b/src/libstd/sync/future.rs @@ -8,21 +8,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-/*! - * A type representing values that may be computed concurrently and - * operations for working with them. - * - * # Example - * - * ```rust - * use std::sync::Future; - * # fn fib(n: uint) -> uint {42}; - * # fn make_a_sandwich() {}; - * let mut delayed_fib = Future::spawn(proc() { fib(5000) }); - * make_a_sandwich(); - * println!("fib(5000) = {}", delayed_fib.get()) - * ``` - */ +//! A type representing values that may be computed concurrently and +//! operations for working with them. +//! +//! # Example +//! +//! ```rust +//! use std::sync::Future; +//! # fn fib(n: uint) -> uint {42}; +//! # fn make_a_sandwich() {}; +//! let mut delayed_fib = Future::spawn(proc() { fib(5000) }); +//! make_a_sandwich(); +//! println!("fib(5000) = {}", delayed_fib.get()) +//! ``` #![allow(missing_doc)] @@ -62,12 +60,10 @@ impl Future { } } + /// Executes the future's closure and then returns a reference + /// to the result. The reference lasts as long as + /// the future. pub fn get_ref<'a>(&'a mut self) -> &'a A { - /*! - * Executes the future's closure and then returns a reference - * to the result. The reference lasts as long as - * the future. - */ match self.state { Forced(ref v) => return v, Evaluating => fail!("Recursive forcing of future!"), @@ -83,52 +79,40 @@ impl Future { } } + /// Create a future from a value. + /// + /// The value is immediately available and calling `get` later will + /// not block. pub fn from_value(val: A) -> Future { - /*! - * Create a future from a value. - * - * The value is immediately available and calling `get` later will - * not block. - */ - Future {state: Forced(val)} } + /// Create a future from a function. + /// + /// The first time that the value is requested it will be retrieved by + /// calling the function. Note that this function is a local + /// function. It is not spawned into another task. pub fn from_fn(f: proc():Send -> A) -> Future { - /*! - * Create a future from a function. 
- * - * The first time that the value is requested it will be retrieved by - * calling the function. Note that this function is a local - * function. It is not spawned into another task. - */ - Future {state: Pending(f)} } } impl Future { + /// Create a future from a port + /// + /// The first time that the value is requested the task will block + /// waiting for the result to be received on the port. pub fn from_receiver(rx: Receiver) -> Future { - /*! - * Create a future from a port - * - * The first time that the value is requested the task will block - * waiting for the result to be received on the port. - */ - Future::from_fn(proc() { rx.recv() }) } + /// Create a future from a unique closure. + /// + /// The closure will be run in a new task and its result used as the + /// value of the future. pub fn spawn(blk: proc():Send -> A) -> Future { - /*! - * Create a future from a unique closure. - * - * The closure will be run in a new task and its result used as the - * value of the future. - */ - let (tx, rx) = channel(); spawn(proc() { diff --git a/src/libstd/task.rs b/src/libstd/task.rs index c20cbea0ae7cc..274ee585a48ee 100644 --- a/src/libstd/task.rs +++ b/src/libstd/task.rs @@ -74,12 +74,12 @@ //! //! // Spawn a task in the green pool //! let mut fut_green = TaskBuilder::new().green(&mut pool).try_future(proc() { -//! /* ... */ +//! // ... //! }); //! //! // Spawn a native task //! let mut fut_native = TaskBuilder::new().native().try_future(proc() { -//! /* ... */ +//! // ... //! }); //! //! 
// Wait for both tasks to finish, recording their outcome @@ -313,7 +313,7 @@ impl TaskBuilder { } } -/* Convenience functions */ +// Convenience functions /// Creates and executes a new child task /// @@ -342,7 +342,7 @@ pub fn try_future(f: proc():Send -> T) -> Future(blk: |Option<&str>| -> U) -> U { diff --git a/src/libstd/to_str.rs b/src/libstd/to_str.rs index e51e2c4d9ce5e..e085ce1f5603d 100644 --- a/src/libstd/to_str.rs +++ b/src/libstd/to_str.rs @@ -8,11 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -The `ToStr` trait for converting to strings - -*/ +//! The `ToStr` trait for converting to strings #![experimental] diff --git a/src/libsync/comm/duplex.rs b/src/libsync/comm/duplex.rs index 44dd63cbf6c01..aa7d520fcbee2 100644 --- a/src/libsync/comm/duplex.rs +++ b/src/libsync/comm/duplex.rs @@ -8,11 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Higher level communication abstractions. - -*/ +//! Higher level communication abstractions. #![allow(missing_doc)] #![deprecated = "This type is replaced by having a pair of channels. 
This type \ diff --git a/src/libsync/lock.rs b/src/libsync/lock.rs index 1d119bafea199..bf963d9076b8a 100644 --- a/src/libsync/lock.rs +++ b/src/libsync/lock.rs @@ -27,9 +27,9 @@ use rustrt::task::Task; use raw; -/**************************************************************************** - * Poisoning helpers - ****************************************************************************/ +// **************************************************************************** +// * Poisoning helpers +// **************************************************************************** struct PoisonOnFail<'a> { flag: &'a mut bool, @@ -65,9 +65,9 @@ impl<'a> Drop for PoisonOnFail<'a> { } } -/**************************************************************************** - * Condvar - ****************************************************************************/ +// **************************************************************************** +// * Condvar +// **************************************************************************** enum Inner<'a> { InnerMutex(raw::MutexGuard<'a>), @@ -145,9 +145,9 @@ impl<'a> Condvar<'a> { } } -/**************************************************************************** - * Mutex - ****************************************************************************/ +// **************************************************************************** +// * Mutex +// **************************************************************************** /// A wrapper type which provides synchronized access to the underlying data, of /// type `T`. 
A mutex always provides exclusive access, and concurrent requests @@ -247,9 +247,9 @@ impl<'a, T: Send> DerefMut for MutexGuard<'a, T> { fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data } } -/**************************************************************************** - * R/W lock protected lock - ****************************************************************************/ +// **************************************************************************** +// * R/W lock protected lock +// **************************************************************************** /// A dual-mode reader-writer lock. The data can be accessed mutably or /// immutably, and immutably-accessing tasks may run concurrently. @@ -385,9 +385,9 @@ impl<'a, T: Send + Share> DerefMut for RWLockWriteGuard<'a, T> { fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data } } -/**************************************************************************** - * Barrier - ****************************************************************************/ +// **************************************************************************** +// * Barrier +// **************************************************************************** /// A barrier enables multiple tasks to synchronize the beginning /// of some computation. 
@@ -450,9 +450,9 @@ impl Barrier { } } -/**************************************************************************** - * Tests - ****************************************************************************/ +// **************************************************************************** +// * Tests +// **************************************************************************** #[cfg(test)] mod tests { @@ -793,9 +793,9 @@ mod tests { } } - /************************************************************************ - * Barrier tests - ************************************************************************/ + // ************************************************************************ + // * Barrier tests + // ************************************************************************ #[test] fn test_barrier() { let barrier = Arc::new(Barrier::new(10)); diff --git a/src/libsync/raw.rs b/src/libsync/raw.rs index 26cc0b2c6a23c..3c5c8a0f2330a 100644 --- a/src/libsync/raw.rs +++ b/src/libsync/raw.rs @@ -27,9 +27,9 @@ use collections::Vec; use mutex; use comm::{Receiver, Sender, channel}; -/**************************************************************************** - * Internals - ****************************************************************************/ +// **************************************************************************** +// * Internals +// **************************************************************************** // Each waiting task receives on one of these. type WaitEnd = Receiver<()>; @@ -138,7 +138,7 @@ impl Sem { }); // Uncomment if you wish to test for sem races. Not // valgrind-friendly. - /* for _ in range(0u, 1000) { task::deschedule(); } */ + // for _ in range(0u, 1000) { task::deschedule(); } // Need to wait outside the exclusive. 
if waiter_nobe.is_some() { let _ = waiter_nobe.unwrap().recv(); @@ -346,9 +346,9 @@ struct SemCondGuard<'a> { cvar: Condvar<'a>, } -/**************************************************************************** - * Semaphores - ****************************************************************************/ +// **************************************************************************** +// * Semaphores +// **************************************************************************** /// A counting, blocking, bounded-waiting semaphore. pub struct Semaphore { @@ -383,9 +383,9 @@ impl Semaphore { } } -/**************************************************************************** - * Mutexes - ****************************************************************************/ +// **************************************************************************** +// * Mutexes +// **************************************************************************** /// A blocking, bounded-waiting, mutual exclusion lock with an associated /// FIFO condition variable. 
@@ -429,9 +429,9 @@ impl Mutex { } } -/**************************************************************************** - * Reader-writer locks - ****************************************************************************/ +// **************************************************************************** +// * Reader-writer locks +// **************************************************************************** // NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem @@ -606,9 +606,9 @@ impl<'a> Drop for RWLockReadGuard<'a> { } } -/**************************************************************************** - * Tests - ****************************************************************************/ +// **************************************************************************** +// * Tests +// **************************************************************************** #[cfg(test)] mod tests { @@ -621,9 +621,9 @@ mod tests { use std::result; use std::task; - /************************************************************************ - * Semaphore tests - ************************************************************************/ + // ************************************************************************ + // * Semaphore tests + // ************************************************************************ #[test] fn test_sem_acquire_release() { let s = Semaphore::new(1); @@ -649,7 +649,7 @@ mod tests { } #[test] fn test_sem_as_cvar() { - /* Child waits and parent signals */ + // Child waits and parent signals let (tx, rx) = channel(); let s = Arc::new(Semaphore::new(0)); let s2 = s.clone(); @@ -661,7 +661,7 @@ mod tests { s.release(); let _ = rx.recv(); - /* Parent waits and child signals */ + // Parent waits and child signals let (tx, rx) = channel(); let s = Arc::new(Semaphore::new(0)); let s2 = s.clone(); @@ -709,9 +709,9 @@ mod tests { } rx.recv(); // wait for child to be done } - /************************************************************************ - 
* Mutex tests - ************************************************************************/ + // ************************************************************************ + // * Mutex tests + // ************************************************************************ #[test] fn test_mutex_lock() { // Unsafely achieve shared state, and do the textbook @@ -866,9 +866,9 @@ mod tests { }); assert!(result.is_err()); } - /************************************************************************ - * Reader/writer lock tests - ************************************************************************/ + // ************************************************************************ + // * Reader/writer lock tests + // ************************************************************************ #[cfg(test)] pub enum RWLockMode { Read, Write, Downgrade, DowngradeRead } #[cfg(test)] diff --git a/src/libsyntax/abi.rs b/src/libsyntax/abi.rs index 9771bc9386b16..53a1fe620ed0e 100644 --- a/src/libsyntax/abi.rs +++ b/src/libsyntax/abi.rs @@ -84,21 +84,13 @@ static AbiDatas: &'static [AbiData] = &[ AbiData {abi: RustIntrinsic, name: "rust-intrinsic", abi_arch: RustArch}, ]; +/// Iterates through each of the defined ABIs. fn each_abi(op: |abi: Abi| -> bool) -> bool { - /*! - * - * Iterates through each of the defined ABIs. - */ - AbiDatas.iter().advance(|abi_data| op(abi_data.abi)) } +/// Returns the ABI with the given name (if any). pub fn lookup(name: &str) -> Option { - /*! - * - * Returns the ABI with the given name (if any). 
- */ - let mut res = None; each_abi(|abi| { diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index ce1302c8db874..41ece81de6246 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -297,8 +297,8 @@ pub enum Pat_ { // records this pattern's NodeId in an auxiliary // set (of "PatIdents that refer to nullary enums") PatIdent(BindingMode, SpannedIdent, Option>), - PatEnum(Path, Option>>), /* "none" means a * pattern where - * we don't bind the fields to names */ + PatEnum(Path, Option>>), // "none" means a * pattern where + // we don't bind the fields to names PatStruct(Path, Vec, bool), PatTup(Vec>), PatBox(Gc), @@ -776,7 +776,7 @@ pub struct UnboxedFnTy { #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum Ty_ { TyNil, - TyBot, /* bottom type */ + TyBot, // bottom type TyBox(P), TyUniq(P), TyVec(P), @@ -1013,13 +1013,11 @@ pub struct Attribute_ { pub is_sugared_doc: bool, } -/* - TraitRef's appear in impls. - resolve maps each TraitRef's ref_id to its defining trait; that's all - that the ref_id is for. The impl_id maps to the "self type" of this impl. - If this impl is an ItemImpl, the impl_id is redundant (it could be the - same as the impl's node id). - */ +// TraitRef's appear in impls. +// resolve maps each TraitRef's ref_id to its defining trait; that's all +// that the ref_id is for. The impl_id maps to the "self type" of this impl. +// If this impl is an ItemImpl, the impl_id is redundant (it could be the +// same as the impl's node id). #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub struct TraitRef { pub path: Path, @@ -1083,18 +1081,16 @@ impl StructFieldKind { #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)] pub struct StructDef { - pub fields: Vec, /* fields, not including ctor */ - /* ID of the constructor. This is only used for tuple- or enum-like - * structs. */ + pub fields: Vec, // fields, not including ctor + // ID of the constructor. 
This is only used for tuple- or enum-like + // structs. pub ctor_id: Option, pub super_struct: Option>, // Super struct, if specified. pub is_virtual: bool, // True iff the struct may be inherited from. } -/* - FIXME (#3300): Should allow items to be anonymous. Right now - we just use dummy names for anon items. - */ +// FIXME (#3300): Should allow items to be anonymous. Right now +// we just use dummy names for anon items. #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub struct Item { pub ident: Ident, diff --git a/src/libsyntax/ast_util.rs b/src/libsyntax/ast_util.rs index 036d6b4b43adc..5dd3cdb5c649c 100644 --- a/src/libsyntax/ast_util.rs +++ b/src/libsyntax/ast_util.rs @@ -572,18 +572,14 @@ pub fn compute_id_range_for_inlined_item(item: &InlinedItem) -> IdRange { visitor.result.get() } +/// Computes the id range for a single fn body, +/// ignoring nested items. pub fn compute_id_range_for_fn_body(fk: &visit::FnKind, decl: &FnDecl, body: &Block, sp: Span, id: NodeId) - -> IdRange -{ - /*! - * Computes the id range for a single fn body, - * ignoring nested items. - */ - + -> IdRange { let visitor = IdRangeComputingVisitor { result: Cell::new(IdRange::max()) }; diff --git a/src/libsyntax/attr.rs b/src/libsyntax/attr.rs index 3b2ee4e2a6134..3990546a750fa 100644 --- a/src/libsyntax/attr.rs +++ b/src/libsyntax/attr.rs @@ -46,10 +46,8 @@ pub trait AttrMetaMethods { /// #[foo="bar"] and #[foo(bar)] fn name(&self) -> InternedString; - /** - * Gets the string value if self is a MetaNameValue variant - * containing a string, otherwise None. - */ + /// Gets the string value if self is a MetaNameValue variant + /// containing a string, otherwise None. fn value_str(&self) -> Option; /// Gets a list of inner meta items from a list MetaItem type. 
fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc]>; @@ -143,7 +141,7 @@ impl AttributeMethods for Attribute { } } -/* Constructors */ +// Constructors pub fn mk_name_value_item_str(name: InternedString, value: InternedString) -> Gc { @@ -208,7 +206,7 @@ pub fn mk_sugared_doc_attr(id: AttrId, text: InternedString, lo: BytePos, spanned(lo, hi, attr) } -/* Searching */ +// Searching /// Check if `needle` occurs in `haystack` by a structural /// comparison. This is slightly subtle, and relies on ignoring the /// span included in the `==` comparison a plain MetaItem. @@ -244,7 +242,7 @@ pub fn last_meta_item_value_str_by_name(items: &[Gc], name: &str) .and_then(|i| i.value_str()) } -/* Higher-level applications */ +// Higher-level applications pub fn sort_meta_items(items: &[Gc]) -> Vec> { // This is sort of stupid here, but we need to sort by @@ -262,7 +260,7 @@ pub fn sort_meta_items(items: &[Gc]) -> Vec> { box(GC) Spanned { node: MetaList((*n).clone(), sort_meta_items(mis.as_slice())), - .. /*bad*/ (*m).clone() + .. /* bad */ (*m).clone() } } _ => m @@ -420,18 +418,16 @@ pub fn require_unique_names(diagnostic: &SpanHandler, metas: &[Gc]) { } -/** - * Fold this over attributes to parse #[repr(...)] forms. - * - * Valid repr contents: any of the primitive integral type names (see - * `int_type_of_word`, below) to specify the discriminant type; and `C`, to use - * the same discriminant size that the corresponding C enum would. These are - * not allowed on univariant or zero-variant enums, which have no discriminant. - * - * If a discriminant type is so specified, then the discriminant will be - * present (before fields, if any) with that type; reprensentation - * optimizations which would remove it will not be done. - */ +/// Fold this over attributes to parse #[repr(...)] forms. 
+/// +/// Valid repr contents: any of the primitive integral type names (see +/// `int_type_of_word`, below) to specify the discriminant type; and `C`, to use +/// the same discriminant size that the corresponding C enum would. These are +/// not allowed on univariant or zero-variant enums, which have no discriminant. +/// +/// If a discriminant type is so specified, then the discriminant will be +/// present (before fields, if any) with that type; representation +/// optimizations which would remove it will not be done. pub fn find_repr_attr(diagnostic: &SpanHandler, attr: &Attribute, acc: ReprAttr) -> ReprAttr { let mut acc = acc; diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs index c917198e7d471..20e9cbbbe522e 100644 --- a/src/libsyntax/codemap.rs +++ b/src/libsyntax/codemap.rs @@ -8,18 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -The CodeMap tracks all the source code used within a single crate, mapping -from integer byte positions to the original source code location. Each bit of -source parsed during crate parsing (typically files, in-memory strings, or -various bits of macro expansion) cover a continuous range of bytes in the -CodeMap and are represented by FileMaps. Byte positions are stored in `spans` -and used pervasively in the compiler. They are absolute positions within the -CodeMap, which upon request can be converted to line and column information, -source code snippets, etc. - -*/ +//! The CodeMap tracks all the source code used within a single crate, mapping +//! from integer byte positions to the original source code location. Each bit +//! of source parsed during crate parsing (typically files, in-memory strings, +//! or various bits of macro expansion) cover a continuous range of bytes in the +//! CodeMap and are represented by FileMaps. Byte positions are stored in `spans` +//! and used pervasively in the compiler. 
They are absolute positions within the +//! CodeMap, which upon request can be converted to line and column information, +//! source code snippets, etc. use serialize::{Encodable, Decodable, Encoder, Decoder}; use std::cell::RefCell; @@ -79,12 +75,10 @@ impl Sub for CharPos { } } -/** -Spans represent a region of code, used for error reporting. Positions in spans -are *absolute* positions from the beginning of the codemap, not positions -relative to FileMaps. Methods on the CodeMap can be used to relate spans back -to the original source. -*/ +/// Spans represent a region of code, used for error reporting. Positions in +/// spans are *absolute* positions from the beginning of the codemap, not +/// positions relative to FileMaps. Methods on the CodeMap can be used to +/// relate spans back to the original source. #[deriving(Clone, Show, Hash)] pub struct Span { pub lo: BytePos, @@ -112,7 +106,7 @@ impl PartialEq for Span { impl Eq for Span {} impl, E> Encodable for Span { - /* Note #1972 -- spans are encoded but not decoded */ + // Note #1972 -- spans are encoded but not decoded fn encode(&self, s: &mut S) -> Result<(), E> { s.emit_nil() } @@ -136,7 +130,7 @@ pub fn dummy_spanned(t: T) -> Spanned { respan(DUMMY_SP, t) } -/* assuming that we're not in macro expansion */ +// assuming that we're not in macro expansion pub fn mk_sp(lo: BytePos, hi: BytePos) -> Span { Span {lo: lo, hi: hi, expn_info: None} } diff --git a/src/libsyntax/ext/asm.rs b/src/libsyntax/ext/asm.rs index f0494e1812013..a95f2960c5767 100644 --- a/src/libsyntax/ext/asm.rs +++ b/src/libsyntax/ext/asm.rs @@ -8,9 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/* - * Inline assembly support. - */ +//! Inline assembly support. 
use ast; use codemap::Span; diff --git a/src/libsyntax/ext/bytes.rs b/src/libsyntax/ext/bytes.rs index ce13fa2a7c6ee..59b3c3ac03d95 100644 --- a/src/libsyntax/ext/bytes.rs +++ b/src/libsyntax/ext/bytes.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/* The compiler code necessary to support the bytes! extension. */ +// The compiler code necessary to support the bytes! extension. use ast; use codemap::Span; diff --git a/src/libsyntax/ext/cfg.rs b/src/libsyntax/ext/cfg.rs index c2930662bc459..0921ea183c535 100644 --- a/src/libsyntax/ext/cfg.rs +++ b/src/libsyntax/ext/cfg.rs @@ -8,11 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/** -The compiler code necessary to support the cfg! extension, which -expands to a literal `true` or `false` based on whether the given cfgs -match the current compilation environment. -*/ +//! The compiler code necessary to support the cfg! extension, which +//! expands to a literal `true` or `false` based on whether the given cfgs +//! match the current compilation environment. use ast; use codemap::Span; diff --git a/src/libsyntax/ext/deriving/cmp/ord.rs b/src/libsyntax/ext/deriving/cmp/ord.rs index 59cdec1ea88f0..eab7683482e71 100644 --- a/src/libsyntax/ext/deriving/cmp/ord.rs +++ b/src/libsyntax/ext/deriving/cmp/ord.rs @@ -101,23 +101,21 @@ pub fn cs_partial_cmp(cx: &mut ExtCtxt, span: Span, let test_id = cx.ident_of("__test"); let equals_expr = some_ordering_const(cx, span, Equal); - /* - Builds: - - let __test = self_field1.partial_cmp(&other_field2); - if __test == ::std::option::Some(::std::cmp::Equal) { - let __test = self_field2.partial_cmp(&other_field2); - if __test == ::std::option::Some(::std::cmp::Equal) { - ... - } else { - __test - } - } else { - __test - } - - FIXME #6449: These `if`s could/should be `match`es. 
- */ + // Builds: + // + // let __test = self_field1.partial_cmp(&other_field2); + // if __test == ::std::option::Some(::std::cmp::Equal) { + // let __test = self_field2.partial_cmp(&other_field2); + // if __test == ::std::option::Some(::std::cmp::Equal) { + // ... + // } else { + // __test + // } + // } else { + // __test + // } + // + // FIXME #6449: These `if`s could/should be `match`es. cs_same_method_fold( // foldr nests the if-elses correctly, leaving the first field // as the outermost one, and the last as the innermost. @@ -160,23 +158,21 @@ fn cs_op(less: bool, equal: bool, cx: &mut ExtCtxt, span: Span, cs_fold( false, // need foldr, |cx, span, subexpr, self_f, other_fs| { - /* - build up a series of chain ||'s and &&'s from the inside - out (hence foldr) to get lexical ordering, i.e. for op == - `ast::lt` - - ``` - self.f1 < other.f1 || (!(other.f1 < self.f1) && - (self.f2 < other.f2 || (!(other.f2 < self.f2) && - (false) - )) - ) - ``` - - The optimiser should remove the redundancy. We explicitly - get use the binops to avoid auto-deref dereferencing too many - layers of pointers, if the type includes pointers. - */ + // build up a series of chain ||'s and &&'s from the inside + // out (hence foldr) to get lexical ordering, i.e. for op == + // `ast::lt` + // + // ``` + // self.f1 < other.f1 || (!(other.f1 < self.f1) && + // (self.f2 < other.f2 || (!(other.f2 < self.f2) && + // (false) + // )) + // ) + // ``` + // + // The optimiser should remove the redundancy. We explicitly + // use the binops to avoid auto-deref dereferencing too many + // layers of pointers, if the type includes pointers. 
let other_f = match other_fs { [o_f] => o_f, _ => cx.span_bug(span, "not exactly 2 arguments in `deriving(Ord)`") diff --git a/src/libsyntax/ext/deriving/cmp/totalord.rs b/src/libsyntax/ext/deriving/cmp/totalord.rs index 271aa90cd24a4..21f08fa9798f4 100644 --- a/src/libsyntax/ext/deriving/cmp/totalord.rs +++ b/src/libsyntax/ext/deriving/cmp/totalord.rs @@ -70,23 +70,21 @@ pub fn cs_cmp(cx: &mut ExtCtxt, span: Span, let test_id = cx.ident_of("__test"); let equals_path = ordering_const(cx, span, Equal); - /* - Builds: - - let __test = self_field1.cmp(&other_field2); - if other == ::std::cmp::Equal { - let __test = self_field2.cmp(&other_field2); - if __test == ::std::cmp::Equal { - ... - } else { - __test - } - } else { - __test - } - - FIXME #6449: These `if`s could/should be `match`es. - */ + // Builds: + // + // let __test = self_field1.cmp(&other_field2); + // if other == ::std::cmp::Equal { + // let __test = self_field2.cmp(&other_field2); + // if __test == ::std::cmp::Equal { + // ... + // } else { + // __test + // } + // } else { + // __test + // } + // + // FIXME #6449: These `if`s could/should be `match`es. cs_same_method_fold( // foldr nests the if-elses correctly, leaving the first field // as the outermost one, and the last as the innermost. diff --git a/src/libsyntax/ext/deriving/decodable.rs b/src/libsyntax/ext/deriving/decodable.rs index 0c23d65fde046..07c977c2b24a3 100644 --- a/src/libsyntax/ext/deriving/decodable.rs +++ b/src/libsyntax/ext/deriving/decodable.rs @@ -8,10 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -The compiler code necessary for `#[deriving(Decodable)]`. See -encodable.rs for more. -*/ +//! The compiler code necessary for `#[deriving(Decodable)]`. See +//! encodable.rs for more. 
use ast; use ast::{MetaItem, Item, Expr, MutMutable, Ident}; diff --git a/src/libsyntax/ext/deriving/encodable.rs b/src/libsyntax/ext/deriving/encodable.rs index f57670af1999b..3c870952eca2a 100644 --- a/src/libsyntax/ext/deriving/encodable.rs +++ b/src/libsyntax/ext/deriving/encodable.rs @@ -8,79 +8,76 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -The compiler code necessary to implement the `#[deriving(Encodable)]` -(and `Decodable`, in decodable.rs) extension. The idea here is that -type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`. - -For example, a type like: - -```ignore -#[deriving(Encodable, Decodable)] -struct Node { id: uint } -``` - -would generate two implementations like: - -```ignore -impl Encodable for Node { - fn encode(&self, s: &S) { - s.emit_struct("Node", 1, || { - s.emit_field("id", 0, || s.emit_uint(self.id)) - }) - } -} - -impl Decodable for node_id { - fn decode(d: &D) -> Node { - d.read_struct("Node", 1, || { - Node { - id: d.read_field("x".to_string(), 0, || decode(d)) - } - }) - } -} -``` - -Other interesting scenarios are whe the item has type parameters or -references other non-built-in types. A type definition like: - -```ignore -#[deriving(Encodable, Decodable)] -struct spanned { node: T, span: Span } -``` - -would yield functions like: - -```ignore - impl< - S: Encoder, - T: Encodable - > spanned: Encodable { - fn encode(s: &S) { - s.emit_rec(|| { - s.emit_field("node", 0, || self.node.encode(s)); - s.emit_field("span", 1, || self.span.encode(s)); - }) - } - } - - impl< - D: Decoder, - T: Decodable - > spanned: Decodable { - fn decode(d: &D) -> spanned { - d.read_rec(|| { - { - node: d.read_field("node".to_string(), 0, || decode(d)), - span: d.read_field("span".to_string(), 1, || decode(d)), - } - }) - } - } -``` -*/ +//! The compiler code necessary to implement the `#[deriving(Encodable)]` +//! (and `Decodable`, in decodable.rs) extension. 
The idea here is that +//! type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`. +//! +//! For example, a type like: +//! +//! ```ignore +//! #[deriving(Encodable, Decodable)] +//! struct Node { id: uint } +//! ``` +//! +//! would generate two implementations like: +//! +//! ```ignore +//! impl Encodable for Node { +//! fn encode(&self, s: &S) { +//! s.emit_struct("Node", 1, || { +//! s.emit_field("id", 0, || s.emit_uint(self.id)) +//! }) +//! } +//! } +//! +//! impl Decodable for node_id { +//! fn decode(d: &D) -> Node { +//! d.read_struct("Node", 1, || { +//! Node { +//! id: d.read_field("x".to_string(), 0, || decode(d)) +//! } +//! }) +//! } +//! } +//! ``` +//! +//! Other interesting scenarios are whe the item has type parameters or +//! references other non-built-in types. A type definition like: +//! +//! ```ignore +//! #[deriving(Encodable, Decodable)] +//! struct spanned { node: T, span: Span } +//! ``` +//! +//! would yield functions like: +//! +//! ```ignore +//! impl< +//! S: Encoder, +//! T: Encodable +//! > spanned: Encodable { +//! fn encode(s: &S) { +//! s.emit_rec(|| { +//! s.emit_field("node", 0, || self.node.encode(s)); +//! s.emit_field("span", 1, || self.span.encode(s)); +//! }) +//! } +//! } +//! +//! impl< +//! D: Decoder, +//! T: Decodable +//! > spanned: Decodable { +//! fn decode(d: &D) -> spanned { +//! d.read_rec(|| { +//! { +//! node: d.read_field("node".to_string(), 0, || decode(d)), +//! span: d.read_field("span".to_string(), 1, || decode(d)), +//! } +//! }) +//! } +//! } +//! ``` use ast; use ast::{MetaItem, Item, Expr, ExprRet, MutMutable, LitNil}; diff --git a/src/libsyntax/ext/deriving/generic/mod.rs b/src/libsyntax/ext/deriving/generic/mod.rs index 157b64fb47c0a..72251deb38db0 100644 --- a/src/libsyntax/ext/deriving/generic/mod.rs +++ b/src/libsyntax/ext/deriving/generic/mod.rs @@ -8,174 +8,170 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! 
- -Some code that abstracts away much of the boilerplate of writing -`deriving` instances for traits. Among other things it manages getting -access to the fields of the 4 different sorts of structs and enum -variants, as well as creating the method and impl ast instances. - -Supported features (fairly exhaustive): - -- Methods taking any number of parameters of any type, and returning - any type, other than vectors, bottom and closures. -- Generating `impl`s for types with type parameters and lifetimes - (e.g. `Option`), the parameters are automatically given the - current trait as a bound. (This includes separate type parameters - and lifetimes for methods.) -- Additional bounds on the type parameters, e.g. the `Ord` instance - requires an explicit `PartialEq` bound at the - moment. (`TraitDef.additional_bounds`) - -Unsupported: FIXME #6257: calling methods on reference fields, -e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`, -because of how the auto-dereferencing happens. - -The most important thing for implementers is the `Substructure` and -`SubstructureFields` objects. The latter groups 5 possibilities of the -arguments: - -- `Struct`, when `Self` is a struct (including tuple structs, e.g - `struct T(int, char)`). -- `EnumMatching`, when `Self` is an enum and all the arguments are the - same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`) -- `EnumNonMatching` when `Self` is an enum and the arguments are not - the same variant (e.g. `None`, `Some(1)` and `None`). If - `const_nonmatching` is true, this will contain an empty list. -- `StaticEnum` and `StaticStruct` for static methods, where the type - being derived upon is either an enum or struct respectively. (Any - argument with type Self is just grouped among the non-self - arguments.) - -In the first two cases, the values from the corresponding fields in -all the arguments are grouped together. 
In the `EnumNonMatching` case -this isn't possible (different variants have different fields), so the -fields are grouped by which argument they come from. There are no -fields with values in the static cases, so these are treated entirely -differently. - -The non-static cases have `Option` in several places associated -with field `expr`s. This represents the name of the field it is -associated with. It is only not `None` when the associated field has -an identifier in the source code. For example, the `x`s in the -following snippet - -```rust -struct A { x : int } - -struct B(int); - -enum C { - C0(int), - C1 { x: int } -} -``` - -The `int`s in `B` and `C0` don't have an identifier, so the -`Option`s would be `None` for them. - -In the static cases, the structure is summarised, either into the just -spans of the fields or a list of spans and the field idents (for tuple -structs and record structs, respectively), or a list of these, for -enums (one for each variant). For empty struct and empty enum -variants, it is represented as a count of 0. - -# Examples - -The following simplified `PartialEq` is used for in-code examples: - -```rust -trait PartialEq { - fn eq(&self, other: &Self); -} -impl PartialEq for int { - fn eq(&self, other: &int) -> bool { - *self == *other - } -} -``` - -Some examples of the values of `SubstructureFields` follow, using the -above `PartialEq`, `A`, `B` and `C`. 
- -## Structs - -When generating the `expr` for the `A` impl, the `SubstructureFields` is - -~~~text -Struct(~[FieldInfo { - span: - name: Some(), - self_: , - other: ~[, - name: None, - - ~[] - }]) -~~~ - -## Enums - -When generating the `expr` for a call with `self == C0(a)` and `other -== C0(b)`, the SubstructureFields is - -~~~text -EnumMatching(0, , - ~[FieldInfo { - span: - name: None, - self_: , - other: ~[] - }]) -~~~ - -For `C1 {x}` and `C1 {x}`, - -~~~text -EnumMatching(1, , - ~[FieldInfo { - span: - name: Some(), - self_: , - other: ~[] - }]) -~~~ - -For `C0(a)` and `C1 {x}` , - -~~~text -EnumNonMatching(~[(0, , - ~[(, None, )]), - (1, , - ~[(, Some(), - )])]) -~~~ - -(and vice versa, but with the order of the outermost list flipped.) - -## Static - -A static method on the above would result in, - -~~~text -StaticStruct(, Named(~[(, )])) - -StaticStruct(, Unnamed(~[])) - -StaticEnum(, ~[(, , Unnamed(~[])), - (, , - Named(~[(, )]))]) -~~~ - -*/ +//! Some code that abstracts away much of the boilerplate of writing +//! `deriving` instances for traits. Among other things it manages getting +//! access to the fields of the 4 different sorts of structs and enum +//! variants, as well as creating the method and impl ast instances. +//! +//! Supported features (fairly exhaustive): +//! +//! - Methods taking any number of parameters of any type, and returning +//! any type, other than vectors, bottom and closures. +//! - Generating `impl`s for types with type parameters and lifetimes +//! (e.g. `Option`), the parameters are automatically given the +//! current trait as a bound. (This includes separate type parameters +//! and lifetimes for methods.) +//! - Additional bounds on the type parameters, e.g. the `Ord` instance +//! requires an explicit `PartialEq` bound at the +//! moment. (`TraitDef.additional_bounds`) +//! +//! Unsupported: FIXME #6257: calling methods on reference fields, +//! e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`, +//! 
because of how the auto-dereferencing happens. +//! +//! The most important thing for implementers is the `Substructure` and +//! `SubstructureFields` objects. The latter groups 5 possibilities of the +//! arguments: +//! +//! - `Struct`, when `Self` is a struct (including tuple structs, e.g +//! `struct T(int, char)`). +//! - `EnumMatching`, when `Self` is an enum and all the arguments are the +//! same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`) +//! - `EnumNonMatching` when `Self` is an enum and the arguments are not +//! the same variant (e.g. `None`, `Some(1)` and `None`). If +//! `const_nonmatching` is true, this will contain an empty list. +//! - `StaticEnum` and `StaticStruct` for static methods, where the type +//! being derived upon is either an enum or struct respectively. (Any +//! argument with type Self is just grouped among the non-self +//! arguments.) +//! +//! In the first two cases, the values from the corresponding fields in +//! all the arguments are grouped together. In the `EnumNonMatching` case +//! this isn't possible (different variants have different fields), so the +//! fields are grouped by which argument they come from. There are no +//! fields with values in the static cases, so these are treated entirely +//! differently. +//! +//! The non-static cases have `Option` in several places associated +//! with field `expr`s. This represents the name of the field it is +//! associated with. It is only not `None` when the associated field has +//! an identifier in the source code. For example, the `x`s in the +//! following snippet +//! +//! ```rust +//! struct A { x : int } +//! +//! struct B(int); +//! +//! enum C { +//! C0(int), +//! C1 { x: int } +//! } +//! ``` +//! +//! The `int`s in `B` and `C0` don't have an identifier, so the +//! `Option`s would be `None` for them. +//! +//! In the static cases, the structure is summarised, either into the just +//! 
spans of the fields or a list of spans and the field idents (for tuple +//! structs and record structs, respectively), or a list of these, for +//! enums (one for each variant). For empty struct and empty enum +//! variants, it is represented as a count of 0. +//! +//! # Examples +//! +//! The following simplified `PartialEq` is used for in-code examples: +//! +//! ```rust +//! trait PartialEq { +//! fn eq(&self, other: &Self); +//! } +//! impl PartialEq for int { +//! fn eq(&self, other: &int) -> bool { +//! *self == *other +//! } +//! } +//! ``` +//! +//! Some examples of the values of `SubstructureFields` follow, using the +//! above `PartialEq`, `A`, `B` and `C`. +//! +//! ## Structs +//! +//! When generating the `expr` for the `A` impl, the `SubstructureFields` is +//! +//! ~~~text +//! Struct(~[FieldInfo { +//! span: +//! name: Some(), +//! self_: , +//! other: ~[, +//! name: None, +//! +//! ~[] +//! }]) +//! ~~~ +//! +//! ## Enums +//! +//! When generating the `expr` for a call with `self == C0(a)` and `other +//! == C0(b)`, the SubstructureFields is +//! +//! ~~~text +//! EnumMatching(0, , +//! ~[FieldInfo { +//! span: +//! name: None, +//! self_: , +//! other: ~[] +//! }]) +//! ~~~ +//! +//! For `C1 {x}` and `C1 {x}`, +//! +//! ~~~text +//! EnumMatching(1, , +//! ~[FieldInfo { +//! span: +//! name: Some(), +//! self_: , +//! other: ~[] +//! }]) +//! ~~~ +//! +//! For `C0(a)` and `C1 {x}` , +//! +//! ~~~text +//! EnumNonMatching(~[(0, , +//! ~[(, None, )]), +//! (1, , +//! ~[(, Some(), +//! )])]) +//! ~~~ +//! +//! (and vice versa, but with the order of the outermost list flipped.) +//! +//! ## Static +//! +//! A static method on the above would result in, +//! +//! ~~~text +//! StaticStruct(, Named(~[(, )])) +//! +//! StaticStruct(, Unnamed(~[])) +//! +//! StaticEnum(, ~[(, , Unnamed(~[])), +//! (, , +//! Named(~[(, )]))]) +//! 
~~~ use std::cell::RefCell; use std::gc::{Gc, GC}; @@ -281,18 +277,14 @@ pub enum StaticFields { /// and examples pub enum SubstructureFields<'a> { Struct(Vec), - /** - Matching variants of the enum: variant index, ast::Variant, - fields: the field name is only non-`None` in the case of a struct - variant. - */ + /// Matching variants of the enum: variant index, ast::Variant, + /// fields: the field name is only non-`None` in the case of a struct + /// variant. EnumMatching(uint, &'a ast::Variant, Vec), - /** - non-matching variants of the enum, [(variant index, ast::Variant, - [field span, field ident, fields])] \(i.e. all fields for self are in the - first tuple, for other1 are in the second tuple, etc.) - */ + /// non-matching variants of the enum, [(variant index, ast::Variant, + /// [field span, field ident, fields])] \(i.e. all fields for self are in + /// the first tuple, for other1 are in the second tuple, etc.) EnumNonMatching(&'a [(uint, P, Vec<(Span, Option, Gc)>)]), @@ -304,18 +296,14 @@ pub enum SubstructureFields<'a> { -/** -Combine the values of all the fields together. The last argument is -all the fields of all the structures, see above for details. -*/ +/// Combine the values of all the fields together. The last argument is +/// all the fields of all the structures, see above for details. 
pub type CombineSubstructureFunc<'a> = |&mut ExtCtxt, Span, &Substructure|: 'a -> Gc; -/** -Deal with non-matching enum variants, the arguments are a list -representing each variant: (variant index, ast::Variant instance, -[variant fields]), and a list of the nonself args of the type -*/ +/// Deal with non-matching enum variants, the arguments are a list +/// representing each variant: (variant index, ast::Variant instance, +/// [variant fields]), and a list of the nonself args of the type pub type EnumNonMatchFunc<'a> = |&mut ExtCtxt, Span, @@ -365,18 +353,16 @@ impl<'a> TraitDef<'a> { }) } - /** - * - * Given that we are deriving a trait `Tr` for a type `T<'a, ..., - * 'z, A, ..., Z>`, creates an impl like: - * - * ```ignore - * impl<'a, ..., 'z, A:Tr B1 B2, ..., Z: Tr B1 B2> Tr for T { ... } - * ``` - * - * where B1, B2, ... are the bounds given by `bounds_paths`.' - * - */ + /// + /// Given that we are deriving a trait `Tr` for a type `T<'a, ..., + /// 'z, A, ..., Z>`, creates an impl like: + /// + /// ```ignore + /// impl<'a, ..., 'z, A:Tr B1 B2, ..., Z: Tr B1 B2> Tr for T { ... } + /// ``` + /// + /// where B1, B2, ... are the bounds given by `bounds_paths`. 
+ /// fn create_derived_impl(&self, cx: &mut ExtCtxt, type_ident: Ident, @@ -647,27 +633,25 @@ impl<'a> MethodDef<'a> { } } - /** - ~~~ - #[deriving(PartialEq)] - struct A { x: int, y: int } - - // equivalent to: - impl PartialEq for A { - fn eq(&self, __arg_1: &A) -> bool { - match *self { - A {x: ref __self_0_0, y: ref __self_0_1} => { - match *__arg_1 { - A {x: ref __self_1_0, y: ref __self_1_1} => { - __self_0_0.eq(__self_1_0) && __self_0_1.eq(__self_1_1) - } - } - } - } - } - } - ~~~ - */ + /// ``` + /// #[deriving(PartialEq)] + /// struct A { x: int, y: int } + /// + /// // equivalent to: + /// impl PartialEq for A { + /// fn eq(&self, __arg_1: &A) -> bool { + /// match *self { + /// A {x: ref __self_0_0, y: ref __self_0_1} => { + /// match *__arg_1 { + /// A {x: ref __self_1_0, y: ref __self_1_1} => { + /// __self_0_0.eq(__self_1_0) && __self_0_1.eq(__self_1_1) + /// } + /// } + /// } + /// } + /// } + /// } + /// ``` fn expand_struct_method_body(&self, cx: &mut ExtCtxt, trait_: &TraitDef, @@ -752,32 +736,30 @@ impl<'a> MethodDef<'a> { &StaticStruct(struct_def, summary)) } - /** - ~~~ - #[deriving(PartialEq)] - enum A { - A1 - A2(int) - } - - // is equivalent to (with const_nonmatching == false) - - impl PartialEq for A { - fn eq(&self, __arg_1: &A) { - match *self { - A1 => match *__arg_1 { - A1 => true - A2(ref __arg_1_1) => false - }, - A2(self_1) => match *__arg_1 { - A1 => false, - A2(ref __arg_1_1) => self_1.eq(__arg_1_1) - } - } - } - } - ~~~ - */ + /// ``` + /// #[deriving(PartialEq)] + /// enum A { + /// A1 + /// A2(int) + /// } + /// + /// // is equivalent to (with const_nonmatching == false) + /// + /// impl PartialEq for A { + /// fn eq(&self, __arg_1: &A) { + /// match *self { + /// A1 => match *__arg_1 { + /// A1 => true + /// A2(ref __arg_1_1) => false + /// }, + /// A2(self_1) => match *__arg_1 { + /// A1 => false, + /// A2(ref __arg_1_1) => self_1.eq(__arg_1_1) + /// } + /// } + /// } + /// } + /// ``` fn expand_enum_method_body(&self, cx: 
&mut ExtCtxt, trait_: &TraitDef, @@ -792,28 +774,25 @@ impl<'a> MethodDef<'a> { None, &mut matches, 0) } - - /** - Creates the nested matches for an enum definition recursively, i.e. - - ~~~text - match self { - Variant1 => match other { Variant1 => matching, Variant2 => nonmatching, ... }, - Variant2 => match other { Variant1 => nonmatching, Variant2 => matching, ... }, - ... - } - ~~~ - - It acts in the most naive way, so every branch (and subbranch, - subsubbranch, etc) exists, not just the ones where all the variants in - the tree are the same. Hopefully the optimisers get rid of any - repetition, otherwise derived methods with many Self arguments will be - exponentially large. - - `matching` is Some(n) if all branches in the tree above the - current position are variant `n`, `None` otherwise (including on - the first call). - */ + /// Creates the nested matches for an enum definition recursively, i.e. + /// + /// ```text + /// match self { + /// Variant1 => match other { Variant1 => matching, Variant2 => nonmatching, ... }, + /// Variant2 => match other { Variant1 => nonmatching, Variant2 => matching, ... }, + /// ... + /// } + /// ``` + /// + /// It acts in the most naive way, so every branch (and subbranch, + /// subsubbranch, etc) exists, not just the ones where all the variants in + /// the tree are the same. Hopefully the optimisers get rid of any + /// repetition, otherwise derived methods with many Self arguments will be + /// exponentially large. + /// + /// `matching` is Some(n) if all branches in the tree above the + /// current position are variant `n`, `None` otherwise (including on + /// the first call). fn build_enum_match(&self, cx: &mut ExtCtxt, trait_: &TraitDef, @@ -1161,12 +1140,10 @@ impl<'a> TraitDef<'a> { } } -/* helpful premade recipes */ +// helpful premade recipes -/** -Fold the fields. `use_foldl` controls whether this is done -left-to-right (`true`) or right-to-left (`false`). -*/ +/// Fold the fields. 
`use_foldl` controls whether this is done +/// left-to-right (`true`) or right-to-left (`false`). pub fn cs_fold(use_foldl: bool, f: |&mut ExtCtxt, Span, Gc, Gc, &[Gc]| -> Gc, base: Gc, @@ -1205,15 +1182,13 @@ pub fn cs_fold(use_foldl: bool, } -/** -Call the method that is being derived on all the fields, and then -process the collected results. i.e. - -~~~ -f(cx, span, ~[self_1.method(__arg_1_1, __arg_2_1), - self_2.method(__arg_1_2, __arg_2_2)]) -~~~ -*/ +/// Call the method that is being derived on all the fields, and then +/// process the collected results. i.e. +/// +/// ``` +/// f(cx, span, ~[self_1.method(__arg_1_1, __arg_2_1), +/// self_2.method(__arg_1_2, __arg_2_2)]) +/// ``` #[inline] pub fn cs_same_method(f: |&mut ExtCtxt, Span, Vec>| -> Gc, enum_nonmatch_f: EnumNonMatchFunc, @@ -1244,11 +1219,9 @@ pub fn cs_same_method(f: |&mut ExtCtxt, Span, Vec>| -> Gc, } } -/** -Fold together the results of calling the derived method on all the -fields. `use_foldl` controls whether this is done left-to-right -(`true`) or right-to-left (`false`). -*/ +/// Fold together the results of calling the derived method on all the +/// fields. `use_foldl` controls whether this is done left-to-right +/// (`true`) or right-to-left (`false`). #[inline] pub fn cs_same_method_fold(use_foldl: bool, f: |&mut ExtCtxt, Span, Gc, Gc| -> Gc, @@ -1274,10 +1247,8 @@ pub fn cs_same_method_fold(use_foldl: bool, cx, trait_span, substructure) } -/** -Use a given binop to combine the result of calling the derived method -on all the fields. -*/ +/// Use a given binop to combine the result of calling the derived method +/// on all the fields. #[inline] pub fn cs_binop(binop: ast::BinOp, base: Gc, enum_nonmatch_f: EnumNonMatchFunc, diff --git a/src/libsyntax/ext/deriving/generic/ty.rs b/src/libsyntax/ext/deriving/generic/ty.rs index 7501b950770c2..cfa58ccb9fe17 100644 --- a/src/libsyntax/ext/deriving/generic/ty.rs +++ b/src/libsyntax/ext/deriving/generic/ty.rs @@ -8,10 +8,8 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -/*! -A mini version of ast::Ty, which is easier to use, and features an -explicit `Self` type to use when specifying impls to be derived. -*/ +//! A mini version of ast::Ty, which is easier to use, and features an +//! explicit `Self` type to use when specifying impls to be derived. use ast; use ast::{P,Expr,Generics,Ident}; diff --git a/src/libsyntax/ext/deriving/mod.rs b/src/libsyntax/ext/deriving/mod.rs index edfe54db0c760..ff63cb061fe2e 100644 --- a/src/libsyntax/ext/deriving/mod.rs +++ b/src/libsyntax/ext/deriving/mod.rs @@ -8,15 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -The compiler code necessary to implement the `#[deriving]` extensions. - - -FIXME (#2810): hygiene. Search for "__" strings (in other files too). -We also assume "extra" is the standard library, and "std" is the core -library. - -*/ +//! The compiler code necessary to implement the `#[deriving]` extensions. +//! +//! +//! FIXME (#2810): hygiene. Search for "__" strings (in other files too). +//! We also assume "extra" is the standard library, and "std" is the core +//! library. use ast::{Item, MetaItem, MetaList, MetaNameValue, MetaWord}; use ext::base::ExtCtxt; diff --git a/src/libsyntax/ext/env.rs b/src/libsyntax/ext/env.rs index 9ef7241ca2484..a46c8fff0c810 100644 --- a/src/libsyntax/ext/env.rs +++ b/src/libsyntax/ext/env.rs @@ -8,11 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/* - * The compiler code necessary to support the env! extension. Eventually this - * should all get sucked into either the compiler syntax extension plugin - * interface. - */ +//! The compiler code necessary to support the env! extension. Eventually this +//! should all get sucked into either the compiler syntax extension plugin +//! interface. 
use ast; use codemap::Span; diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index 752b3a09e652e..7671dc568a066 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -610,7 +610,7 @@ fn expand_stmt(s: &Stmt, fld: &mut MacroExpander) -> SmallVector> { node: StmtSemi(e, stmt_id) } } - _ => s /* might already have a semi */ + _ => s // might already have a semi } }).collect() } @@ -974,7 +974,7 @@ impl<'a, 'b> Folder for MacroExpander<'a, 'b> { } fn new_span(cx: &ExtCtxt, sp: Span) -> Span { - /* this discards information in the case of macro-defining macros */ + // this discards information in the case of macro-defining macros Span { lo: sp.lo, hi: sp.hi, diff --git a/src/libsyntax/ext/quote.rs b/src/libsyntax/ext/quote.rs index 7b24b97d5da4d..a10e1e660e176 100644 --- a/src/libsyntax/ext/quote.rs +++ b/src/libsyntax/ext/quote.rs @@ -18,17 +18,12 @@ use parse::token; use std::gc::Gc; -/** -* -* Quasiquoting works via token trees. -* -* This is registered as a set of expression syntax extension called quote! -* that lifts its argument token-tree to an AST representing the -* construction of the same token tree, with ast::TTNonterminal nodes -* interpreted as antiquotes (splices). -* -*/ - +/// Quasiquoting works via token trees. +/// +/// This is registered as a set of expression syntax extension called quote! +/// that lifts its argument token-tree to an AST representing the +/// construction of the same token tree, with ast::TTNonterminal nodes +/// interpreted as antiquotes (splices). pub mod rt { use ast; use codemap::Spanned; @@ -67,19 +62,17 @@ pub mod rt { } } - /* Should be (when bugs in default methods are fixed): - - trait ToSource : ToTokens { - // Takes a thing and generates a string containing rust code for it. - pub fn to_source() -> String; - - // If you can make source, you can definitely make tokens. 
- pub fn to_tokens(cx: &ExtCtxt) -> ~[TokenTree] { - cx.parse_tts(self.to_source()) - } - } - - */ + // Should be (when bugs in default methods are fixed): + // + // trait ToSource : ToTokens { + // // Takes a thing and generates a string containing rust code for it. + // pub fn to_source() -> String; + // + // // If you can make source, you can definitely make tokens. + // pub fn to_tokens(cx: &ExtCtxt) -> ~[TokenTree] { + // cx.parse_tts(self.to_source()) + // } + // } // FIXME: Move this trait to pprust and get rid of *_to_str? pub trait ToSource { diff --git a/src/libsyntax/ext/source_util.rs b/src/libsyntax/ext/source_util.rs index 915fc16c15660..060fb21e9850d 100644 --- a/src/libsyntax/ext/source_util.rs +++ b/src/libsyntax/ext/source_util.rs @@ -28,7 +28,7 @@ use std::str; // the column/row/filename of the expression, or they include // a given file into the current one. -/* line!(): expands to the current line number */ +// line!(): expands to the current line number pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> Box { base::check_zero_tts(cx, sp, tts, "line!"); @@ -39,7 +39,7 @@ pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) base::MacExpr::new(cx.expr_uint(topmost.call_site, loc.line)) } -/* col!(): expands to the current column number */ +// col!(): expands to the current column number pub fn expand_col(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> Box { base::check_zero_tts(cx, sp, tts, "col!"); @@ -49,9 +49,9 @@ pub fn expand_col(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) base::MacExpr::new(cx.expr_uint(topmost.call_site, loc.col.to_uint())) } -/* file!(): expands to the current filename */ -/* The filemap (`loc.file`) contains a bunch more information we could spit - * out if we wanted. */ +// file!(): expands to the current filename +// The filemap (`loc.file`) contains a bunch more information we could spit +// out if we wanted. 
pub fn expand_file(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> Box { base::check_zero_tts(cx, sp, tts, "file!"); diff --git a/src/libsyntax/ext/tt/macro_parser.rs b/src/libsyntax/ext/tt/macro_parser.rs index 86fbc8cec2a34..76e7800748deb 100644 --- a/src/libsyntax/ext/tt/macro_parser.rs +++ b/src/libsyntax/ext/tt/macro_parser.rs @@ -8,13 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// Earley-like parser for macros. +//! Earley-like parser for macros. use ast; use ast::{Matcher, MatchTok, MatchSeq, MatchNonterminal, Ident}; use codemap::{BytePos, mk_sp}; use codemap; -use parse::lexer::*; //resolve bug? +use parse::lexer::*; // resolve bug? use parse::ParseSess; use parse::attr::ParserAttr; use parse::parser::{LifetimeAndTypesWithoutColons, Parser}; @@ -25,77 +25,74 @@ use std::rc::Rc; use std::gc::GC; use std::collections::HashMap; -/* This is an Earley-like parser, without support for in-grammar nonterminals, -only by calling out to the main rust parser for named nonterminals (which it -commits to fully when it hits one in a grammar). This means that there are no -completer or predictor rules, and therefore no need to store one column per -token: instead, there's a set of current Earley items and a set of next -ones. Instead of NTs, we have a special case for Kleene star. The big-O, in -pathological cases, is worse than traditional Earley parsing, but it's an -easier fit for Macro-by-Example-style rules, and I think the overhead is -lower. (In order to prevent the pathological case, we'd need to lazily -construct the resulting `NamedMatch`es at the very end. It'd be a pain, -and require more memory to keep around old items, but it would also save -overhead)*/ - -/* Quick intro to how the parser works: - -A 'position' is a dot in the middle of a matcher, usually represented as a -dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`. 
- -The parser walks through the input a character at a time, maintaining a list -of items consistent with the current position in the input string: `cur_eis`. - -As it processes them, it fills up `eof_eis` with items that would be valid if -the macro invocation is now over, `bb_eis` with items that are waiting on -a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting -on the a particular token. Most of the logic concerns moving the · through the -repetitions indicated by Kleene stars. It only advances or calls out to the -real Rust parser when no `cur_eis` items remain - -Example: Start parsing `a a a a b` against [· a $( a )* a b]. - -Remaining input: `a a a a b` -next_eis: [· a $( a )* a b] - -- - - Advance over an `a`. - - - - -Remaining input: `a a a b` -cur: [a · $( a )* a b] -Descend/Skip (first item). -next: [a $( · a )* a b] [a $( a )* · a b]. - -- - - Advance over an `a`. - - - - -Remaining input: `a a b` -cur: [a $( a · )* a b] next: [a $( a )* a · b] -Finish/Repeat (first item) -next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] - -- - - Advance over an `a`. - - - (this looks exactly like the last step) - -Remaining input: `a b` -cur: [a $( a · )* a b] next: [a $( a )* a · b] -Finish/Repeat (first item) -next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] - -- - - Advance over an `a`. - - - (this looks exactly like the last step) - -Remaining input: `b` -cur: [a $( a · )* a b] next: [a $( a )* a · b] -Finish/Repeat (first item) -next: [a $( a )* · a b] [a $( · a )* a b] - -- - - Advance over a `b`. - - - - -Remaining input: `` -eof: [a $( a )* a b ·] - - */ - +// This is an Earley-like parser, without support for in-grammar nonterminals, +// only by calling out to the main rust parser for named nonterminals (which it +// commits to fully when it hits one in a grammar). 
This means that there are no +// completer or predictor rules, and therefore no need to store one column per +// token: instead, there's a set of current Earley items and a set of next +// ones. Instead of NTs, we have a special case for Kleene star. The big-O, in +// pathological cases, is worse than traditional Earley parsing, but it's an +// easier fit for Macro-by-Example-style rules, and I think the overhead is +// lower. (In order to prevent the pathological case, we'd need to lazily +// construct the resulting `NamedMatch`es at the very end. It'd be a pain, +// and require more memory to keep around old items, but it would also save +// overhead) + +// Quick intro to how the parser works: +// +// A 'position' is a dot in the middle of a matcher, usually represented as a +// dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`. +// +// The parser walks through the input a character at a time, maintaining a list +// of items consistent with the current position in the input string: `cur_eis`. +// +// As it processes them, it fills up `eof_eis` with items that would be valid if +// the macro invocation is now over, `bb_eis` with items that are waiting on +// a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting +// on a particular token. Most of the logic concerns moving the · through +// the repetitions indicated by Kleene stars. It only advances or calls out to +// the real Rust parser when no `cur_eis` items remain +// +// Example: Start parsing `a a a a b` against [· a $( a )* a b]. +// +// Remaining input: `a a a a b` +// next_eis: [· a $( a )* a b] +// +// - - - Advance over an `a`. - - - +// +// Remaining input: `a a a b` +// cur: [a · $( a )* a b] +// Descend/Skip (first item). +// next: [a $( · a )* a b] [a $( a )* · a b]. +// +// - - - Advance over an `a`. 
- - - +// +// Remaining input: `a a b` +// cur: [a $( a · )* a b] next: [a $( a )* a · b] +// Finish/Repeat (first item) +// next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] +// +// - - - Advance over an `a`. - - - (this looks exactly like the last step) +// +// Remaining input: `a b` +// cur: [a $( a · )* a b] next: [a $( a )* a · b] +// Finish/Repeat (first item) +// next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] +// +// - - - Advance over an `a`. - - - (this looks exactly like the last step) +// +// Remaining input: `b` +// cur: [a $( a · )* a b] next: [a $( a )* a · b] +// Finish/Repeat (first item) +// next: [a $( a )* · a b] [a $( · a )* a b] +// +// - - - Advance over a `b`. - - - +// +// Remaining input: `` +// eof: [a $( a )* a b ·] -/* to avoid costly uniqueness checks, we require that `MatchSeq` always has a -nonempty body. */ +// to avoid costly uniqueness checks, we require that `MatchSeq` always has a +// nonempty body. #[deriving(Clone)] @@ -253,17 +250,17 @@ pub fn parse(sess: &ParseSess, let TokenAndSpan {tok: tok, sp: sp} = rdr.peek(); - /* we append new items to this while we go */ + // we append new items to this while we go loop { let ei = match cur_eis.pop() { - None => break, /* for each Earley Item */ + None => break, // for each Earley Item Some(ei) => ei, }; let idx = ei.idx; let len = ei.elts.len(); - /* at end of sequence */ + // at end of sequence if idx >= len { // can't move out of `match`es, so: if ei.up.is_some() { @@ -323,7 +320,7 @@ pub fn parse(sess: &ParseSess, } } else { match ei.elts.get(idx).node.clone() { - /* need to descend into sequence */ + // need to descend into sequence MatchSeq(ref matchers, ref sep, zero_ok, match_idx_lo, match_idx_hi) => { if zero_ok { @@ -364,7 +361,7 @@ pub fn parse(sess: &ParseSess, } } - /* error messages here could be improved with links to orig. rules */ + // error messages here could be improved with links to orig. 
rules if token_name_eq(&tok, &EOF) { if eof_eis.len() == 1u { let mut v = Vec::new(); @@ -397,7 +394,7 @@ pub fn parse(sess: &ParseSess, return Failure(sp, format!("no rules expected the token `{}`", token::to_str(&tok)).to_string()); } else if next_eis.len() > 0u { - /* Now process the next token */ + // Now process the next token while next_eis.len() > 0u { cur_eis.push(next_eis.pop().unwrap()); } diff --git a/src/libsyntax/ext/tt/transcribe.rs b/src/libsyntax/ext/tt/transcribe.rs index c0c066fe4668b..bd7c6d06b848b 100644 --- a/src/libsyntax/ext/tt/transcribe.rs +++ b/src/libsyntax/ext/tt/transcribe.rs @@ -34,18 +34,18 @@ pub struct TtReader<'a> { pub sp_diag: &'a SpanHandler, // the unzipped tree: stack: Vec, - /* for MBE-style macro transcription */ + // for MBE-style macro transcription interpolations: HashMap>, repeat_idx: Vec, repeat_len: Vec, - /* cached: */ + // cached: pub cur_tok: Token, pub cur_span: Span, } -/** This can do Macro-By-Example transcription. On the other hand, if - * `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and - * should) be none. */ +/// This can do Macro-By-Example transcription. On the other hand, if +/// `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and +/// should) be none. 
pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler, interp: Option>>, src: Vec ) @@ -58,17 +58,17 @@ pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler, dotdotdoted: false, sep: None, }), - interpolations: match interp { /* just a convenience */ + interpolations: match interp { // just a convenience None => HashMap::new(), Some(x) => x, }, repeat_idx: Vec::new(), repeat_len: Vec::new(), - /* dummy values, never read: */ + // dummy values, never read: cur_tok: EOF, cur_span: DUMMY_SP, }; - tt_next_token(&mut r); /* get cur_tok and cur_span set up */ + tt_next_token(&mut r); // get cur_tok and cur_span set up r } @@ -161,7 +161,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { } }; - /* done with this set; pop or repeat? */ + // done with this set; pop or repeat? if should_pop { let prev = r.stack.pop().unwrap(); match r.stack.mut_last() { @@ -177,20 +177,20 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { r.repeat_idx.pop(); r.repeat_len.pop(); } - } else { /* repeat */ + } else { // repeat *r.repeat_idx.mut_last().unwrap() += 1u; r.stack.mut_last().unwrap().idx = 0; match r.stack.last().unwrap().sep.clone() { Some(tk) => { - r.cur_tok = tk; /* repeat same span, I guess */ + r.cur_tok = tk; // repeat same span, I guess return ret_val; } None => {} } } } - loop { /* because it's easiest, this handles `TTDelim` not starting - with a `TTTok`, even though it won't happen */ + loop { // because it's easiest, this handles `TTDelim` not starting + // with a `TTTok`, even though it won't happen let t = { let frame = r.stack.last().unwrap(); // FIXME(pcwalton): Bad copy. 
@@ -217,7 +217,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { match lockstep_iter_size(&TTSeq(sp, tts.clone(), sep.clone(), zerok), r) { LisUnconstrained => { r.sp_diag.span_fatal( - sp.clone(), /* blame macro writer */ + sp.clone(), // blame macro writer "attempted to repeat an expression \ containing no syntax \ variables matched as repeating at this depth"); @@ -252,9 +252,9 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { TTNonterminal(sp, ident) => { r.stack.mut_last().unwrap().idx += 1; match *lookup_cur_matched(r, ident) { - /* sidestep the interpolation tricks for ident because - (a) idents can be in lots of places, so it'd be a pain - (b) we actually can, since it's a token. */ + // sidestep the interpolation tricks for ident because + // (a) idents can be in lots of places, so it'd be a pain + // (b) we actually can, since it's a token. MatchedNonterminal(NtIdent(box sn, b)) => { r.cur_span = sp; r.cur_tok = IDENT(sn,b); @@ -268,7 +268,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { } MatchedSeq(..) 
=> { r.sp_diag.span_fatal( - r.cur_span, /* blame the macro writer */ + r.cur_span, // blame the macro writer format!("variable '{}' is still repeating at this depth", token::get_ident(ident)).as_slice()); } diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs index 04e6612daf1f0..c06fb19a4c78f 100644 --- a/src/libsyntax/fold.rs +++ b/src/libsyntax/fold.rs @@ -345,7 +345,7 @@ pub trait Folder { noop_fold_lifetime(l, self) } - //used in noop_fold_item and noop_fold_crate + // used in noop_fold_item and noop_fold_crate fn fold_attribute(&mut self, at: Attribute) -> Attribute { Spanned { span: self.new_span(at.span), @@ -361,9 +361,9 @@ pub trait Folder { } -/* some little folds that probably aren't useful to have in Folder itself*/ +// some little folds that probably aren't useful to have in Folder itself -//used in noop_fold_item and noop_fold_crate and noop_fold_crate_directive +// used in noop_fold_item and noop_fold_crate and noop_fold_crate_directive fn fold_meta_item_(mi: Gc, fld: &mut T) -> Gc { box(GC) Spanned { node: @@ -379,7 +379,7 @@ fn fold_meta_item_(mi: Gc, fld: &mut T) -> Gc { span: fld.new_span(mi.span) } } -//used in noop_fold_foreign_item and noop_fold_fn_decl +// used in noop_fold_foreign_item and noop_fold_fn_decl fn fold_arg_(a: &Arg, fld: &mut T) -> Arg { let id = fld.new_id(a.id); // Needs to be first, for ast_map. Arg { diff --git a/src/libsyntax/lib.rs b/src/libsyntax/lib.rs index 6df91c66a25e8..53ee991385ae3 100644 --- a/src/libsyntax/lib.rs +++ b/src/libsyntax/lib.rs @@ -8,15 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -The Rust parser and macro expander. - -# Note - -This API is completely unstable and subject to change. - -*/ +//! The Rust parser and macro expander. +//! +//! # Note +//! +//! This API is completely unstable and subject to change. 
#![crate_id = "syntax#0.11.0"] // NOTE: remove after stage0 #![crate_name = "syntax"] diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index 0f188fdf18a5a..c47c64ef626c8 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -53,7 +53,7 @@ pub struct StringReader<'a> { // The last character to be read pub curr: Option, pub filemap: Rc, - /* cached: */ + // cached: pub peek_tok: token::Token, pub peek_span: Span, } @@ -118,7 +118,7 @@ impl<'a> StringReader<'a> { col: CharPos(0), curr: Some('\n'), filemap: filemap, - /* dummy values; not read */ + // dummy values; not read peek_tok: token::EOF, peek_span: codemap::DUMMY_SP, }; @@ -636,9 +636,9 @@ impl<'a> StringReader<'a> { self.check_float_base(start_bpos, last_bpos, base); return token::LIT_FLOAT(str_to_ident(num_str.as_slice()), ast::TyF64); - /* FIXME (#2252): if this is out of range for either a - 32-bit or 64-bit float, it won't be noticed till the - back-end. */ + // FIXME (#2252): if this is out of range for either a + // 32-bit or 64-bit float, it won't be noticed till the + // back-end. } let last_bpos = self.last_pos; self.err_span_(start_bpos, last_bpos, "expected `f32` or `f64` suffix"); diff --git a/src/libsyntax/parse/obsolete.rs b/src/libsyntax/parse/obsolete.rs index 025684ae71e8c..55dd582c3eb32 100644 --- a/src/libsyntax/parse/obsolete.rs +++ b/src/libsyntax/parse/obsolete.rs @@ -8,14 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -Support for parsing unsupported, old syntaxes, for the -purpose of reporting errors. Parsing of these syntaxes -is tested by compile-test/obsolete-syntax.rs. - -Obsolete syntax that becomes too hard to parse can be -removed. -*/ +//! Support for parsing unsupported, old syntaxes, for the +//! purpose of reporting errors. Parsing of these syntaxes +//! is tested by compile-test/obsolete-syntax.rs. +//! +//! 
Obsolete syntax that becomes too hard to parse can be +//! removed. use ast::{Expr, ExprLit, LitNil}; use codemap::{Span, respan}; diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 6b6387b012786..ede119b4ad59a 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -130,9 +130,9 @@ enum ItemOrViewItem { // Possibly accept an `INTERPOLATED` expression (a pre-parsed expression // dropped into the token stream, which happens while parsing the // result of macro expansion) -/* Placement of these is not as complex as I feared it would be. -The important thing is to make sure that lookahead doesn't balk -at INTERPOLATED tokens */ +// Placement of these is not as complex as I feared it would be. +// The important thing is to make sure that lookahead doesn't balk +// at INTERPOLATED tokens macro_rules! maybe_whole_expr ( ($p:expr) => ( { @@ -284,7 +284,7 @@ struct ParsedItemsAndViewItems { foreign_items: Vec> } -/* ident is handled by common.rs */ +// ident is handled by common.rs pub struct Parser<'a> { pub sess: &'a ParseSess, @@ -985,17 +985,14 @@ impl<'a> Parser<'a> { // parse a TyBareFn type: pub fn parse_ty_bare_fn(&mut self) -> Ty_ { - /* - - [unsafe] [extern "ABI"] fn <'lt> (S) -> T - ^~~~^ ^~~~^ ^~~~^ ^~^ ^ - | | | | | - | | | | Return type - | | | Argument types - | | Lifetimes - | ABI - Function Style - */ + // [unsafe] [extern "ABI"] fn <'lt> (S) -> T + // ^~~~^ ^~~~^ ^~~~^ ^~^ ^ + // | | | | | + // | | | | Return type + // | | | Argument types + // | | Lifetimes + // | ABI + // Function Style let fn_style = self.parse_unsafety(); let abi = if self.eat_keyword(keywords::Extern) { @@ -1017,18 +1014,14 @@ impl<'a> Parser<'a> { // Parses a procedure type (`proc`). The initial `proc` keyword must // already have been parsed. 
pub fn parse_proc_type(&mut self) -> Ty_ { - /* - - proc <'lt> (S) [:Bounds] -> T - ^~~^ ^~~~^ ^ ^~~~~~~~^ ^ - | | | | | - | | | | Return type - | | | Bounds - | | Argument types - | Lifetimes - the `proc` keyword - - */ + // proc <'lt> (S) [:Bounds] -> T + // ^~~^ ^~~~^ ^ ^~~~~~~~^ ^ + // | | | | | + // | | | | Return type + // | | | Bounds + // | | Argument types + // | Lifetimes + // the `proc` keyword let lifetimes = if self.eat(&token::LT) { let lifetimes = self.parse_lifetimes(); @@ -1065,19 +1058,15 @@ impl<'a> Parser<'a> { // parse a TyClosure type pub fn parse_ty_closure(&mut self) -> Ty_ { - /* - - [unsafe] [once] <'lt> |S| [:Bounds] -> T - ^~~~~~~^ ^~~~~^ ^~~~^ ^ ^~~~~~~~^ ^ - | | | | | | - | | | | | Return type - | | | | Closure bounds - | | | Argument types - | | Lifetimes - | Once-ness (a.k.a., affine) - Function Style - - */ + // [unsafe] [once] <'lt> |S| [:Bounds] -> T + // ^~~~~~~^ ^~~~~^ ^~~~^ ^ ^~~~~~~~^ ^ + // | | | | | | + // | | | | | Return type + // | | | | Closure bounds + // | | | Argument types + // | | Lifetimes + // | Once-ness (a.k.a., affine) + // Function Style let fn_style = self.parse_unsafety(); let onceness = if self.eat_keyword(keywords::Once) {Once} else {Many}; @@ -1157,16 +1146,13 @@ impl<'a> Parser<'a> { // parse a function type (following the 'fn') pub fn parse_ty_fn_decl(&mut self, allow_variadic: bool) -> (P, Vec) { - /* - - (fn) <'lt> (S) -> T - ^~~~^ ^~^ ^ - | | | - | | Return type - | Argument types - Lifetimes + // (fn) <'lt> (S) -> T + // ^~~~^ ^~^ ^ + // | | | + // | | Return type + // | Argument types + // Lifetimes - */ let lifetimes = if self.eat(&token::LT) { let lifetimes = self.parse_lifetimes(); self.expect_gt(); @@ -1737,17 +1723,14 @@ impl<'a> Parser<'a> { } } - // matches lifetimes = ( lifetime ) | ( lifetime , lifetimes ) - // actually, it matches the empty one too, but putting that in there - // messes up the grammar.... + /// Parses zero or more comma separated lifetimes. 
+ /// Expects each lifetime to be followed by either + /// a comma or `>`. Used when parsing type parameter + /// lists, where we expect something like `<'a, 'b, T>`. pub fn parse_lifetimes(&mut self) -> Vec { - /*! - * - * Parses zero or more comma separated lifetimes. - * Expects each lifetime to be followed by either - * a comma or `>`. Used when parsing type parameter - * lists, where we expect something like `<'a, 'b, T>`. - */ + // matches lifetimes = ( lifetime ) | ( lifetime , lifetimes ) + // actually, it matches the empty one too, but putting that in there + // messes up the grammar.... let mut res = Vec::new(); loop { @@ -2235,7 +2218,7 @@ impl<'a> Parser<'a> { p.fatal(format!("incorrect close delimiter: `{}`", token_str).as_slice()) }, - /* we ought to allow different depths of unquotation */ + // we ought to allow different depths of unquotation token::DOLLAR if p.quote_depth > 0u => { p.bump(); let sp = p.span; @@ -3276,9 +3259,9 @@ impl<'a> Parser<'a> { return box(GC) spanned(lo, hi, StmtDecl( box(GC) spanned(lo, hi, DeclItem( self.mk_item( - lo, hi, id /*id is good here*/, + lo, hi, id, // id is good here ItemMac(spanned(lo, hi, MacInvocTT(pth, tts, EMPTY_CTXT))), - Inherited, Vec::new(/*no attrs*/)))), + Inherited, Vec::new(/* no attrs */)))), ast::DUMMY_NODE_ID)); } diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs index dcf37e37ff0a7..bef4ad1cb7d5d 100644 --- a/src/libsyntax/parse/token.rs +++ b/src/libsyntax/parse/token.rs @@ -41,7 +41,7 @@ pub enum BinOp { #[allow(non_camel_case_types)] #[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)] pub enum Token { - /* Expression-operator symbols. */ + // Expression-operator symbols. 
EQ, LT, LE, @@ -56,7 +56,7 @@ pub enum Token { BINOP(BinOp), BINOPEQ(BinOp), - /* Structural symbols */ + // Structural symbols AT, DOT, DOTDOT, @@ -77,7 +77,7 @@ pub enum Token { POUND, DOLLAR, - /* Literals */ + // Literals LIT_BYTE(u8), LIT_CHAR(char), LIT_INT(i64, ast::IntTy), @@ -86,11 +86,11 @@ pub enum Token { LIT_FLOAT(ast::Ident, ast::FloatTy), LIT_FLOAT_UNSUFFIXED(ast::Ident), LIT_STR(ast::Ident), - LIT_STR_RAW(ast::Ident, uint), /* raw str delimited by n hash symbols */ + LIT_STR_RAW(ast::Ident, uint), // raw str delimited by n hash symbols LIT_BINARY(Rc>), - LIT_BINARY_RAW(Rc>, uint), /* raw binary str delimited by n hash symbols */ + LIT_BINARY_RAW(Rc>, uint), // raw binary str delimited by n hash symbols - /* Name components */ + // Name components // an identifier contains an "is_mod_name" boolean, // indicating whether :: follows this token with no // whitespace in between. @@ -98,7 +98,7 @@ pub enum Token { UNDERSCORE, LIFETIME(ast::Ident), - /* For interpolation */ + // For interpolation INTERPOLATED(Nonterminal), DOC_COMMENT(ast::Ident), @@ -175,7 +175,7 @@ pub fn to_str(t: &Token) -> String { s } - /* Structural symbols */ + // Structural symbols AT => "@".to_string(), DOT => ".".to_string(), DOTDOT => "..".to_string(), @@ -196,7 +196,7 @@ pub fn to_str(t: &Token) -> String { POUND => "#".to_string(), DOLLAR => "$".to_string(), - /* Literals */ + // Literals LIT_BYTE(b) => { let mut res = String::from_str("b'"); (b as char).escape_default(|c| { @@ -248,14 +248,14 @@ pub fn to_str(t: &Token) -> String { delim="#".repeat(n), string=s.as_slice().to_ascii().as_str_ascii()) } - /* Name components */ + // Name components IDENT(s, _) => get_ident(s).get().to_string(), LIFETIME(s) => { format!("{}", get_ident(s)) } UNDERSCORE => "_".to_string(), - /* Other */ + // Other DOC_COMMENT(s) => get_ident(s).get().to_string(), EOF => "".to_string(), INTERPOLATED(ref nt) => { @@ -407,13 +407,11 @@ macro_rules! 
declare_special_idents_and_keywords {( $( pub static $si_static: Ident = Ident { name: $si_name, ctxt: 0 }; )* } - /** - * All the valid words that have meaning in the Rust language. - * - * Rust keywords are either 'strict' or 'reserved'. Strict keywords may not - * appear as identifiers at all. Reserved keywords are not used anywhere in - * the language and may not appear as identifiers. - */ + /// All the valid words that have meaning in the Rust language. + /// + /// Rust keywords are either 'strict' or 'reserved'. Strict keywords may not + /// appear as identifiers at all. Reserved keywords are not used anywhere in + /// the language and may not appear as identifiers. pub mod keywords { use ast::Ident; @@ -527,10 +525,8 @@ declare_special_idents_and_keywords! { } } -/** - * Maps a token to a record specifying the corresponding binary - * operator - */ +/// Maps a token to a record specifying the corresponding binary +/// operator pub fn token_to_binop(tok: &Token) -> Option { match *tok { BINOP(STAR) => Some(ast::BiMul), @@ -699,8 +695,8 @@ pub fn fresh_name(src: &ast::Ident) -> Name { // following: debug version. Could work in final except that it's incompatible with // good error messages and uses of struct names in ambiguous could-be-binding // locations. Also definitely destroys the guarantee given above about ptr_eq. - /*let num = rand::task_rng().gen_uint_range(0,0xffff); - gensym(format!("{}_{}",ident_to_str(src),num))*/ + // let num = rand::task_rng().gen_uint_range(0,0xffff); + // gensym(format!("{}_{}",ident_to_str(src),num)) } // create a fresh mark. diff --git a/src/libsyntax/print/pp.rs b/src/libsyntax/print/pp.rs index 24ab4b38e54b8..778e27c72d007 100644 --- a/src/libsyntax/print/pp.rs +++ b/src/libsyntax/print/pp.rs @@ -8,58 +8,56 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-/* - * This pretty-printer is a direct reimplementation of Philip Karlton's - * Mesa pretty-printer, as described in appendix A of - * - * STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen. - * Stanford Department of Computer Science, 1979. - * - * The algorithm's aim is to break a stream into as few lines as possible - * while respecting the indentation-consistency requirements of the enclosing - * block, and avoiding breaking at silly places on block boundaries, for - * example, between "x" and ")" in "x)". - * - * I am implementing this algorithm because it comes with 20 pages of - * documentation explaining its theory, and because it addresses the set of - * concerns I've seen other pretty-printers fall down on. Weirdly. Even though - * it's 32 years old. What can I say? - * - * Despite some redundancies and quirks in the way it's implemented in that - * paper, I've opted to keep the implementation here as similar as I can, - * changing only what was blatantly wrong, a typo, or sufficiently - * non-idiomatic rust that it really stuck out. - * - * In particular you'll see a certain amount of churn related to INTEGER vs. - * CARDINAL in the Mesa implementation. Mesa apparently interconverts the two - * somewhat readily? In any case, I've used uint for indices-in-buffers and - * ints for character-sizes-and-indentation-offsets. This respects the need - * for ints to "go negative" while carrying a pending-calculation balance, and - * helps differentiate all the numbers flying around internally (slightly). - * - * I also inverted the indentation arithmetic used in the print stack, since - * the Mesa implementation (somewhat randomly) stores the offset on the print - * stack in terms of margin-col rather than col itself. I store col. - * - * I also implemented a small change in the String token, in that I store an - * explicit length for the string. For most tokens this is just the length of - * the accompanying string. 
But it's necessary to permit it to differ, for - * encoding things that are supposed to "go on their own line" -- certain - * classes of comment and blank-line -- where relying on adjacent - * hardbreak-like Break tokens with long blankness indication doesn't actually - * work. To see why, consider when there is a "thing that should be on its own - * line" between two long blocks, say functions. If you put a hardbreak after - * each function (or before each) and the breaking algorithm decides to break - * there anyways (because the functions themselves are long) you wind up with - * extra blank lines. If you don't put hardbreaks you can wind up with the - * "thing which should be on its own line" not getting its own line in the - * rare case of "really small functions" or such. This re-occurs with comments - * and explicit blank lines. So in those cases we use a string with a payload - * we want isolated to a line and an explicit length that's huge, surrounded - * by two zero-length breaks. The algorithm will try its best to fit it on a - * line (which it can't) and so naturally place the content on its own line to - * avoid combining it with other lines and making matters even worse. - */ +// This pretty-printer is a direct reimplementation of Philip Karlton's +// Mesa pretty-printer, as described in appendix A of +// +// STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen. +// Stanford Department of Computer Science, 1979. +// +// The algorithm's aim is to break a stream into as few lines as possible +// while respecting the indentation-consistency requirements of the enclosing +// block, and avoiding breaking at silly places on block boundaries, for +// example, between "x" and ")" in "x)". +// +// I am implementing this algorithm because it comes with 20 pages of +// documentation explaining its theory, and because it addresses the set of +// concerns I've seen other pretty-printers fall down on. Weirdly. Even though +// it's 32 years old. What can I say? 
+// +// Despite some redundancies and quirks in the way it's implemented in that +// paper, I've opted to keep the implementation here as similar as I can, +// changing only what was blatantly wrong, a typo, or sufficiently +// non-idiomatic rust that it really stuck out. +// +// In particular you'll see a certain amount of churn related to INTEGER vs. +// CARDINAL in the Mesa implementation. Mesa apparently interconverts the two +// somewhat readily? In any case, I've used uint for indices-in-buffers and +// ints for character-sizes-and-indentation-offsets. This respects the need +// for ints to "go negative" while carrying a pending-calculation balance, and +// helps differentiate all the numbers flying around internally (slightly). +// +// I also inverted the indentation arithmetic used in the print stack, since +// the Mesa implementation (somewhat randomly) stores the offset on the print +// stack in terms of margin-col rather than col itself. I store col. +// +// I also implemented a small change in the String token, in that I store an +// explicit length for the string. For most tokens this is just the length of +// the accompanying string. But it's necessary to permit it to differ, for +// encoding things that are supposed to "go on their own line" -- certain +// classes of comment and blank-line -- where relying on adjacent +// hardbreak-like Break tokens with long blankness indication doesn't actually +// work. To see why, consider when there is a "thing that should be on its own +// line" between two long blocks, say functions. If you put a hardbreak after +// each function (or before each) and the breaking algorithm decides to break +// there anyways (because the functions themselves are long) you wind up with +// extra blank lines. If you don't put hardbreaks you can wind up with the +// "thing which should be on its own line" not getting its own line in the +// rare case of "really small functions" or such. 
This re-occurs with comments +// and explicit blank lines. So in those cases we use a string with a payload +// we want isolated to a line and an explicit length that's huge, surrounded +// by two zero-length breaks. The algorithm will try its best to fit it on a +// line (which it can't) and so naturally place the content on its own line to +// avoid combining it with other lines and making matters even worse. use std::io; use std::string::String; @@ -186,83 +184,82 @@ pub fn mk_printer(out: Box, linewidth: uint) -> Printer { } -/* - * In case you do not have the paper, here is an explanation of what's going - * on. - * - * There is a stream of input tokens flowing through this printer. - * - * The printer buffers up to 3N tokens inside itself, where N is linewidth. - * Yes, linewidth is chars and tokens are multi-char, but in the worst - * case every token worth buffering is 1 char long, so it's ok. - * - * Tokens are String, Break, and Begin/End to delimit blocks. - * - * Begin tokens can carry an offset, saying "how far to indent when you break - * inside here", as well as a flag indicating "consistent" or "inconsistent" - * breaking. Consistent breaking means that after the first break, no attempt - * will be made to flow subsequent breaks together onto lines. Inconsistent - * is the opposite. Inconsistent breaking example would be, say: - * - * foo(hello, there, good, friends) - * - * breaking inconsistently to become - * - * foo(hello, there - * good, friends); - * - * whereas a consistent breaking would yield: - * - * foo(hello, - * there - * good, - * friends); - * - * That is, in the consistent-break blocks we value vertical alignment - * more than the ability to cram stuff onto a line. But in all cases if it - * can make a block a one-liner, it'll do so. - * - * Carrying on with high-level logic: - * - * The buffered tokens go through a ring-buffer, 'tokens'. 
The 'left' and - * 'right' indices denote the active portion of the ring buffer as well as - * describing hypothetical points-in-the-infinite-stream at most 3N tokens - * apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch - * between using 'left' and 'right' terms to denote the wrapepd-to-ring-buffer - * and point-in-infinite-stream senses freely. - * - * There is a parallel ring buffer, 'size', that holds the calculated size of - * each token. Why calculated? Because for Begin/End pairs, the "size" - * includes everything between the pair. That is, the "size" of Begin is - * actually the sum of the sizes of everything between Begin and the paired - * End that follows. Since that is arbitrarily far in the future, 'size' is - * being rewritten regularly while the printer runs; in fact most of the - * machinery is here to work out 'size' entries on the fly (and give up when - * they're so obviously over-long that "infinity" is a good enough - * approximation for purposes of line breaking). - * - * The "input side" of the printer is managed as an abstract process called - * SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to - * manage calculating 'size'. SCAN is, in other words, the process of - * calculating 'size' entries. - * - * The "output side" of the printer is managed by an abstract process called - * PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to - * do with each token/size pair it consumes as it goes. It's trying to consume - * the entire buffered window, but can't output anything until the size is >= - * 0 (sizes are set to negative while they're pending calculation). - * - * So SCAN takes input and buffers tokens and pending calculations, while - * PRINT gobbles up completed calculations and tokens from the buffer. 
The - * theory is that the two can never get more than 3N tokens apart, because - * once there's "obviously" too much data to fit on a line, in a size - * calculation, SCAN will write "infinity" to the size and let PRINT consume - * it. - * - * In this implementation (following the paper, again) the SCAN process is - * the method called 'pretty_print', and the 'PRINT' process is the method - * called 'print'. - */ +// In case you do not have the paper, here is an explanation of what's going +// on. +// +// There is a stream of input tokens flowing through this printer. +// +// The printer buffers up to 3N tokens inside itself, where N is linewidth. +// Yes, linewidth is chars and tokens are multi-char, but in the worst +// case every token worth buffering is 1 char long, so it's ok. +// +// Tokens are String, Break, and Begin/End to delimit blocks. +// +// Begin tokens can carry an offset, saying "how far to indent when you break +// inside here", as well as a flag indicating "consistent" or "inconsistent" +// breaking. Consistent breaking means that after the first break, no attempt +// will be made to flow subsequent breaks together onto lines. Inconsistent +// is the opposite. Inconsistent breaking example would be, say: +// +// foo(hello, there, good, friends) +// +// breaking inconsistently to become +// +// foo(hello, there +// good, friends); +// +// whereas a consistent breaking would yield: +// +// foo(hello, +// there +// good, +// friends); +// +// That is, in the consistent-break blocks we value vertical alignment +// more than the ability to cram stuff onto a line. But in all cases if it +// can make a block a one-liner, it'll do so. +// +// Carrying on with high-level logic: +// +// The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and +// 'right' indices denote the active portion of the ring buffer as well as +// describing hypothetical points-in-the-infinite-stream at most 3N tokens +// apart (i.e. 
"not wrapped to ring-buffer boundaries"). The paper will switch +// between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer +// and point-in-infinite-stream senses freely. +// +// There is a parallel ring buffer, 'size', that holds the calculated size of +// each token. Why calculated? Because for Begin/End pairs, the "size" +// includes everything between the pair. That is, the "size" of Begin is +// actually the sum of the sizes of everything between Begin and the paired +// End that follows. Since that is arbitrarily far in the future, 'size' is +// being rewritten regularly while the printer runs; in fact most of the +// machinery is here to work out 'size' entries on the fly (and give up when +// they're so obviously over-long that "infinity" is a good enough +// approximation for purposes of line breaking). +// +// The "input side" of the printer is managed as an abstract process called +// SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to +// manage calculating 'size'. SCAN is, in other words, the process of +// calculating 'size' entries. +// +// The "output side" of the printer is managed by an abstract process called +// PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to +// do with each token/size pair it consumes as it goes. It's trying to consume +// the entire buffered window, but can't output anything until the size is >= +// 0 (sizes are set to negative while they're pending calculation). +// +// So SCAN takes input and buffers tokens and pending calculations, while +// PRINT gobbles up completed calculations and tokens from the buffer. The +// theory is that the two can never get more than 3N tokens apart, because +// once there's "obviously" too much data to fit on a line, in a size +// calculation, SCAN will write "infinity" to the size and let PRINT consume +// it. 
+// +// In this implementation (following the paper, again) the SCAN process is +// the method called 'pretty_print', and the 'PRINT' process is the method +// called 'print'. + pub struct Printer { pub out: Box, buf_len: uint, diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs index 4660bb337ab23..9763dbef91375 100644 --- a/src/libsyntax/print/pprust.rs +++ b/src/libsyntax/print/pprust.rs @@ -1009,7 +1009,7 @@ impl<'a> State<'a> { try!(self.print_attribute(attr)); count += 1; } - _ => {/* fallthrough */ } + _ => { /* fallthrough */ } } } if count > 0 { @@ -1027,7 +1027,7 @@ impl<'a> State<'a> { try!(self.print_attribute(attr)); count += 1; } - _ => {/* fallthrough */ } + _ => { /* fallthrough */ } } } if count > 0 { @@ -1704,8 +1704,8 @@ impl<'a> State<'a> { pub fn print_pat(&mut self, pat: &ast::Pat) -> IoResult<()> { try!(self.maybe_print_comment(pat.span.lo)); try!(self.ann.pre(self, NodePat(pat))); - /* Pat isn't normalized, but the beauty of it - is that it doesn't matter */ + // Pat isn't normalized, but the beauty of it + // is that it doesn't matter match pat.node { ast::PatWild => try!(word(&mut self.s, "_")), ast::PatWildMulti => try!(word(&mut self.s, "..")), diff --git a/src/libsyntax/visit.rs b/src/libsyntax/visit.rs index 4ab064a88b795..07550a681e2f3 100644 --- a/src/libsyntax/visit.rs +++ b/src/libsyntax/visit.rs @@ -74,9 +74,8 @@ pub fn generics_of_fn(fk: &FnKind) -> Generics { /// new default implementation gets introduced.) pub trait Visitor { - fn visit_ident(&mut self, _sp: Span, _ident: Ident, _e: E) { - /*! 
Visit the idents */ - } + /// Visit the idents + fn visit_ident(&mut self, _sp: Span, _ident: Ident, _e: E) { } fn visit_mod(&mut self, m: &Mod, _s: Span, _n: NodeId, e: E) { walk_mod(self, m, e) } fn visit_view_item(&mut self, i: &ViewItem, e: E) { walk_view_item(self, i, e) } fn visit_foreign_item(&mut self, i: &ForeignItem, e: E) { walk_foreign_item(self, i, e) } @@ -101,26 +100,22 @@ pub trait Visitor { } fn visit_struct_field(&mut self, s: &StructField, e: E) { walk_struct_field(self, s, e) } fn visit_variant(&mut self, v: &Variant, g: &Generics, e: E) { walk_variant(self, v, g, e) } + /// Visits an optional reference to a lifetime. The `span` is + /// the span of some surrounding reference should opt_lifetime + /// be None. fn visit_opt_lifetime_ref(&mut self, _span: Span, opt_lifetime: &Option, env: E) { - /*! - * Visits an optional reference to a lifetime. The `span` is - * the span of some surrounding reference should opt_lifetime - * be None. - */ match *opt_lifetime { Some(ref l) => self.visit_lifetime_ref(l, env), None => () } } - fn visit_lifetime_ref(&mut self, _lifetime: &Lifetime, _e: E) { - /*! Visits a reference to a lifetime */ - } - fn visit_lifetime_decl(&mut self, _lifetime: &Lifetime, _e: E) { - /*! 
Visits a declaration of a lifetime */ - } + /// Visits a reference to a lifetime + fn visit_lifetime_ref(&mut self, _lifetime: &Lifetime, _e: E) { } + /// Visits a declaration of a lifetime + fn visit_lifetime_decl(&mut self, _lifetime: &Lifetime, _e: E) { } fn visit_explicit_self(&mut self, es: &ExplicitSelf, e: E) { walk_explicit_self(self, es, e) } diff --git a/src/libterm/terminfo/parm.rs b/src/libterm/terminfo/parm.rs index 139f1113aaf94..ec345dc1e1c23 100644 --- a/src/libterm/terminfo/parm.rs +++ b/src/libterm/terminfo/parm.rs @@ -77,17 +77,15 @@ impl Variables { } } -/** - Expand a parameterized capability - - # Arguments - * `cap` - string to expand - * `params` - vector of params for %p1 etc - * `vars` - Variables struct for %Pa etc - - To be compatible with ncurses, `vars` should be the same between calls to `expand` for - multiple capabilities for the same terminal. - */ +/// Expand a parameterized capability +/// +/// # Arguments +/// * `cap` - string to expand +/// * `params` - vector of params for %p1 etc +/// * `vars` - Variables struct for %Pa etc +/// +/// To be compatible with ncurses, `vars` should be the same between calls to `expand` for +/// multiple capabilities for the same terminal. pub fn expand(cap: &[u8], params: &[Param], vars: &mut Variables) -> Result , String> { let mut state = Nothing; diff --git a/src/libtime/lib.rs b/src/libtime/lib.rs index 873cc7af7b647..450e4e5308887 100644 --- a/src/libtime/lib.rs +++ b/src/libtime/lib.rs @@ -78,14 +78,12 @@ mod imp { /// A record specifying a time value in seconds and nanoseconds. #[deriving(Clone, PartialEq, Eq, PartialOrd, Ord, Encodable, Decodable, Show)] pub struct Timespec { pub sec: i64, pub nsec: i32 } -/* - * Timespec assumes that pre-epoch Timespecs have negative sec and positive - * nsec fields. 
Darwin's and Linux's struct timespec functions handle pre- - * epoch timestamps using a "two steps back, one step forward" representation, - * though the man pages do not actually document this. For example, the time - * -1.2 seconds before the epoch is represented by `Timespec { sec: -2_i64, - * nsec: 800_000_000_i32 }`. - */ +// Timespec assumes that pre-epoch Timespecs have negative sec and positive +// nsec fields. Darwin's and Linux's struct timespec functions handle pre- +// epoch timestamps using a "two steps back, one step forward" representation, +// though the man pages do not actually document this. For example, the time +// -1.2 seconds before the epoch is represented by `Timespec { sec: -2_i64, +// nsec: 800_000_000_i32 }`. impl Timespec { pub fn new(sec: i64, nsec: i32) -> Timespec { assert!(nsec >= 0 && nsec < NSEC_PER_SEC); @@ -93,10 +91,8 @@ impl Timespec { } } -/** - * Returns the current time as a `timespec` containing the seconds and - * nanoseconds since 1970-01-01T00:00:00Z. - */ +/// Returns the current time as a `timespec` containing the seconds and +/// nanoseconds since 1970-01-01T00:00:00Z. pub fn get_time() -> Timespec { unsafe { let (sec, nsec) = os_get_time(); @@ -142,10 +138,8 @@ pub fn get_time() -> Timespec { } -/** - * Returns the current value of a high-resolution performance counter - * in nanoseconds since an unspecified epoch. - */ +/// Returns the current value of a high-resolution performance counter +/// in nanoseconds since an unspecified epoch. pub fn precise_time_ns() -> u64 { return os_precise_time_ns(); @@ -190,10 +184,8 @@ pub fn precise_time_ns() -> u64 { } -/** - * Returns the current value of a high-resolution performance counter - * in seconds since an unspecified epoch. - */ +/// Returns the current value of a high-resolution performance counter +/// in seconds since an unspecified epoch. 
pub fn precise_time_s() -> f64 { return (precise_time_ns() as f64) / 1000000000.; } @@ -317,22 +309,18 @@ impl Tm { at_utc(self.to_timespec()) } - /** - * Returns a time string formatted according to the `asctime` format in ISO - * C, in the local timezone. - * - * Example: "Thu Jan 1 00:00:00 1970" - */ + /// Returns a time string formatted according to the `asctime` format in ISO + /// C, in the local timezone. + /// + /// Example: "Thu Jan 1 00:00:00 1970" pub fn ctime(&self) -> String { self.to_local().asctime() } - /** - * Returns a time string formatted according to the `asctime` format in ISO - * C. - * - * Example: "Thu Jan 1 00:00:00 1970" - */ + /// Returns a time string formatted according to the `asctime` format in ISO + /// C. + /// + /// Example: "Thu Jan 1 00:00:00 1970" pub fn asctime(&self) -> String { self.strftime("%c") } @@ -342,12 +330,10 @@ impl Tm { strftime(format, self) } - /** - * Returns a time string formatted according to RFC 822. - * - * local: "Thu, 22 Mar 2012 07:53:18 PST" - * utc: "Thu, 22 Mar 2012 14:53:18 GMT" - */ + /// Returns a time string formatted according to RFC 822. + /// + /// local: "Thu, 22 Mar 2012 07:53:18 PST" + /// utc: "Thu, 22 Mar 2012 14:53:18 GMT" pub fn rfc822(&self) -> String { if self.tm_gmtoff == 0_i32 { self.strftime("%a, %d %b %Y %T GMT") @@ -356,23 +342,19 @@ impl Tm { } } - /** - * Returns a time string formatted according to RFC 822 with Zulu time. - * - * local: "Thu, 22 Mar 2012 07:53:18 -0700" - * utc: "Thu, 22 Mar 2012 14:53:18 -0000" - */ + /// Returns a time string formatted according to RFC 822 with Zulu time. + /// + /// local: "Thu, 22 Mar 2012 07:53:18 -0700" + /// utc: "Thu, 22 Mar 2012 14:53:18 -0000" pub fn rfc822z(&self) -> String { self.strftime("%a, %d %b %Y %T %z") } - /** - * Returns a time string formatted according to RFC 3999. RFC 3999 is - * compatible with ISO 8601. 
- * - * local: "2012-02-22T07:53:18-07:00" - * utc: "2012-02-22T14:53:18Z" - */ + /// Returns a time string formatted according to RFC 3339. RFC 3339 is + /// compatible with ISO 8601. + /// + /// local: "2012-02-22T07:53:18-07:00" + /// utc: "2012-02-22T14:53:18Z" pub fn rfc3339(&self) -> String { if self.tm_gmtoff == 0_i32 { self.strftime("%Y-%m-%dT%H:%M:%SZ") } else { self.strftime @@ -863,25 +845,24 @@ pub fn strptime(s: &str, format: &str) -> Result { pub fn strftime(format: &str, tm: &Tm) -> String { fn days_in_year(year: int) -> i32 { if (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)) { - 366 /* Days in a leap year */ + 366 // Days in a leap year } else { - 365 /* Days in a non-leap year */ + 365 // Days in a non-leap year } } fn iso_week_days(yday: i32, wday: i32) -> int { - /* The number of days from the first day of the first ISO week of this - * year to the year day YDAY with week day WDAY. - * ISO weeks start on Monday. The first ISO week has the year's first - * Thursday. - * YDAY may be as small as yday_minimum. - */ + // The number of days from the first day of the first ISO week of this + // year to the year day YDAY with week day WDAY. + // ISO weeks start on Monday. The first ISO week has the year's first + // Thursday. + // YDAY may be as small as yday_minimum. let yday: int = yday as int; let wday: int = wday as int; - let iso_week_start_wday: int = 1; /* Monday */ - let iso_week1_wday: int = 4; /* Thursday */ + let iso_week_start_wday: int = 1; // Monday + let iso_week1_wday: int = 4; // Thursday let yday_minimum: int = 366; - /* Add enough to the first operand of % to make it nonnegative. */ + // Add enough to the first operand of % to make it nonnegative. 
let big_enough_multiple_of_7: int = (yday_minimum / 7 + 2) * 7; yday - (yday - wday + iso_week1_wday + big_enough_multiple_of_7) % 7 @@ -893,14 +874,14 @@ pub fn strftime(format: &str, tm: &Tm) -> String { let mut days: int = iso_week_days (tm.tm_yday, tm.tm_wday); if days < 0 { - /* This ISO week belongs to the previous year. */ + // This ISO week belongs to the previous year. year -= 1; days = iso_week_days (tm.tm_yday + (days_in_year(year)), tm.tm_wday); } else { let d: int = iso_week_days (tm.tm_yday - (days_in_year(year)), tm.tm_wday); if 0 <= d { - /* This ISO week belongs to the next year. */ + // This ISO week belongs to the next year. year += 1; days = d; } diff --git a/src/libuuid/lib.rs b/src/libuuid/lib.rs index 426b350cab949..bc9914ecfd41c 100644 --- a/src/libuuid/lib.rs +++ b/src/libuuid/lib.rs @@ -8,51 +8,49 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -Generate and parse UUIDs - -Provides support for Universally Unique Identifiers (UUIDs). A UUID is a -unique 128-bit number, stored as 16 octets. UUIDs are used to assign unique -identifiers to entities without requiring a central allocating authority. - -They are particularly useful in distributed systems, though can be used in -disparate areas, such as databases and network protocols. Typically a UUID is -displayed in a readable string form as a sequence of hexadecimal digits, -separated into groups by hyphens. - -The uniqueness property is not strictly guaranteed, however for all practical -purposes, it can be assumed that an unintentional collision would be extremely -unlikely. 
- -# Examples - -To create a new random (V4) UUID and print it out in hexadecimal form: - -```rust -use uuid::Uuid; - -fn main() { - let uuid1 = Uuid::new_v4(); - println!("{}", uuid1.to_str()); -} -``` - -# Strings - -Examples of string representations: - -* simple: `936DA01F9ABD4d9d80C702AF85C822A8` -* hyphenated: `550e8400-e29b-41d4-a716-446655440000` -* urn: `urn:uuid:F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4` - -# References - -* [Wikipedia: Universally Unique Identifier]( - http://en.wikipedia.org/wiki/Universally_unique_identifier) -* [RFC4122: A Universally Unique IDentifier (UUID) URN Namespace]( - http://tools.ietf.org/html/rfc4122) - -*/ +//! Generate and parse UUIDs +//! +//! Provides support for Universally Unique Identifiers (UUIDs). A UUID is a +//! unique 128-bit number, stored as 16 octets. UUIDs are used to assign +//! unique identifiers to entities without requiring a central allocating +//! authority. +//! +//! They are particularly useful in distributed systems, though can be used in +//! disparate areas, such as databases and network protocols. Typically a UUID +//! is displayed in a readable string form as a sequence of hexadecimal digits, +//! separated into groups by hyphens. +//! +//! The uniqueness property is not strictly guaranteed, however for all +//! practical purposes, it can be assumed that an unintentional collision would +//! be extremely unlikely. +//! +//! # Examples +//! +//! To create a new random (V4) UUID and print it out in hexadecimal form: +//! +//! ```rust +//! use uuid::Uuid; +//! +//! fn main() { +//! let uuid1 = Uuid::new_v4(); +//! println!("{}", uuid1.to_str()); +//! } +//! ``` +//! +//! # Strings +//! +//! Examples of string representations: +//! +//! * simple: `936DA01F9ABD4d9d80C702AF85C822A8` +//! * hyphenated: `550e8400-e29b-41d4-a716-446655440000` +//! * urn: `urn:uuid:F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4` +//! +//! # References +//! +//! * [Wikipedia: Universally Unique Identifier]( +//! 
http://en.wikipedia.org/wiki/Universally_unique_identifier) +//! * [RFC4122: A Universally Unique IDentifier (UUID) URN Namespace]( +//! http://tools.ietf.org/html/rfc4122) #![crate_id = "uuid#0.11.0"] // NOTE: remove after stage0 #![crate_name = "uuid"] diff --git a/src/test/auxiliary/issue-3012-1.rs b/src/test/auxiliary/issue-3012-1.rs index dbb863da90a56..6d8a917823c2e 100644 --- a/src/test/auxiliary/issue-3012-1.rs +++ b/src/test/auxiliary/issue-3012-1.rs @@ -20,7 +20,7 @@ pub mod socket { impl Drop for socket_handle { fn drop(&mut self) { - /* c::close(self.sockfd); */ + // c::close(self.sockfd); } } diff --git a/src/test/auxiliary/kinds_in_metadata.rs b/src/test/auxiliary/kinds_in_metadata.rs index 7e090523984da..a4d7cadb727a7 100644 --- a/src/test/auxiliary/kinds_in_metadata.rs +++ b/src/test/auxiliary/kinds_in_metadata.rs @@ -8,8 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/* Any copyright is dedicated to the Public Domain. - * http://creativecommons.org/publicdomain/zero/1.0/ */ +// Any copyright is dedicated to the Public Domain. +// http://creativecommons.org/publicdomain/zero/1.0/ // Tests that metadata serialization works for the `Copy` kind. diff --git a/src/test/bench/shootout-fasta.rs b/src/test/bench/shootout-fasta.rs index 4126fda00bcce..b2f973a64957a 100644 --- a/src/test/bench/shootout-fasta.rs +++ b/src/test/bench/shootout-fasta.rs @@ -8,11 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-/* -*- mode: rust; indent-tabs-mode: nil -*- - * Implementation of 'fasta' benchmark from - * Computer Language Benchmarks Game - * http://shootout.alioth.debian.org/ - */ +// -*- mode: rust; indent-tabs-mode: nil -*- +// Implementation of 'fasta' benchmark from +// Computer Language Benchmarks Game +// http://shootout.alioth.debian.org/ use std::io; use std::io::{BufferedWriter, File}; diff --git a/src/test/bench/shootout-pfib.rs b/src/test/bench/shootout-pfib.rs index 85f035b60cbbe..bd6c640fa5822 100644 --- a/src/test/bench/shootout-pfib.rs +++ b/src/test/bench/shootout-pfib.rs @@ -9,14 +9,11 @@ // except according to those terms. -/* - A parallel version of fibonacci numbers. - - This version is meant mostly as a way of stressing and benchmarking - the task system. It supports a lot of old command-line arguments to - control how it runs. - -*/ +// A parallel version of fibonacci numbers. +// +// This version is meant mostly as a way of stressing and benchmarking +// the task system. It supports a lot of old command-line arguments to +// control how it runs. 
extern crate getopts; extern crate time; diff --git a/src/test/bench/sudoku.rs b/src/test/bench/sudoku.rs index f0988bcfcf4ee..5b35bf0d776f3 100644 --- a/src/test/bench/sudoku.rs +++ b/src/test/bench/sudoku.rs @@ -66,7 +66,7 @@ impl Sudoku { } pub fn read(mut reader: BufferedReader) -> Sudoku { - /* assert first line is exactly "9,9" */ + // assert first line is exactly "9,9" assert!(reader.read_line().unwrap() == "9,9".to_string()); let mut g = Vec::from_fn(10u, { |_i| vec!(0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8) }); @@ -104,7 +104,7 @@ impl Sudoku { // solve sudoku grid pub fn solve(&mut self) { - let mut work: Vec<(u8, u8)> = Vec::new(); /* queue of uncolored fields */ + let mut work: Vec<(u8, u8)> = Vec::new(); // queue of uncolored fields for row in range(0u8, 9u8) { for col in range(0u8, 9u8) { let color = *self.grid.get(row as uint).get(col as uint); @@ -154,10 +154,10 @@ impl Sudoku { for idx in range(0u8, 9u8) { avail.remove(*self.grid .get(idx as uint) - .get(col as uint)); /* check same column fields */ + .get(col as uint)); // check same column fields avail.remove(*self.grid .get(row as uint) - .get(idx as uint)); /* check same row fields */ + .get(idx as uint)); // check same row fields } // check same block fields @@ -176,7 +176,7 @@ impl Sudoku { // Stores available colors as simple bitfield, bit 0 is always unset struct Colors(u16); -static HEADS: u16 = (1u16 << 10) - 1; /* bits 9..0 */ +static HEADS: u16 = (1u16 << 10) - 1; // bits 9..0 impl Colors { fn new(start_color: u8) -> Colors { diff --git a/src/test/run-pass/class-attributes-2.rs b/src/test/run-pass/class-attributes-2.rs index bd62f838444af..47d182c66bc2b 100644 --- a/src/test/run-pass/class-attributes-2.rs +++ b/src/test/run-pass/class-attributes-2.rs @@ -15,18 +15,14 @@ struct cat { impl Drop for cat { #[cat_dropper] - /** - Actually, cats don't always land on their feet when you drop them. - */ + /// Actually, cats don't always land on their feet when you drop them. 
fn drop(&mut self) { println!("{} landed on hir feet", self.name); } } #[cat_maker] -/** -Maybe it should technically be a kitten_maker. -*/ +/// Maybe it should technically be a kitten_maker. fn cat(name: String) -> cat { cat { name: name diff --git a/src/test/run-pass/explicit-self-generic.rs b/src/test/run-pass/explicit-self-generic.rs index ac2922e92d42c..e314ed11d849c 100644 --- a/src/test/run-pass/explicit-self-generic.rs +++ b/src/test/run-pass/explicit-self-generic.rs @@ -8,11 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/** - * A function that returns a hash of a value - * - * The hash should concentrate entropy in the lower bits. - */ +/// A function that returns a hash of a value +/// +/// The hash should concentrate entropy in the lower bits. type HashFn = proc(K) -> uint; type EqFn = proc(K, K) -> bool; diff --git a/src/test/run-pass/hashmap-memory.rs b/src/test/run-pass/hashmap-memory.rs index 61e5c28010d0c..3a6ddef2f6d1e 100644 --- a/src/test/run-pass/hashmap-memory.rs +++ b/src/test/run-pass/hashmap-memory.rs @@ -14,12 +14,9 @@ extern crate collections; extern crate debug; -/** - A somewhat reduced test case to expose some Valgrind issues. - - This originally came from the word-count benchmark. -*/ - +/// A somewhat reduced test case to expose some Valgrind issues. +/// +/// This originally came from the word-count benchmark. pub fn map(filename: String, emit: map_reduce::putter) { emit(filename, "1".to_string()); } diff --git a/src/test/run-pass/kinds-in-metadata.rs b/src/test/run-pass/kinds-in-metadata.rs index 233db83d289bd..b4ee590f19529 100644 --- a/src/test/run-pass/kinds-in-metadata.rs +++ b/src/test/run-pass/kinds-in-metadata.rs @@ -10,8 +10,8 @@ // aux-build:kinds_in_metadata.rs -/* Any copyright is dedicated to the Public Domain. - * http://creativecommons.org/publicdomain/zero/1.0/ */ +// Any copyright is dedicated to the Public Domain. 
+// http://creativecommons.org/publicdomain/zero/1.0/ // Tests that metadata serialization works for the `Copy` kind. diff --git a/src/test/run-pass/rename-directory.rs b/src/test/run-pass/rename-directory.rs index e0609782a0aa9..9f2e45f5b85ef 100644 --- a/src/test/run-pass/rename-directory.rs +++ b/src/test/run-pass/rename-directory.rs @@ -28,7 +28,7 @@ fn rename_directory() { fs::mkdir_recursive(&old_path, io::UserRWX); let test_file = &old_path.join("temp.txt"); - /* Write the temp input file */ + // Write the temp input file let ostream = test_file.with_c_str(|fromp| { "w+b".with_c_str(|modebuf| { libc::fopen(fromp, modebuf)