diff --git a/.travis.yml b/.travis.yml index b2840ac3121f8..93c2834b7d873 100644 --- a/.travis.yml +++ b/.travis.yml @@ -293,6 +293,7 @@ before_deploy: cp -r obj/build/dist/* deploy/$TRAVIS_COMMIT; fi - travis_retry gem update --system + - ls -la deploy/$TRAVIS_COMMIT deploy: - provider: s3 diff --git a/src/Cargo.lock b/src/Cargo.lock index 0df5afe0b165c..27e7438ddfd26 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -164,6 +164,11 @@ dependencies = [ "filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "byteorder" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "cargo" version = "0.25.0" @@ -1020,6 +1025,14 @@ name = "log" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "log_settings" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "lzma-sys" version = "0.1.9" @@ -1598,13 +1611,16 @@ name = "rustc" version = "0.0.0" dependencies = [ "arena 0.0.0", + "backtrace 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", "fmt_macros 0.0.0", "graphviz 0.0.0", "jobserver 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_apfloat 0.0.0", "rustc_back 0.0.0", "rustc_const_math 0.0.0", "rustc_data_structures 0.0.0", @@ -1844,9 +1860,12 @@ name = "rustc_mir" version = "0.0.0" dependencies = [ "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + 
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", + "rustc_apfloat 0.0.0", "rustc_const_eval 0.0.0", "rustc_const_math 0.0.0", "rustc_data_structures 0.0.0", @@ -2679,6 +2698,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" "checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf" "checksum bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f2f382711e76b9de6c744cc00d0497baba02fb00a787f088c879f01d09468e32" +"checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d" "checksum cargo_metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "be1057b8462184f634c3a208ee35b0f935cfd94b694b26deadccd98732088d7b" "checksum cargo_metadata 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1f56ec3e469bca7c276f2eea015aa05c5e381356febdbb0683c2580189604537" "checksum cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a9b13a57efd6b30ecd6598ebdb302cca617930b5470647570468a65d12ef9719" @@ -2749,6 +2769,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0db4ec23611747ef772db1c4d650f8bd762f07b461727ec998f953c614024b75" "checksum libz-sys 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)" = "87f737ad6cc6fd6eefe3d9dc5412f1573865bded441300904d2f42269e140f16" "checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = 
"880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" +"checksum log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3d382732ea0fbc09790c4899db3255bdea0fc78b54bf234bd18a63bb603915b6" "checksum lzma-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c1b93b78f89e8737dac81837fc8f5521ac162abcba902e1a3db949d55346d1da" "checksum mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" "checksum markup5ever 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "047150a0e03b57e638fc45af33a0b63a0362305d5b9f92ecef81df472a4cceb0" diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index 631c9f72f3500..30afd52f44824 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -246,6 +246,9 @@ fn main() { // When running miri tests, we need to generate MIR for all libraries if env::var("TEST_MIRI").ok().map_or(false, |val| val == "true") { cmd.arg("-Zalways-encode-mir"); + if stage != "0" { + cmd.arg("-Zmiri"); + } cmd.arg("-Zmir-emit-validate=1"); } diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index 888aa4449f8ee..bdc00295a2041 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -499,9 +499,10 @@ impl<'a> Builder<'a> { if mode != Mode::Tool { // Tools don't get debuginfo right now, e.g. cargo and rls don't // get compiled with debuginfo. - cargo.env("RUSTC_DEBUGINFO", self.config.rust_debuginfo.to_string()) - .env("RUSTC_DEBUGINFO_LINES", self.config.rust_debuginfo_lines.to_string()) - .env("RUSTC_FORCE_UNSTABLE", "1"); + // Adding debuginfo increases their sizes by a factor of 3-4. 
+ cargo.env("RUSTC_DEBUGINFO", self.config.rust_debuginfo.to_string()); + cargo.env("RUSTC_DEBUGINFO_LINES", self.config.rust_debuginfo_lines.to_string()); + cargo.env("RUSTC_FORCE_UNSTABLE", "1"); // Currently the compiler depends on crates from crates.io, and // then other crates can depend on the compiler (e.g. proc-macro diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index 8f3133bc9d81b..eee403dcbe3e7 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -321,6 +321,7 @@ impl Step for Rustfmt { #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Miri { + stage: u32, host: Interned, } @@ -336,6 +337,7 @@ impl Step for Miri { fn make_run(run: RunConfig) { run.builder.ensure(Miri { + stage: run.builder.top_stage, host: run.target, }); } @@ -343,8 +345,9 @@ impl Step for Miri { /// Runs `cargo test` for miri. fn run(self, builder: &Builder) { let build = builder.build; + let stage = self.stage; let host = self.host; - let compiler = builder.compiler(1, host); + let compiler = builder.compiler(stage, host); if let Some(miri) = builder.ensure(tool::Miri { compiler, target: self.host }) { let mut cargo = builder.cargo(compiler, Mode::Tool, host, "test"); @@ -766,6 +769,7 @@ impl Step for Compiletest { if build.config.rust_debuginfo_tests { flags.push("-g".to_string()); } + flags.push("-Zmiri -Zunstable-options".to_string()); if let Some(linker) = build.linker(target) { cmd.arg("--linker").arg(linker); diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index 0b62e1bd5afbf..a8892cb22101a 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -16,6 +16,7 @@ graphviz = { path = "../libgraphviz" } jobserver = "0.1" log = "0.3" owning_ref = "0.3.3" +rustc_apfloat = { path = "../librustc_apfloat" } rustc_back = { path = "../librustc_back" } rustc_const_math = { path = "../librustc_const_math" } rustc_data_structures = { path = "../librustc_data_structures" } @@ -23,6 +24,9 @@ rustc_errors = { path = 
"../librustc_errors" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } +backtrace = "0.3.3" +byteorder = { version = "1.1", features = ["i128"]} + # Note that these dependencies are a lie, they're just here to get linkage to # work. diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 06c9995663e68..bf7484156a64a 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -64,6 +64,7 @@ #![feature(unboxed_closures)] #![feature(underscore_lifetimes)] #![feature(trace_macros)] +#![feature(catch_expr)] #![feature(test)] #![recursion_limit="512"] @@ -89,6 +90,10 @@ extern crate jobserver; extern crate serialize as rustc_serialize; // used by deriving +extern crate rustc_apfloat; +extern crate byteorder; +extern crate backtrace; + // Note that librustc doesn't actually depend on these crates, see the note in // `Cargo.toml` for this crate about why these are here. #[allow(unused_extern_crates)] diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs new file mode 100644 index 0000000000000..9ebfe25c107a9 --- /dev/null +++ b/src/librustc/mir/interpret/error.rs @@ -0,0 +1,322 @@ +use std::error::Error; +use std::{fmt, env}; + +use mir; +use ty::{FnSig, Ty, layout}; + +use super::{ + MemoryPointer, Lock, AccessKind +}; + +use rustc_const_math::ConstMathErr; +use syntax::codemap::Span; +use backtrace::Backtrace; + +#[derive(Debug)] +pub struct EvalError<'tcx> { + pub kind: EvalErrorKind<'tcx>, + pub backtrace: Option, +} + +impl<'tcx> From> for EvalError<'tcx> { + fn from(kind: EvalErrorKind<'tcx>) -> Self { + let backtrace = match env::var("RUST_BACKTRACE") { + Ok(ref val) if !val.is_empty() => Some(Backtrace::new_unresolved()), + _ => None + }; + EvalError { + kind, + backtrace, + } + } +} + +#[derive(Debug)] +pub enum EvalErrorKind<'tcx> { + /// This variant is used by machines to signal their own errors that do not + /// match an existing variant + 
MachineError(Box), + FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>), + NoMirFor(String), + UnterminatedCString(MemoryPointer), + DanglingPointerDeref, + DoubleFree, + InvalidMemoryAccess, + InvalidFunctionPointer, + InvalidBool, + InvalidDiscriminant, + PointerOutOfBounds { + ptr: MemoryPointer, + access: bool, + allocation_size: u64, + }, + InvalidNullPointerUsage, + ReadPointerAsBytes, + ReadBytesAsPointer, + InvalidPointerMath, + ReadUndefBytes, + DeadLocal, + InvalidBoolOp(mir::BinOp), + Unimplemented(String), + DerefFunctionPointer, + ExecuteMemory, + ArrayIndexOutOfBounds(Span, u64, u64), + Math(Span, ConstMathErr), + Intrinsic(String), + OverflowingMath, + InvalidChar(u128), + OutOfMemory { + allocation_size: u64, + memory_size: u64, + memory_usage: u64, + }, + ExecutionTimeLimitReached, + StackFrameLimitReached, + OutOfTls, + TlsOutOfBounds, + AbiViolation(String), + AlignmentCheckFailed { + required: u64, + has: u64, + }, + MemoryLockViolation { + ptr: MemoryPointer, + len: u64, + frame: usize, + access: AccessKind, + lock: Lock, + }, + MemoryAcquireConflict { + ptr: MemoryPointer, + len: u64, + kind: AccessKind, + lock: Lock, + }, + InvalidMemoryLockRelease { + ptr: MemoryPointer, + len: u64, + frame: usize, + lock: Lock, + }, + DeallocatedLockedMemory { + ptr: MemoryPointer, + lock: Lock, + }, + ValidationFailure(String), + CalledClosureAsFunction, + VtableForArgumentlessMethod, + ModifiedConstantMemory, + AssumptionNotHeld, + InlineAsm, + TypeNotPrimitive(Ty<'tcx>), + ReallocatedWrongMemoryKind(String, String), + DeallocatedWrongMemoryKind(String, String), + ReallocateNonBasePtr, + DeallocateNonBasePtr, + IncorrectAllocationInformation(u64, usize, u64, u64), + Layout(layout::LayoutError<'tcx>), + HeapAllocZeroBytes, + HeapAllocNonPowerOfTwoAlignment(u64), + Unreachable, + Panic, + ReadFromReturnPointer, + PathNotFound(Vec), + UnimplementedTraitSelection, + /// Abort in case type errors are reached + TypeckError, +} + +pub type EvalResult<'tcx, T = 
()> = Result>; + +impl<'tcx> Error for EvalError<'tcx> { + fn description(&self) -> &str { + use self::EvalErrorKind::*; + match self.kind { + MachineError(ref inner) => inner.description(), + FunctionPointerTyMismatch(..) => + "tried to call a function through a function pointer of a different type", + InvalidMemoryAccess => + "tried to access memory through an invalid pointer", + DanglingPointerDeref => + "dangling pointer was dereferenced", + DoubleFree => + "tried to deallocate dangling pointer", + InvalidFunctionPointer => + "tried to use a function pointer after offsetting it", + InvalidBool => + "invalid boolean value read", + InvalidDiscriminant => + "invalid enum discriminant value read", + PointerOutOfBounds { .. } => + "pointer offset outside bounds of allocation", + InvalidNullPointerUsage => + "invalid use of NULL pointer", + MemoryLockViolation { .. } => + "memory access conflicts with lock", + MemoryAcquireConflict { .. } => + "new memory lock conflicts with existing lock", + ValidationFailure(..) => + "type validation failed", + InvalidMemoryLockRelease { .. } => + "invalid attempt to release write lock", + DeallocatedLockedMemory { .. } => + "tried to deallocate memory in conflict with a lock", + ReadPointerAsBytes => + "a raw memory access tried to access part of a pointer value as raw bytes", + ReadBytesAsPointer => + "a memory access tried to interpret some bytes as a pointer", + InvalidPointerMath => + "attempted to do invalid arithmetic on pointers that would leak base addresses, e.g. comparing pointers into different allocations", + ReadUndefBytes => + "attempted to read undefined bytes", + DeadLocal => + "tried to access a dead local variable", + InvalidBoolOp(_) => + "invalid boolean operation", + Unimplemented(ref msg) => msg, + DerefFunctionPointer => + "tried to dereference a function pointer", + ExecuteMemory => + "tried to treat a memory pointer as a function pointer", + ArrayIndexOutOfBounds(..) 
=> + "array index out of bounds", + Math(..) => + "mathematical operation failed", + Intrinsic(..) => + "intrinsic failed", + OverflowingMath => + "attempted to do overflowing math", + NoMirFor(..) => + "mir not found", + InvalidChar(..) => + "tried to interpret an invalid 32-bit value as a char", + OutOfMemory{..} => + "could not allocate more memory", + ExecutionTimeLimitReached => + "reached the configured maximum execution time", + StackFrameLimitReached => + "reached the configured maximum number of stack frames", + OutOfTls => + "reached the maximum number of representable TLS keys", + TlsOutOfBounds => + "accessed an invalid (unallocated) TLS key", + AbiViolation(ref msg) => msg, + AlignmentCheckFailed{..} => + "tried to execute a misaligned read or write", + CalledClosureAsFunction => + "tried to call a closure through a function pointer", + VtableForArgumentlessMethod => + "tried to call a vtable function without arguments", + ModifiedConstantMemory => + "tried to modify constant memory", + AssumptionNotHeld => + "`assume` argument was false", + InlineAsm => + "miri does not support inline assembly", + TypeNotPrimitive(_) => + "expected primitive type, got nonprimitive", + ReallocatedWrongMemoryKind(_, _) => + "tried to reallocate memory from one kind to another", + DeallocatedWrongMemoryKind(_, _) => + "tried to deallocate memory of the wrong kind", + ReallocateNonBasePtr => + "tried to reallocate with a pointer not to the beginning of an existing object", + DeallocateNonBasePtr => + "tried to deallocate with a pointer not to the beginning of an existing object", + IncorrectAllocationInformation(..) 
=> + "tried to deallocate or reallocate using incorrect alignment or size", + Layout(_) => + "rustc layout computation failed", + UnterminatedCString(_) => + "attempted to get length of a null terminated string, but no null found before end of allocation", + HeapAllocZeroBytes => + "tried to re-, de- or allocate zero bytes on the heap", + HeapAllocNonPowerOfTwoAlignment(_) => + "tried to re-, de-, or allocate heap memory with alignment that is not a power of two", + Unreachable => + "entered unreachable code", + Panic => + "the evaluated program panicked", + ReadFromReturnPointer => + "tried to read from the return pointer", + EvalErrorKind::PathNotFound(_) => + "a path could not be resolved, maybe the crate is not loaded", + UnimplementedTraitSelection => + "there were unresolved type arguments during trait selection", + TypeckError => + "encountered constants with type errors, stopping evaluation", + } + } + + fn cause(&self) -> Option<&Error> { + use self::EvalErrorKind::*; + match self.kind { + MachineError(ref inner) => Some(&**inner), + _ => None, + } + } +} + +impl<'tcx> fmt::Display for EvalError<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::EvalErrorKind::*; + match self.kind { + PointerOutOfBounds { ptr, access, allocation_size } => { + write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}", + if access { "memory access" } else { "pointer computed" }, + ptr.offset, ptr.alloc_id, allocation_size) + }, + MemoryLockViolation { ptr, len, frame, access, ref lock } => { + write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}", + access, frame, ptr, len, lock) + } + MemoryAcquireConflict { ptr, len, kind, ref lock } => { + write!(f, "new {:?} lock at {:?}, size {}, is in conflict with lock {:?}", + kind, ptr, len, lock) + } + InvalidMemoryLockRelease { ptr, len, frame, ref lock } => { + write!(f, "frame {} tried to release memory write lock at {:?}, size {}, but cannot release 
lock {:?}", + frame, ptr, len, lock) + } + DeallocatedLockedMemory { ptr, ref lock } => { + write!(f, "tried to deallocate memory at {:?} in conflict with lock {:?}", + ptr, lock) + } + ValidationFailure(ref err) => { + write!(f, "type validation failed: {}", err) + } + NoMirFor(ref func) => write!(f, "no mir for `{}`", func), + FunctionPointerTyMismatch(sig, got) => + write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got), + ArrayIndexOutOfBounds(span, len, index) => + write!(f, "index out of bounds: the len is {} but the index is {} at {:?}", len, index, span), + ReallocatedWrongMemoryKind(ref old, ref new) => + write!(f, "tried to reallocate memory from {} to {}", old, new), + DeallocatedWrongMemoryKind(ref old, ref new) => + write!(f, "tried to deallocate {} memory but gave {} as the kind", old, new), + Math(span, ref err) => + write!(f, "{:?} at {:?}", err, span), + Intrinsic(ref err) => + write!(f, "{}", err), + InvalidChar(c) => + write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c), + OutOfMemory { allocation_size, memory_size, memory_usage } => + write!(f, "tried to allocate {} more bytes, but only {} bytes are free of the {} byte memory", + allocation_size, memory_size - memory_usage, memory_size), + AlignmentCheckFailed { required, has } => + write!(f, "tried to access memory with alignment {}, but alignment {} is required", + has, required), + TypeNotPrimitive(ty) => + write!(f, "expected primitive type, got {}", ty), + Layout(ref err) => + write!(f, "rustc layout computation failed: {:?}", err), + PathNotFound(ref path) => + write!(f, "Cannot find path {:?}", path), + MachineError(ref inner) => + write!(f, "machine error: {}", inner), + IncorrectAllocationInformation(size, size2, align, align2) => + write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and align {}", size, align, size2, align2), + _ => write!(f, "{}", self.description()), + } + } +} diff --git 
a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs new file mode 100644 index 0000000000000..c5d2ec1668c82 --- /dev/null +++ b/src/librustc/mir/interpret/mod.rs @@ -0,0 +1,270 @@ +//! An interpreter for MIR used in CTFE and by miri + +#[macro_export] +macro_rules! err { + ($($tt:tt)*) => { Err($crate::mir::interpret::EvalErrorKind::$($tt)*.into()) }; +} + +mod error; +mod value; + +pub use self::error::{EvalError, EvalResult, EvalErrorKind}; + +pub use self::value::{PrimVal, PrimValKind, Value, Pointer, PtrAndAlign, bytes_to_f32, bytes_to_f64}; + +use std::collections::BTreeMap; +use ty::layout::HasDataLayout; +use std::fmt; +use ty::layout; +use mir; +use ty; +use middle::region; +use std::iter; + +#[derive(Clone, Debug, PartialEq)] +pub enum Lock { + NoLock, + WriteLock(DynamicLifetime), + /// This should never be empty -- that would be a read lock held and nobody there to release it... + ReadLock(Vec), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct DynamicLifetime { + pub frame: usize, + pub region: Option, // "None" indicates "until the function ends" +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum AccessKind { + Read, + Write, +} + +/// Uniquely identifies a specific constant or static. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub struct GlobalId<'tcx> { + /// For a constant or static, the `Instance` of the item itself. + /// For a promoted global, the `Instance` of the function they belong to. + pub instance: ty::Instance<'tcx>, + + /// The index for promoted globals within their function's `Mir`. + pub promoted: Option, +} + +//////////////////////////////////////////////////////////////////////////////// +// Pointer arithmetic +//////////////////////////////////////////////////////////////////////////////// + +pub trait PointerArithmetic: layout::HasDataLayout { + // These are not supposed to be overriden. 
+ + //// Trunace the given value to the pointer size; also return whether there was an overflow + fn truncate_to_ptr(self, val: u128) -> (u64, bool) { + let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits(); + ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1) + } + + // Overflow checking only works properly on the range from -u64 to +u64. + fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) { + // FIXME: is it possible to over/underflow here? + if i < 0 { + // trickery to ensure that i64::min_value() works fine + // this formula only works for true negative values, it panics for zero! + let n = u64::max_value() - (i as u64) + 1; + val.overflowing_sub(n) + } else { + self.overflowing_offset(val, i as u64) + } + } + + fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) { + let (res, over1) = val.overflowing_add(i); + let (res, over2) = self.truncate_to_ptr(res as u128); + (res, over1 || over2) + } + + fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> { + let (res, over) = self.overflowing_signed_offset(val, i as i128); + if over { err!(OverflowingMath) } else { Ok(res) } + } + + fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> { + let (res, over) = self.overflowing_offset(val, i); + if over { err!(OverflowingMath) } else { Ok(res) } + } + + fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 { + self.overflowing_signed_offset(val, i as i128).0 + } +} + +impl PointerArithmetic for T {} + + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct MemoryPointer { + pub alloc_id: AllocId, + pub offset: u64, +} + +impl<'tcx> MemoryPointer { + pub fn new(alloc_id: AllocId, offset: u64) -> Self { + MemoryPointer { alloc_id, offset } + } + + pub(crate) fn wrapping_signed_offset(self, i: i64, cx: C) -> Self { + MemoryPointer::new( + self.alloc_id, + cx.data_layout().wrapping_signed_offset(self.offset, i), + ) + } + + pub fn overflowing_signed_offset(self, i: i128, cx: C) -> (Self, bool) 
{ + let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i); + (MemoryPointer::new(self.alloc_id, res), over) + } + + pub(crate) fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + Ok(MemoryPointer::new( + self.alloc_id, + cx.data_layout().signed_offset(self.offset, i)?, + )) + } + + pub fn overflowing_offset(self, i: u64, cx: C) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_offset(self.offset, i); + (MemoryPointer::new(self.alloc_id, res), over) + } + + pub fn offset(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { + Ok(MemoryPointer::new( + self.alloc_id, + cx.data_layout().offset(self.offset, i)?, + )) + } +} + + +#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)] +pub struct AllocId(pub u64); + +impl fmt::Display for AllocId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +#[derive(Debug, Eq, PartialEq, Hash)] +pub struct Allocation { + /// The actual bytes of the allocation. + /// Note that the bytes of a pointer represent the offset of the pointer + pub bytes: Vec, + /// Maps from byte addresses to allocations. + /// Only the first byte of a pointer is inserted into the map. + pub relocations: BTreeMap, + /// Denotes undefined memory. Reading from undefined memory is forbidden in miri + pub undef_mask: UndefMask, + /// The alignment of the allocation to detect unaligned reads. 
+ pub align: u64, +} + +impl Allocation { + pub fn from_bytes(slice: &[u8]) -> Self { + let mut undef_mask = UndefMask::new(0); + undef_mask.grow(slice.len() as u64, true); + Self { + bytes: slice.to_owned(), + relocations: BTreeMap::new(), + undef_mask, + align: 1, + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Undefined byte tracking +//////////////////////////////////////////////////////////////////////////////// + +type Block = u64; +const BLOCK_SIZE: u64 = 64; + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub struct UndefMask { + blocks: Vec, + len: u64, +} + +impl UndefMask { + pub fn new(size: u64) -> Self { + let mut m = UndefMask { + blocks: vec![], + len: 0, + }; + m.grow(size, false); + m + } + + /// Check whether the range `start..end` (end-exclusive) is entirely defined. + pub fn is_range_defined(&self, start: u64, end: u64) -> bool { + if end > self.len { + return false; + } + for i in start..end { + if !self.get(i) { + return false; + } + } + true + } + + pub fn set_range(&mut self, start: u64, end: u64, new_state: bool) { + let len = self.len; + if end > len { + self.grow(end - len, new_state); + } + self.set_range_inbounds(start, end, new_state); + } + + pub fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) { + for i in start..end { + self.set(i, new_state); + } + } + + pub fn get(&self, i: u64) -> bool { + let (block, bit) = bit_index(i); + (self.blocks[block] & 1 << bit) != 0 + } + + pub fn set(&mut self, i: u64, new_state: bool) { + let (block, bit) = bit_index(i); + if new_state { + self.blocks[block] |= 1 << bit; + } else { + self.blocks[block] &= !(1 << bit); + } + } + + pub fn grow(&mut self, amount: u64, new_state: bool) { + let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len; + if amount > unused_trailing_bits { + let additional_blocks = amount / BLOCK_SIZE + 1; + assert_eq!(additional_blocks as usize as u64, additional_blocks); + 
self.blocks.extend( + iter::repeat(0).take(additional_blocks as usize), + ); + } + let start = self.len; + self.len += amount; + self.set_range_inbounds(start, start + amount, new_state); + } +} + +fn bit_index(bits: u64) -> (usize, usize) { + let a = bits / BLOCK_SIZE; + let b = bits % BLOCK_SIZE; + assert_eq!(a as usize as u64, a); + assert_eq!(b as usize as u64, b); + (a as usize, b as usize) +} diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs new file mode 100644 index 0000000000000..33b177b60a81b --- /dev/null +++ b/src/librustc/mir/interpret/value.rs @@ -0,0 +1,350 @@ +#![allow(unknown_lints)] + +use ty::layout::HasDataLayout; + +use super::{EvalResult, MemoryPointer, PointerArithmetic}; +use syntax::ast::FloatTy; +use rustc_const_math::ConstFloat; + +#[derive(Copy, Clone, Debug)] +pub struct PtrAndAlign { + pub ptr: Pointer, + /// Remember whether this place is *supposed* to be aligned. + pub aligned: bool, +} + +impl PtrAndAlign { + pub fn to_ptr<'tcx>(self) -> EvalResult<'tcx, MemoryPointer> { + self.ptr.to_ptr() + } + pub fn offset<'tcx, C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { + Ok(PtrAndAlign { + ptr: self.ptr.offset(i, cx)?, + aligned: self.aligned, + }) + } +} + +pub fn bytes_to_f32(bits: u128) -> ConstFloat { + ConstFloat { + bits, + ty: FloatTy::F32, + } +} + +pub fn bytes_to_f64(bits: u128) -> ConstFloat { + ConstFloat { + bits, + ty: FloatTy::F64, + } +} + +/// A `Value` represents a single self-contained Rust value. +/// +/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitve +/// value held directly, outside of any allocation (`ByVal`). For `ByRef`-values, we remember +/// whether the pointer is supposed to be aligned or not (also see Place). +/// +/// For optimization of a few very common cases, there is also a representation for a pair of +/// primitive values (`ByValPair`). 
It allows Miri to avoid making allocations for checked binary +/// operations and fat pointers. This idea was taken from rustc's trans. +#[derive(Clone, Copy, Debug)] +pub enum Value { + ByRef(PtrAndAlign), + ByVal(PrimVal), + ByValPair(PrimVal, PrimVal), +} + +/// A wrapper type around `PrimVal` that cannot be turned back into a `PrimVal` accidentally. +/// This type clears up a few APIs where having a `PrimVal` argument for something that is +/// potentially an integer pointer or a pointer to an allocation was unclear. +/// +/// I (@oli-obk) believe it is less easy to mix up generic primvals and primvals that are just +/// the representation of pointers. Also all the sites that convert between primvals and pointers +/// are explicit now (and rare!) +#[derive(Clone, Copy, Debug)] +pub struct Pointer { + primval: PrimVal, +} + +impl<'tcx> Pointer { + pub fn null() -> Self { + PrimVal::Bytes(0).into() + } + pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + self.primval.to_ptr() + } + pub fn into_inner_primval(self) -> PrimVal { + self.primval + } + + pub fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + let layout = cx.data_layout(); + match self.primval { + PrimVal::Bytes(b) => { + assert_eq!(b as u64 as u128, b); + Ok(Pointer::from( + PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128), + )) + } + PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn offset(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { + let layout = cx.data_layout(); + match self.primval { + PrimVal::Bytes(b) => { + assert_eq!(b as u64 as u128, b); + Ok(Pointer::from( + PrimVal::Bytes(layout.offset(b as u64, i)? 
as u128), + )) + } + PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn wrapping_signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + let layout = cx.data_layout(); + match self.primval { + PrimVal::Bytes(b) => { + assert_eq!(b as u64 as u128, b); + Ok(Pointer::from(PrimVal::Bytes( + layout.wrapping_signed_offset(b as u64, i) as u128, + ))) + } + PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn is_null(self) -> EvalResult<'tcx, bool> { + match self.primval { + PrimVal::Bytes(b) => Ok(b == 0), + PrimVal::Ptr(_) => Ok(false), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn to_value_with_len(self, len: u64) -> Value { + Value::ByValPair(self.primval, PrimVal::from_u128(len as u128)) + } + + pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value { + Value::ByValPair(self.primval, PrimVal::Ptr(vtable)) + } + + pub fn to_value(self) -> Value { + Value::ByVal(self.primval) + } +} + +impl ::std::convert::From for Pointer { + fn from(primval: PrimVal) -> Self { + Pointer { primval } + } +} + +impl ::std::convert::From for Pointer { + fn from(ptr: MemoryPointer) -> Self { + PrimVal::Ptr(ptr).into() + } +} + +/// A `PrimVal` represents an immediate, primitive value existing outside of a +/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in +/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes +/// of a simple value, a pointer into another `Allocation`, or be undefined. +#[derive(Clone, Copy, Debug)] +pub enum PrimVal { + /// The raw bytes of a simple value. + Bytes(u128), + + /// A pointer into an `Allocation`. 
An `Allocation` in the `memory` module has a list of + /// relocations, but a `PrimVal` is only large enough to contain one, so we just represent the + /// relocation and its associated offset together as a `MemoryPointer` here. + Ptr(MemoryPointer), + + /// An undefined `PrimVal`, for representing values that aren't safe to examine, but are safe + /// to copy around, just like undefined bytes in an `Allocation`. + Undef, +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum PrimValKind { + I8, I16, I32, I64, I128, + U8, U16, U32, U64, U128, + F32, F64, + Ptr, FnPtr, + Bool, + Char, +} + +impl<'a, 'tcx: 'a> Value { + #[inline] + pub fn by_ref(ptr: Pointer) -> Self { + Value::ByRef(PtrAndAlign { ptr, aligned: true }) + } +} + +impl<'tcx> PrimVal { + pub fn from_u128(n: u128) -> Self { + PrimVal::Bytes(n) + } + + pub fn from_i128(n: i128) -> Self { + PrimVal::Bytes(n as u128) + } + + pub fn from_float(f: ConstFloat) -> Self { + PrimVal::Bytes(f.bits) + } + + pub fn from_bool(b: bool) -> Self { + PrimVal::Bytes(b as u128) + } + + pub fn from_char(c: char) -> Self { + PrimVal::Bytes(c as u128) + } + + pub fn to_bytes(self) -> EvalResult<'tcx, u128> { + match self { + PrimVal::Bytes(b) => Ok(b), + PrimVal::Ptr(_) => err!(ReadPointerAsBytes), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + match self { + PrimVal::Bytes(_) => err!(ReadBytesAsPointer), + PrimVal::Ptr(p) => Ok(p), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn is_bytes(self) -> bool { + match self { + PrimVal::Bytes(_) => true, + _ => false, + } + } + + pub fn is_ptr(self) -> bool { + match self { + PrimVal::Ptr(_) => true, + _ => false, + } + } + + pub fn is_undef(self) -> bool { + match self { + PrimVal::Undef => true, + _ => false, + } + } + + pub fn to_u128(self) -> EvalResult<'tcx, u128> { + self.to_bytes() + } + + pub fn to_u64(self) -> EvalResult<'tcx, u64> { + self.to_bytes().map(|b| { + assert_eq!(b as u64 as u128, 
b); + b as u64 + }) + } + + pub fn to_i32(self) -> EvalResult<'tcx, i32> { + self.to_bytes().map(|b| { + assert_eq!(b as i32 as u128, b); + b as i32 + }) + } + + pub fn to_i128(self) -> EvalResult<'tcx, i128> { + self.to_bytes().map(|b| b as i128) + } + + pub fn to_i64(self) -> EvalResult<'tcx, i64> { + self.to_bytes().map(|b| { + assert_eq!(b as i64 as u128, b); + b as i64 + }) + } + + pub fn to_f32(self) -> EvalResult<'tcx, ConstFloat> { + self.to_bytes().map(bytes_to_f32) + } + + pub fn to_f64(self) -> EvalResult<'tcx, ConstFloat> { + self.to_bytes().map(bytes_to_f64) + } + + pub fn to_bool(self) -> EvalResult<'tcx, bool> { + match self.to_bytes()? { + 0 => Ok(false), + 1 => Ok(true), + _ => err!(InvalidBool), + } + } +} + +impl PrimValKind { + pub fn is_int(self) -> bool { + use self::PrimValKind::*; + match self { + I8 | I16 | I32 | I64 | I128 | U8 | U16 | U32 | U64 | U128 => true, + _ => false, + } + } + + pub fn is_signed_int(self) -> bool { + use self::PrimValKind::*; + match self { + I8 | I16 | I32 | I64 | I128 => true, + _ => false, + } + } + + pub fn is_float(self) -> bool { + use self::PrimValKind::*; + match self { + F32 | F64 => true, + _ => false, + } + } + + pub fn from_uint_size(size: u64) -> Self { + match size { + 1 => PrimValKind::U8, + 2 => PrimValKind::U16, + 4 => PrimValKind::U32, + 8 => PrimValKind::U64, + 16 => PrimValKind::U128, + _ => bug!("can't make uint with size {}", size), + } + } + + pub fn from_int_size(size: u64) -> Self { + match size { + 1 => PrimValKind::I8, + 2 => PrimValKind::I16, + 4 => PrimValKind::I32, + 8 => PrimValKind::I64, + 16 => PrimValKind::I128, + _ => bug!("can't make int with size {}", size), + } + } + + pub fn is_ptr(self) -> bool { + use self::PrimValKind::*; + match self { + Ptr | FnPtr => true, + _ => false, + } + } +} diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 64e601ab1e734..c61e776c6157c 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -43,6 +43,7 @@ mod cache; 
pub mod tcx; pub mod visit; pub mod traversal; +pub mod interpret; /// Types for locals type LocalDecls<'tcx> = IndexVec>; diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 0dcd3e8081080..e6138b34c8084 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -1141,6 +1141,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "print some statistics about MIR"), always_encode_mir: bool = (false, parse_bool, [TRACKED], "encode MIR of all functions into the crate metadata"), + miri: bool = (false, parse_bool, [TRACKED], + "check the miri const evaluator against the old ctfe"), osx_rpath_install_name: bool = (false, parse_bool, [TRACKED], "pass `-install_name @rpath/...` to the macOS linker"), sanitizer: Option = (None, parse_sanitizer, [TRACKED], diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index ce05acb01b001..2b264566415e2 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -30,9 +30,10 @@ use middle::cstore::EncodedMetadata; use middle::lang_items; use middle::resolve_lifetime::{self, ObjectLifetimeDefault}; use middle::stability; -use mir::Mir; +use mir::{Mir, interpret}; use ty::subst::{Kind, Substs}; use ty::ReprOptions; +use ty::Instance; use traits; use ty::{self, Ty, TypeAndMut}; use ty::{TyS, TypeVariants, Slice}; @@ -87,6 +88,8 @@ pub struct GlobalArenas<'tcx> { steal_mir: TypedArena>>, mir: TypedArena>, tables: TypedArena>, + /// miri allocations + const_allocs: TypedArena, } impl<'tcx> GlobalArenas<'tcx> { @@ -99,6 +102,7 @@ impl<'tcx> GlobalArenas<'tcx> { steal_mir: TypedArena::new(), mir: TypedArena::new(), tables: TypedArena::new(), + const_allocs: TypedArena::new(), } } } @@ -849,6 +853,8 @@ pub struct GlobalCtxt<'tcx> { stability_interner: RefCell>, + pub interpret_interner: RefCell>, + layout_interner: RefCell>, /// A vector of every trait accessible in the whole crate @@ -868,6 +874,104 @@ pub struct GlobalCtxt<'tcx> { 
output_filenames: Arc, } +/// Everything needed to efficiently work with interned allocations +#[derive(Debug, Default)] +pub struct InterpretInterner<'tcx> { + /// Stores the value of constants (and deduplicates the actual memory) + allocs: FxHashSet<&'tcx interpret::Allocation>, + + /// Allows obtaining function instance handles via a unique identifier + functions: FxHashMap>, + + /// Inverse map of `interpret_functions`. + /// Used so we don't allocate a new pointer every time we need one + function_cache: FxHashMap, u64>, + + /// Allows obtaining const allocs via a unique identifier + alloc_by_id: FxHashMap, + + /// The AllocId to assign to the next new regular allocation. + /// Always incremented, never gets smaller. + next_id: u64, + + /// Allows checking whether a constant already has an allocation + /// + /// The pointers are to the beginning of an `alloc_by_id` allocation + alloc_cache: FxHashMap, interpret::PtrAndAlign>, + + /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate + /// allocations for string and bytestring literals. 
+ literal_alloc_cache: FxHashMap, u64>, +} + +impl<'tcx> InterpretInterner<'tcx> { + pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> u64 { + if let Some(&alloc_id) = self.function_cache.get(&instance) { + return alloc_id; + } + let id = self.reserve(); + debug!("creating fn ptr: {}", id); + self.functions.insert(id, instance); + self.function_cache.insert(instance, id); + id + } + + pub fn get_fn( + &self, + id: u64, + ) -> Option> { + self.functions.get(&id).cloned() + } + + pub fn get_alloc( + &self, + id: u64, + ) -> Option<&'tcx interpret::Allocation> { + self.alloc_by_id.get(&id).cloned() + } + + pub fn get_cached( + &self, + global_id: interpret::GlobalId<'tcx>, + ) -> Option { + self.alloc_cache.get(&global_id).cloned() + } + + pub fn cache( + &mut self, + global_id: interpret::GlobalId<'tcx>, + ptr: interpret::PtrAndAlign, + ) { + if let Some(old) = self.alloc_cache.insert(global_id, ptr) { + bug!("tried to cache {:?}, but was already existing as {:#?}", global_id, old); + } + } + + pub fn intern_at_reserved( + &mut self, + id: u64, + alloc: &'tcx interpret::Allocation, + ) { + if let Some(old) = self.alloc_by_id.insert(id, alloc) { + bug!("tried to intern allocation at {}, but was already existing as {:#?}", id, old); + } + } + + /// obtains a new allocation ID that can be referenced but does not + /// yet have an allocation backing it. + pub fn reserve( + &mut self, + ) -> u64 { + let next = self.next_id; + self.next_id = self.next_id + .checked_add(1) + .expect("You overflowed a u64 by incrementing by 1... \ + You've just earned yourself a free drink if we ever meet. \ + Seriously, how did you do that?!"); + next + } +} + impl<'tcx> GlobalCtxt<'tcx> { /// Get the global TyCtxt. 
pub fn global_tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { @@ -935,6 +1039,41 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } + pub fn intern_const_alloc( + self, + alloc: interpret::Allocation, + ) -> &'gcx interpret::Allocation { + if let Some(alloc) = self.interpret_interner.borrow().allocs.get(&alloc) { + return alloc; + } + + let interned = self.global_arenas.const_allocs.alloc(alloc); + if let Some(prev) = self.interpret_interner.borrow_mut().allocs.replace(interned) { + bug!("Tried to overwrite interned Allocation: {:#?}", prev) + } + interned + } + + /// Allocates a byte or string literal for `mir::interpret` + pub fn allocate_cached(self, bytes: &[u8]) -> u64 { + // check whether we already allocated this literal or a constant with the same memory + if let Some(&alloc_id) = self.interpret_interner.borrow().literal_alloc_cache.get(bytes) { + return alloc_id; + } + // create an allocation that just contains these bytes + let alloc = interpret::Allocation::from_bytes(bytes); + let alloc = self.intern_const_alloc(alloc); + + let mut int = self.interpret_interner.borrow_mut(); + // the next unique id + let id = int.reserve(); + // make the allocation identifiable + int.alloc_by_id.insert(id, alloc); + // cache it for the future + int.literal_alloc_cache.insert(bytes.to_owned(), id); + id + } + pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability { if let Some(st) = self.stability_interner.borrow().get(&stab) { return st; @@ -1108,6 +1247,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { layout_depth: Cell::new(0), derive_macros: RefCell::new(NodeMap()), stability_interner: RefCell::new(FxHashSet()), + interpret_interner: Default::default(), all_traits: RefCell::new(None), tx_to_llvm_workers: tx, output_filenames: Arc::new(output_filenames.clone()), @@ -1554,6 +1694,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { println!("Substs interner: #{}", self.interners.substs.borrow().len()); println!("Region interner: #{}", 
self.interners.region.borrow().len()); println!("Stability interner: #{}", self.stability_interner.borrow().len()); + println!("Interpret interner: #{}", self.interpret_interner.borrow().allocs.len()); println!("Layout interner: #{}", self.layout_interner.borrow().len()); } } diff --git a/src/librustc/ty/instance.rs b/src/librustc/ty/instance.rs index 177c25ac5dba0..61b19227744c1 100644 --- a/src/librustc/ty/instance.rs +++ b/src/librustc/ty/instance.rs @@ -180,20 +180,20 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { debug!("resolve(def_id={:?}, substs={:?}) = {:?}", def_id, substs, result); result } -} -fn resolve_closure<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, - def_id: DefId, - substs: ty::ClosureSubsts<'tcx>, - requested_kind: ty::ClosureKind) --> Instance<'tcx> -{ - let actual_kind = substs.closure_kind(def_id, tcx); + pub fn resolve_closure( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: ty::ClosureSubsts<'tcx>, + requested_kind: ty::ClosureKind) + -> Instance<'tcx> + { + let actual_kind = substs.closure_kind(def_id, tcx); - match needs_fn_once_adapter_shim(actual_kind, requested_kind) { - Ok(true) => fn_once_adapter_instance(tcx, def_id, substs), - _ => Instance::new(def_id, substs.substs) + match needs_fn_once_adapter_shim(actual_kind, requested_kind) { + Ok(true) => fn_once_adapter_instance(tcx, def_id, substs), + _ => Instance::new(def_id, substs.substs) + } } } @@ -202,8 +202,8 @@ fn resolve_associated_item<'a, 'tcx>( trait_item: &ty::AssociatedItem, param_env: ty::ParamEnv<'tcx>, trait_id: DefId, - rcvr_substs: &'tcx Substs<'tcx> - ) -> Option> { + rcvr_substs: &'tcx Substs<'tcx>, +) -> Option> { let def_id = trait_item.def_id; debug!("resolve_associated_item(trait_item={:?}, \ trait_id={:?}, \ @@ -230,7 +230,7 @@ fn resolve_associated_item<'a, 'tcx>( } traits::VtableClosure(closure_data) => { let trait_closure_kind = tcx.lang_items().fn_trait_kind(trait_id).unwrap(); - Some(resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs, + 
Some(Instance::resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs, trait_closure_kind)) } traits::VtableFnPointer(ref data) => { diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs index eb4db6365cc57..81cd63b5407c3 100644 --- a/src/librustc_const_eval/eval.rs +++ b/src/librustc_const_eval/eval.rs @@ -682,35 +682,3 @@ impl<'a, 'tcx> ConstContext<'a, 'tcx> { compare_const_vals(tcx, span, &a.val, &b.val) } } - -pub(crate) fn const_eval<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) - -> EvalResult<'tcx> { - let (def_id, substs) = if let Some(resolved) = lookup_const_by_id(tcx, key) { - resolved - } else { - return Err(ConstEvalErr { - span: tcx.def_span(key.value.0), - kind: TypeckError - }); - }; - - let tables = tcx.typeck_tables_of(def_id); - let body = if let Some(id) = tcx.hir.as_local_node_id(def_id) { - let body_id = tcx.hir.body_owned_by(id); - - // Do match-check before building MIR - if tcx.check_match(def_id).is_err() { - return Err(ConstEvalErr { - span: tcx.def_span(key.value.0), - kind: CheckMatchError, - }); - } - - tcx.mir_const_qualif(def_id); - tcx.hir.body(body_id) - } else { - tcx.extern_const_body(def_id).body - }; - ConstContext::new(tcx, key.param_env.and(substs), tables).eval(&body.value) -} diff --git a/src/librustc_const_eval/lib.rs b/src/librustc_const_eval/lib.rs index d4110f0091aeb..9d636b48bd0c5 100644 --- a/src/librustc_const_eval/lib.rs +++ b/src/librustc_const_eval/lib.rs @@ -50,7 +50,6 @@ use rustc::ty::maps::Providers; pub fn provide(providers: &mut Providers) { *providers = Providers { - const_eval: eval::const_eval, check_match: check_match::check_match, ..*providers }; diff --git a/src/librustc_mir/Cargo.toml b/src/librustc_mir/Cargo.toml index b7a576babeb67..40ea4e1801b26 100644 --- a/src/librustc_mir/Cargo.toml +++ b/src/librustc_mir/Cargo.toml @@ -12,6 +12,7 @@ crate-type = ["dylib"] bitflags = "1.0" graphviz = { path = 
"../libgraphviz" } log = "0.3" +log_settings = "0.1.1" rustc = { path = "../librustc" } rustc_const_eval = { path = "../librustc_const_eval" } rustc_const_math = { path = "../librustc_const_math" } @@ -20,3 +21,5 @@ rustc_errors = { path = "../librustc_errors" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } +byteorder = { version = "1.1", features = ["i128"] } +rustc_apfloat = { path = "../librustc_apfloat" } diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs new file mode 100644 index 0000000000000..6f4a28fb28f01 --- /dev/null +++ b/src/librustc_mir/interpret/cast.rs @@ -0,0 +1,133 @@ +use rustc::ty::Ty; +use syntax::ast::{FloatTy, IntTy, UintTy}; + +use rustc_const_math::ConstFloat; +use super::{EvalContext, Machine}; +use rustc::mir::interpret::{PrimVal, EvalResult, MemoryPointer, PointerArithmetic}; +use rustc_apfloat::ieee::{Single, Double}; +use rustc_apfloat::Float; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub(super) fn cast_primval( + &self, + val: PrimVal, + src_ty: Ty<'tcx>, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, PrimVal> { + trace!("Casting {:?}: {:?} to {:?}", val, src_ty, dest_ty); + let src_kind = self.ty_to_primval_kind(src_ty)?; + + match val { + PrimVal::Undef => Ok(PrimVal::Undef), + PrimVal::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty), + val @ PrimVal::Bytes(_) => { + use rustc::mir::interpret::PrimValKind::*; + match src_kind { + F32 => self.cast_from_float(val.to_f32()?, dest_ty), + F64 => self.cast_from_float(val.to_f64()?, dest_ty), + + I8 | I16 | I32 | I64 | I128 => { + self.cast_from_signed_int(val.to_i128()?, dest_ty) + } + + Bool | Char | U8 | U16 | U32 | U64 | U128 | FnPtr | Ptr => { + self.cast_from_int(val.to_u128()?, dest_ty, false) + } + } + } + } + } + + fn cast_from_signed_int(&self, val: i128, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + self.cast_from_int(val as u128, ty, val < 0) + } + + fn 
int_to_int(&self, v: i128, ty: IntTy) -> u128 { + match ty { + IntTy::I8 => v as i8 as u128, + IntTy::I16 => v as i16 as u128, + IntTy::I32 => v as i32 as u128, + IntTy::I64 => v as i64 as u128, + IntTy::I128 => v as u128, + IntTy::Is => { + let ty = self.tcx.sess.target.isize_ty; + self.int_to_int(v, ty) + } + } + } + fn int_to_uint(&self, v: u128, ty: UintTy) -> u128 { + match ty { + UintTy::U8 => v as u8 as u128, + UintTy::U16 => v as u16 as u128, + UintTy::U32 => v as u32 as u128, + UintTy::U64 => v as u64 as u128, + UintTy::U128 => v, + UintTy::Us => { + let ty = self.tcx.sess.target.usize_ty; + self.int_to_uint(v, ty) + } + } + } + + fn cast_from_int( + &self, + v: u128, + ty: Ty<'tcx>, + negative: bool, + ) -> EvalResult<'tcx, PrimVal> { + trace!("cast_from_int: {}, {}, {}", v, ty, negative); + use rustc::ty::TypeVariants::*; + match ty.sty { + // Casts to bool are not permitted by rustc, no need to handle them here. + TyInt(ty) => Ok(PrimVal::Bytes(self.int_to_int(v as i128, ty))), + TyUint(ty) => Ok(PrimVal::Bytes(self.int_to_uint(v, ty))), + + TyFloat(fty) if negative => Ok(PrimVal::Bytes(ConstFloat::from_i128(v as i128, fty).bits)), + TyFloat(fty) => Ok(PrimVal::Bytes(ConstFloat::from_u128(v, fty).bits)), + + TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)), + TyChar => err!(InvalidChar(v)), + + // No alignment check needed for raw pointers. But we have to truncate to target ptr size. 
+ TyRawPtr(_) => Ok(PrimVal::Bytes(self.memory.truncate_to_ptr(v).0 as u128)), + + _ => err!(Unimplemented(format!("int to {:?} cast", ty))), + } + } + + fn cast_from_float(&self, val: ConstFloat, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + use rustc::ty::TypeVariants::*; + match ty.sty { + TyUint(t) => { + let width = t.bit_width().unwrap_or(self.memory.pointer_size() as usize * 8); + match val.ty { + FloatTy::F32 => Ok(PrimVal::Bytes(Single::from_bits(val.bits).to_u128(width).value)), + FloatTy::F64 => Ok(PrimVal::Bytes(Double::from_bits(val.bits).to_u128(width).value)), + } + }, + + TyInt(t) => { + let width = t.bit_width().unwrap_or(self.memory.pointer_size() as usize * 8); + match val.ty { + FloatTy::F32 => Ok(PrimVal::from_i128(Single::from_bits(val.bits).to_i128(width).value)), + FloatTy::F64 => Ok(PrimVal::from_i128(Double::from_bits(val.bits).to_i128(width).value)), + } + }, + + TyFloat(fty) => Ok(PrimVal::from_float(val.convert(fty))), + _ => err!(Unimplemented(format!("float to {:?} cast", ty))), + } + } + + fn cast_from_ptr(&self, ptr: MemoryPointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + use rustc::ty::TypeVariants::*; + match ty.sty { + // Casting to a reference or fn pointer is not permitted by rustc, no need to support it here. 
+ TyRawPtr(_) | + TyInt(IntTy::Is) | + TyUint(UintTy::Us) => Ok(PrimVal::Ptr(ptr)), + TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes), + _ => err!(Unimplemented(format!("ptr to {:?} cast", ty))), + } + } +} diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs new file mode 100644 index 0000000000000..a78cd8477617f --- /dev/null +++ b/src/librustc_mir/interpret/const_eval.rs @@ -0,0 +1,587 @@ +use rustc::ty::{self, TyCtxt, Ty, Instance}; +use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::subst::Substs; +use rustc::hir::def_id::DefId; +use rustc::mir; +use rustc::middle::const_val::ErrKind::{CheckMatchError, TypeckError}; +use rustc::middle::const_val::{ConstEvalErr, ConstVal}; +use rustc_const_eval::{lookup_const_by_id, ConstContext}; +use rustc::mir::Field; +use rustc_data_structures::indexed_vec::Idx; + +use syntax::ast::Mutability; +use syntax::codemap::Span; + +use rustc::mir::interpret::{EvalResult, EvalError, EvalErrorKind, GlobalId, Value, PrimVal, PtrAndAlign}; +use super::{Place, PlaceExtra, EvalContext, StackPopCleanup, ValTy, HasMemory}; + +use rustc_const_math::ConstInt; + +use std::fmt; +use std::error::Error; + +pub fn eval_body<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + instance: Instance<'tcx>, + param_env: ty::ParamEnv<'tcx>, +) -> (EvalResult<'tcx, (PtrAndAlign, Ty<'tcx>)>, EvalContext<'a, 'tcx, CompileTimeEvaluator>) { + debug!("eval_body: {:?}, {:?}", instance, param_env); + let limits = super::ResourceLimits::default(); + let mut ecx = EvalContext::new(tcx, param_env, limits, CompileTimeEvaluator, ()); + let cid = GlobalId { + instance, + promoted: None, + }; + + let try = (|| { + if ecx.tcx.has_attr(instance.def_id(), "linkage") { + return Err(ConstEvalError::NotConst("extern global".to_string()).into()); + } + // FIXME(eddyb) use `Instance::ty` when it becomes available. 
+ let instance_ty = + ecx.monomorphize(instance.def.def_ty(tcx), instance.substs); + if tcx.interpret_interner.borrow().get_cached(cid).is_none() { + let mir = ecx.load_mir(instance.def)?; + let layout = ecx.layout_of(instance_ty)?; + assert!(!layout.is_unsized()); + let ptr = ecx.memory.allocate( + layout.size.bytes(), + layout.align.abi(), + None, + )?; + tcx.interpret_interner.borrow_mut().cache( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned: !layout.is_packed(), + }, + ); + let cleanup = StackPopCleanup::MarkStatic(Mutability::Immutable); + let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id())); + trace!("const_eval: pushing stack frame for global: {}", name); + ecx.push_stack_frame( + instance, + mir.span, + mir, + Place::from_ptr(ptr), + cleanup.clone(), + )?; + + while ecx.step()? {} + + // reinsert the stack frame so any future queries have the correct substs + ecx.push_stack_frame( + instance, + mir.span, + mir, + Place::from_ptr(ptr), + cleanup, + )?; + } + let value = tcx.interpret_interner.borrow().get_cached(cid).expect("global not cached"); + Ok((value, instance_ty)) + })(); + (try, ecx) +} + +pub fn eval_body_as_integer<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + instance: Instance<'tcx>, +) -> EvalResult<'tcx, ConstInt> { + let (ptr_ty, ecx) = eval_body(tcx, instance, param_env); + let (ptr, ty) = ptr_ty?; + let prim = match ecx.read_maybe_aligned(ptr.aligned, |ectx| ectx.try_read_value(ptr.ptr, ty))? 
{ + Some(Value::ByVal(prim)) => prim.to_bytes()?, + _ => return err!(TypeNotPrimitive(ty)), + }; + use syntax::ast::{IntTy, UintTy}; + use rustc::ty::TypeVariants::*; + use rustc_const_math::{ConstIsize, ConstUsize}; + Ok(match ty.sty { + TyInt(IntTy::I8) => ConstInt::I8(prim as i128 as i8), + TyInt(IntTy::I16) => ConstInt::I16(prim as i128 as i16), + TyInt(IntTy::I32) => ConstInt::I32(prim as i128 as i32), + TyInt(IntTy::I64) => ConstInt::I64(prim as i128 as i64), + TyInt(IntTy::I128) => ConstInt::I128(prim as i128), + TyInt(IntTy::Is) => ConstInt::Isize( + ConstIsize::new(prim as i128 as i64, tcx.sess.target.isize_ty) + .expect("miri should already have errored"), + ), + TyUint(UintTy::U8) => ConstInt::U8(prim as u8), + TyUint(UintTy::U16) => ConstInt::U16(prim as u16), + TyUint(UintTy::U32) => ConstInt::U32(prim as u32), + TyUint(UintTy::U64) => ConstInt::U64(prim as u64), + TyUint(UintTy::U128) => ConstInt::U128(prim), + TyUint(UintTy::Us) => ConstInt::Usize( + ConstUsize::new(prim as u64, tcx.sess.target.usize_ty) + .expect("miri should already have errored"), + ), + _ => { + return Err( + ConstEvalError::NeedsRfc( + "evaluating anything other than isize/usize during typeck".to_string(), + ).into(), + ) + } + }) +} + +pub struct CompileTimeEvaluator; + +impl<'tcx> Into> for ConstEvalError { + fn into(self) -> EvalError<'tcx> { + EvalErrorKind::MachineError(Box::new(self)).into() + } +} + +#[derive(Clone, Debug)] +enum ConstEvalError { + NeedsRfc(String), + NotConst(String), +} + +impl fmt::Display for ConstEvalError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::ConstEvalError::*; + match *self { + NeedsRfc(ref msg) => { + write!( + f, + "\"{}\" needs an rfc before being allowed inside constants", + msg + ) + } + NotConst(ref msg) => write!(f, "Cannot evaluate within constants: \"{}\"", msg), + } + } +} + +impl Error for ConstEvalError { + fn description(&self) -> &str { + use self::ConstEvalError::*; + match *self { + NeedsRfc(_) => 
"this feature needs an rfc before being allowed inside constants", + NotConst(_) => "this feature is not compatible with constant evaluation", + } + } + + fn cause(&self) -> Option<&Error> { + None + } +} + +impl<'tcx> super::Machine<'tcx> for CompileTimeEvaluator { + type MemoryData = (); + type MemoryKinds = !; + fn eval_fn_call<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + destination: Option<(Place, mir::BasicBlock)>, + _args: &[ValTy<'tcx>], + span: Span, + _sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool> { + debug!("eval_fn_call: {:?}", instance); + if !ecx.tcx.is_const_fn(instance.def_id()) { + return Err( + ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(), + ); + } + let mir = match ecx.load_mir(instance.def) { + Ok(mir) => mir, + Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => { + // some simple things like `malloc` might get accepted in the future + return Err( + ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path)) + .into(), + ); + } + Err(other) => return Err(other), + }; + let (return_place, return_to_block) = match destination { + Some((place, block)) => (place, StackPopCleanup::Goto(block)), + None => (Place::undef(), StackPopCleanup::None), + }; + + ecx.push_stack_frame( + instance, + span, + mir, + return_place, + return_to_block, + )?; + + Ok(false) + } + + + fn call_intrinsic<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + _args: &[ValTy<'tcx>], + dest: Place, + dest_layout: layout::TyLayout<'tcx>, + target: mir::BasicBlock, + ) -> EvalResult<'tcx> { + let substs = instance.substs; + + let intrinsic_name = &ecx.tcx.item_name(instance.def_id())[..]; + match intrinsic_name { + "min_align_of" => { + let elem_ty = substs.type_at(0); + let elem_align = ecx.layout_of(elem_ty)?.align.abi(); + let align_val = PrimVal::from_u128(elem_align as u128); + ecx.write_primval(dest, align_val, dest_layout.ty)?; + } + + "size_of" => 
{ + let ty = substs.type_at(0); + let size = ecx.layout_of(ty)?.size.bytes() as u128; + ecx.write_primval(dest, PrimVal::from_u128(size), dest_layout.ty)?; + } + + name => return Err(ConstEvalError::NeedsRfc(format!("calling intrinsic `{}`", name)).into()), + } + + ecx.goto_block(target); + + // Since we pushed no stack frame, the main loop will act + // as if the call just completed and it's returning to the + // current frame. + Ok(()) + } + + fn try_ptr_op<'a>( + _ecx: &EvalContext<'a, 'tcx, Self>, + _bin_op: mir::BinOp, + left: PrimVal, + _left_ty: Ty<'tcx>, + right: PrimVal, + _right_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Option<(PrimVal, bool)>> { + if left.is_bytes() && right.is_bytes() { + Ok(None) + } else { + Err( + ConstEvalError::NeedsRfc("Pointer arithmetic or comparison".to_string()).into(), + ) + } + } + + fn mark_static_initialized(m: !) -> EvalResult<'tcx> { + m + } + + fn box_alloc<'a>( + _ecx: &mut EvalContext<'a, 'tcx, Self>, + _ty: Ty<'tcx>, + _dest: Place, + ) -> EvalResult<'tcx> { + Err( + ConstEvalError::NeedsRfc("Heap allocations via `box` keyword".to_string()).into(), + ) + } + + fn global_item_with_linkage<'a>( + _ecx: &mut EvalContext<'a, 'tcx, Self>, + _instance: ty::Instance<'tcx>, + _mutability: Mutability, + ) -> EvalResult<'tcx> { + Err( + ConstEvalError::NotConst("statics with `linkage` attribute".to_string()).into(), + ) + } +} + +pub fn const_eval_provider<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>, +) -> ::rustc::middle::const_val::EvalResult<'tcx> { + trace!("const eval: {:?}", key); + let (def_id, substs) = if let Some(resolved) = lookup_const_by_id(tcx, key) { + resolved + } else { + return Err(ConstEvalErr { + span: tcx.def_span(key.value.0), + kind: TypeckError + }); + }; + + let tables = tcx.typeck_tables_of(def_id); + let body = if let Some(id) = tcx.hir.as_local_node_id(def_id) { + let body_id = tcx.hir.body_owned_by(id); + + // Do match-check before building MIR + 
if tcx.check_match(def_id).is_err() { + return Err(ConstEvalErr { + span: tcx.def_span(key.value.0), + kind: CheckMatchError, + }); + } + + tcx.mir_const_qualif(def_id); + tcx.hir.body(body_id) + } else { + tcx.extern_const_body(def_id).body + }; + + // do not continue into miri if typeck errors occurred + // it will fail horribly + if tables.tainted_by_errors { + return Err(ConstEvalErr { span: body.value.span, kind: TypeckError }) + } + + trace!("running old const eval"); + let old_result = ConstContext::new(tcx, key.param_env.and(substs), tables).eval(&body.value); + trace!("old const eval produced {:?}", old_result); + if tcx.sess.opts.debugging_opts.miri { + let instance = ty::Instance::new(def_id, substs); + trace!("const eval instance: {:?}, {:?}", instance, key.param_env); + let miri_result = ::interpret::eval_body(tcx, instance, key.param_env); + match (miri_result, old_result) { + ((Err(err), ecx), Ok(ok)) => { + trace!("miri failed, ctfe returned {:?}", ok); + tcx.sess.span_warn( + tcx.def_span(key.value.0), + "miri failed to eval, while ctfe succeeded", + ); + let () = unwrap_miri(&ecx, Err(err)); + Ok(ok) + }, + ((Ok(_), _), Err(err)) => { + Err(err) + }, + ((Err(_), _), Err(err)) => Err(err), + ((Ok((miri_val, miri_ty)), mut ecx), Ok(ctfe)) => { + check_ctfe_against_miri(&mut ecx, miri_val, miri_ty, ctfe.val); + Ok(ctfe) + } + } + } else { + old_result + } +} + +fn check_ctfe_against_miri<'a, 'tcx>( + ecx: &mut EvalContext<'a, 'tcx, CompileTimeEvaluator>, + miri_val: PtrAndAlign, + miri_ty: Ty<'tcx>, + ctfe: ConstVal<'tcx>, +) { + use rustc::middle::const_val::ConstAggregate::*; + use rustc_const_math::ConstFloat; + use rustc::ty::TypeVariants::*; + match miri_ty.sty { + TyInt(int_ty) => { + let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| { + ectx.try_read_value(miri_val.ptr, miri_ty) + }); + let prim = get_prim(ecx, value); + let c = ConstInt::new_signed_truncating(prim as i128, + int_ty, + ecx.tcx.sess.target.isize_ty); + let c = 
ConstVal::Integral(c); + assert_eq!(c, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", c, ctfe); + }, + TyUint(uint_ty) => { + let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| { + ectx.try_read_value(miri_val.ptr, miri_ty) + }); + let prim = get_prim(ecx, value); + let c = ConstInt::new_unsigned_truncating(prim, + uint_ty, + ecx.tcx.sess.target.usize_ty); + let c = ConstVal::Integral(c); + assert_eq!(c, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", c, ctfe); + }, + TyFloat(ty) => { + let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| { + ectx.try_read_value(miri_val.ptr, miri_ty) + }); + let prim = get_prim(ecx, value); + let f = ConstVal::Float(ConstFloat { bits: prim, ty }); + assert_eq!(f, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", f, ctfe); + }, + TyBool => { + let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| { + ectx.try_read_value(miri_val.ptr, miri_ty) + }); + let bits = get_prim(ecx, value); + if bits > 1 { + bug!("miri evaluated to {}, but expected a bool {:?}", bits, ctfe); + } + let b = ConstVal::Bool(bits == 1); + assert_eq!(b, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", b, ctfe); + }, + TyChar => { + let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| { + ectx.try_read_value(miri_val.ptr, miri_ty) + }); + let bits = get_prim(ecx, value); + if let Some(cm) = ::std::char::from_u32(bits as u32) { + assert_eq!( + ConstVal::Char(cm), ctfe, + "miri evaluated to {:?}, but expected {:?}", cm, ctfe, + ); + } else { + bug!("miri evaluated to {}, but expected a char {:?}", bits, ctfe); + } + }, + TyStr => { + let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| { + ectx.try_read_value(miri_val.ptr, miri_ty) + }); + if let Ok(Some(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(len)))) = value { + let bytes = ecx + .memory + .read_bytes(ptr.into(), len as u64) + .expect("bad miri memory for str"); + if let Ok(s) = ::std::str::from_utf8(bytes) { + if let ConstVal::Str(s2) = ctfe 
{ + assert_eq!(s, s2, "miri produced {:?}, but expected {:?}", s, s2); + } else { + bug!("miri produced {:?}, but expected {:?}", s, ctfe); + } + } else { + bug!( + "miri failed to produce valid utf8 {:?}, while ctfe produced {:?}", + bytes, + ctfe, + ); + } + } else { + bug!("miri evaluated to {:?}, but expected a str {:?}", value, ctfe); + } + }, + TyArray(elem_ty, n) => { + let n = n.val.to_const_int().unwrap().to_u64().unwrap(); + let size = ecx.layout_of(elem_ty).unwrap().size.bytes(); + let vec: Vec<(ConstVal, Ty<'tcx>)> = match ctfe { + ConstVal::ByteStr(arr) => arr.data.iter().map(|&b| { + (ConstVal::Integral(ConstInt::U8(b)), ecx.tcx.types.u8) + }).collect(), + ConstVal::Aggregate(Array(v)) => { + v.iter().map(|c| (c.val, c.ty)).collect() + }, + ConstVal::Aggregate(Repeat(v, n)) => { + vec![(v.val, v.ty); n as usize] + }, + _ => bug!("miri produced {:?}, but ctfe yielded {:?}", miri_ty, ctfe), + }; + for (i, elem) in vec.into_iter().enumerate() { + assert!((i as u64) < n); + let ptr = miri_val.offset(size * i as u64, &ecx).unwrap(); + check_ctfe_against_miri(ecx, ptr, elem_ty, elem.0); + } + }, + TyTuple(..) 
=> { + let vec = match ctfe { + ConstVal::Aggregate(Tuple(v)) => v, + _ => bug!("miri produced {:?}, but ctfe yielded {:?}", miri_ty, ctfe), + }; + let layout = ecx.layout_of(miri_ty).unwrap(); + for (i, elem) in vec.into_iter().enumerate() { + let offset = layout.fields.offset(i); + let ptr = miri_val.offset(offset.bytes(), &ecx).unwrap(); + check_ctfe_against_miri(ecx, ptr, elem.ty, elem.val); + } + }, + TyAdt(def, _) => { + let (struct_variant, extra) = if def.is_enum() { + let discr = ecx.read_discriminant_value( + Place::Ptr { ptr: miri_val, extra: PlaceExtra::None }, + miri_ty).unwrap(); + let variant = def.discriminants(ecx.tcx).position(|variant_discr| { + variant_discr.to_u128_unchecked() == discr + }).expect("miri produced invalid enum discriminant"); + (&def.variants[variant], PlaceExtra::DowncastVariant(variant)) + } else { + (def.struct_variant(), PlaceExtra::None) + }; + let vec = match ctfe { + ConstVal::Aggregate(Struct(v)) => v, + ConstVal::Variant(did) => { + assert_eq!(struct_variant.fields.len(), 0); + assert_eq!(did, struct_variant.did); + return; + }, + ctfe => bug!("miri produced {:?}, but ctfe yielded {:?}", miri_ty, ctfe), + }; + let layout = ecx.layout_of(miri_ty).unwrap(); + for &(name, elem) in vec.into_iter() { + let field = struct_variant.fields.iter().position(|f| f.name == name).unwrap(); + let (place, _) = ecx.place_field( + Place::Ptr { ptr: miri_val, extra }, + Field::new(field), + layout, + ).unwrap(); + let ptr = place.to_ptr_extra_aligned().0; + check_ctfe_against_miri(ecx, ptr, elem.ty, elem.val); + } + }, + TySlice(_) => bug!("miri produced a slice?"), + // not supported by ctfe + TyRawPtr(_) | + TyRef(..) => {} + TyDynamic(..) => bug!("miri produced a trait object"), + TyClosure(..) => bug!("miri produced a closure"), + TyGenerator(..) => bug!("miri produced a generator"), + TyNever => bug!("miri produced a value of the never type"), + TyProjection(_) => bug!("miri produced a projection"), + TyAnon(..) 
=> bug!("miri produced an impl Trait type"), + TyParam(_) => bug!("miri produced an unmonomorphized type"), + TyInfer(_) => bug!("miri produced an uninferred type"), + TyError => bug!("miri produced a type error"), + TyForeign(_) => bug!("miri produced an extern type"), + // should be fine + TyFnDef(..) => {} + TyFnPtr(_) => { + let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| { + ectx.try_read_value(miri_val.ptr, miri_ty) + }); + let ptr = match value { + Ok(Some(Value::ByVal(PrimVal::Ptr(ptr)))) => ptr, + value => bug!("expected fn ptr, got {:?}", value), + }; + let inst = ecx.memory.get_fn(ptr).unwrap(); + match ctfe { + ConstVal::Function(did, substs) => { + let ctfe = ty::Instance::resolve( + ecx.tcx, + ecx.param_env, + did, + substs, + ).unwrap(); + assert_eq!(inst, ctfe, "expected fn ptr {:?}, but got {:?}", ctfe, inst); + }, + _ => bug!("ctfe produced {:?}, but miri produced function {:?}", ctfe, inst), + } + }, + } +} + +fn get_prim<'a, 'tcx>( + ecx: &mut EvalContext<'a, 'tcx, CompileTimeEvaluator>, + res: Result, EvalError<'tcx>>, +) -> u128 { + match res { + Ok(Some(Value::ByVal(prim))) => unwrap_miri(ecx, prim.to_bytes()), + Err(err) => unwrap_miri(ecx, Err(err)), + val => bug!("got {:?}", val), + } +} + +fn unwrap_miri<'a, 'tcx, T>( + ecx: &EvalContext<'a, 'tcx, CompileTimeEvaluator>, + res: Result>, +) -> T { + match res { + Ok(val) => val, + Err(mut err) => { + ecx.report(&mut err); + ecx.tcx.sess.abort_if_errors(); + bug!("{:#?}", err); + } + } +} diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs new file mode 100644 index 0000000000000..6b33fd246daa8 --- /dev/null +++ b/src/librustc_mir/interpret/eval_context.rs @@ -0,0 +1,1739 @@ +use std::collections::HashSet; +use std::fmt::Write; + +use rustc::hir::def_id::DefId; +use rustc::hir::map::definitions::DefPathData; +use rustc::middle::const_val::ConstVal; +use rustc::mir; +use rustc::traits::Reveal; +use rustc::ty::layout::{self, Size, 
Align, HasDataLayout, LayoutOf, TyLayout}; +use rustc::ty::subst::{Subst, Substs, Kind}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc_data_structures::indexed_vec::Idx; +use syntax::codemap::{self, DUMMY_SP}; +use syntax::ast::Mutability; +use rustc::mir::interpret::{ + PtrAndAlign, GlobalId, Value, Pointer, PrimVal, PrimValKind, + EvalError, EvalResult, EvalErrorKind, MemoryPointer, +}; + +use super::{Place, PlaceExtra, Memory, + HasMemory, MemoryKind, operator, + Machine}; + +pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> { + /// Stores the `Machine` instance. + pub machine: M, + + /// The results of the type checker, from rustc. + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + + /// Bounds in scope for polymorphic evaluations. + pub param_env: ty::ParamEnv<'tcx>, + + /// The virtual memory system. + pub memory: Memory<'a, 'tcx, M>, + + /// The virtual call stack. + pub(crate) stack: Vec>, + + /// The maximum number of stack frames allowed + pub(crate) stack_limit: usize, + + /// The maximum number of operations that may be executed. + /// This prevents infinite loops and huge computations from freezing up const eval. + /// Remove once halting problem is solved. + pub(crate) steps_remaining: u64, +} + +/// A stack frame. +pub struct Frame<'tcx> { + //////////////////////////////////////////////////////////////////////////////// + // Function and callsite information + //////////////////////////////////////////////////////////////////////////////// + /// The MIR for the function called on this frame. + pub mir: &'tcx mir::Mir<'tcx>, + + /// The def_id and substs of the current function + pub instance: ty::Instance<'tcx>, + + /// The span of the call site. 
+ pub span: codemap::Span, + + //////////////////////////////////////////////////////////////////////////////// + // Return place and locals + //////////////////////////////////////////////////////////////////////////////// + /// The block to return to when returning from the current stack frame + pub return_to_block: StackPopCleanup, + + /// The location where the result of the current stack frame should be written to. + pub return_place: Place, + + /// The list of locals for this stack frame, stored in order as + /// `[arguments..., variables..., temporaries...]`. The locals are stored as `Option`s. + /// `None` represents a local that is currently dead, while a live local + /// can either directly contain `PrimVal` or refer to some part of an `Allocation`. + /// + /// Before being initialized, arguments are `Value::ByVal(PrimVal::Undef)` and other locals are `None`. + pub locals: Vec>, + + //////////////////////////////////////////////////////////////////////////////// + // Current position within the function + //////////////////////////////////////////////////////////////////////////////// + /// The block that is currently executed (or will be executed after the above call stacks + /// return). + pub block: mir::BasicBlock, + + /// The index of the currently evaluated statment. + pub stmt: usize, +} + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub enum StackPopCleanup { + /// The stackframe existed to compute the initial value of a static/constant, make sure it + /// isn't modifyable afterwards in case of constants. 
+ /// In case of `static mut`, mark the memory to ensure it's never marked as immutable through + /// references or deallocated + MarkStatic(Mutability), + /// A regular stackframe added due to a function call will need to get forwarded to the next + /// block + Goto(mir::BasicBlock), + /// The main function and diverging functions have nowhere to return to + None, +} + +#[derive(Copy, Clone, Debug)] +pub struct ResourceLimits { + pub memory_size: u64, + pub step_limit: u64, + pub stack_limit: usize, +} + +impl Default for ResourceLimits { + fn default() -> Self { + ResourceLimits { + memory_size: 100 * 1024 * 1024, // 100 MB + step_limit: 1_000_000, + stack_limit: 100, + } + } +} + +#[derive(Copy, Clone, Debug)] +pub struct TyAndPacked<'tcx> { + pub ty: Ty<'tcx>, + pub packed: bool, +} + +#[derive(Copy, Clone, Debug)] +pub struct ValTy<'tcx> { + pub value: Value, + pub ty: Ty<'tcx>, +} + +impl<'tcx> ::std::ops::Deref for ValTy<'tcx> { + type Target = Value; + fn deref(&self) -> &Value { + &self.value + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> HasDataLayout for &'a EvalContext<'a, 'tcx, M> { + #[inline] + fn data_layout(&self) -> &layout::TargetDataLayout { + &self.tcx.data_layout + } +} + +impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> HasDataLayout + for &'c &'b mut EvalContext<'a, 'tcx, M> { + #[inline] + fn data_layout(&self) -> &layout::TargetDataLayout { + &self.tcx.data_layout + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> layout::HasTyCtxt<'tcx> for &'a EvalContext<'a, 'tcx, M> { + #[inline] + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { + self.tcx + } +} + +impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> layout::HasTyCtxt<'tcx> + for &'c &'b mut EvalContext<'a, 'tcx, M> { + #[inline] + fn tcx<'d>(&'d self) -> TyCtxt<'d, 'tcx, 'tcx> { + self.tcx + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> LayoutOf> for &'a EvalContext<'a, 'tcx, M> { + type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>; + + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + (self.tcx, 
self.param_env).layout_of(ty) + .map_err(|layout| EvalErrorKind::Layout(layout).into()) + } +} + +impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> LayoutOf> + for &'c &'b mut EvalContext<'a, 'tcx, M> { + type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>; + + #[inline] + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + (&**self).layout_of(ty) + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub fn new( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + limits: ResourceLimits, + machine: M, + memory_data: M::MemoryData, + ) -> Self { + EvalContext { + machine, + tcx, + param_env, + memory: Memory::new(tcx, limits.memory_size, memory_data), + stack: Vec::new(), + stack_limit: limits.stack_limit, + steps_remaining: limits.step_limit, + } + } + + pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> { + let layout = self.layout_of(ty)?; + assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); + + let size = layout.size.bytes(); + let align = layout.align.abi(); + self.memory.allocate(size, align, Some(MemoryKind::Stack)) + } + + pub fn memory(&self) -> &Memory<'a, 'tcx, M> { + &self.memory + } + + pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> { + &mut self.memory + } + + pub fn stack(&self) -> &[Frame<'tcx>] { + &self.stack + } + + #[inline] + pub fn cur_frame(&self) -> usize { + assert!(self.stack.len() > 0); + self.stack.len() - 1 + } + + pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { + let ptr = self.memory.allocate_cached(s.as_bytes()); + Ok(Value::ByValPair( + PrimVal::Ptr(ptr), + PrimVal::from_u128(s.len() as u128), + )) + } + + pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> { + use rustc::middle::const_val::ConstVal::*; + + let primval = match *const_val { + Integral(const_int) => PrimVal::Bytes(const_int.to_u128_unchecked()), + + Float(val) => PrimVal::Bytes(val.bits), + + Bool(b) => PrimVal::from_bool(b), + 
Char(c) => PrimVal::from_char(c), + + Str(ref s) => return self.str_to_value(s), + + ByteStr(ref bs) => { + let ptr = self.memory.allocate_cached(bs.data); + PrimVal::Ptr(ptr) + } + + Unevaluated(def_id, substs) => { + let instance = self.resolve(def_id, substs)?; + let cid = GlobalId { + instance, + promoted: None, + }; + return Ok(Value::ByRef(self.tcx.interpret_interner.borrow().get_cached(cid).expect("static/const not cached"))); + } + + Aggregate(..) | + Variant(_) => bug!("should not have aggregate or variant constants in MIR"), + // function items are zero sized and thus have no readable value + Function(..) => PrimVal::Undef, + }; + + Ok(Value::ByVal(primval)) + } + + pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, ty::Instance<'tcx>> { + let substs = self.tcx.trans_apply_param_substs(self.substs(), &substs); + ty::Instance::resolve( + self.tcx, + self.param_env, + def_id, + substs, + ).ok_or(EvalErrorKind::TypeckError.into()) // turn error prop into a panic to expose associated type in const issue + } + + pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + ty.is_sized(self.tcx, self.param_env, DUMMY_SP) + } + + pub fn load_mir( + &self, + instance: ty::InstanceDef<'tcx>, + ) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> { + // do not continue if typeck errors occurred (can only occur in local crate) + let did = instance.def_id(); + if did.is_local() && self.tcx.has_typeck_tables(did) && self.tcx.typeck_tables_of(did).tainted_by_errors { + return err!(TypeckError); + } + trace!("load mir {:?}", instance); + match instance { + ty::InstanceDef::Item(def_id) => { + self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| { + EvalErrorKind::NoMirFor(self.tcx.item_path_str(def_id)).into() + }) + } + _ => Ok(self.tcx.instance_mir(instance)), + } + } + + pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + // miri doesn't care about lifetimes, and will choke on some crazy ones + // let's 
simply get rid of them + let without_lifetimes = self.tcx.erase_regions(&ty); + let substituted = without_lifetimes.subst(self.tcx, substs); + let substituted = self.tcx.fully_normalize_monormophic_ty(&substituted); + substituted + } + + /// Return the size and aligment of the value at the given type. + /// Note that the value does not matter if the type is sized. For unsized types, + /// the value has to be a fat pointer, and we only care about the "extra" data in it. + pub fn size_and_align_of_dst( + &mut self, + ty: Ty<'tcx>, + value: Value, + ) -> EvalResult<'tcx, (Size, Align)> { + let layout = self.layout_of(ty)?; + if !layout.is_unsized() { + Ok(layout.size_and_align()) + } else { + match ty.sty { + ty::TyAdt(..) | ty::TyTuple(..) => { + // First get the size of all statically known fields. + // Don't use type_of::sizing_type_of because that expects t to be sized, + // and it also rounds up to alignment, which we want to avoid, + // as the unsized field's alignment could be smaller. + assert!(!ty.is_simd()); + debug!("DST {} layout: {:?}", ty, layout); + + let sized_size = layout.fields.offset(layout.fields.count() - 1); + let sized_align = layout.align; + debug!( + "DST {} statically sized prefix size: {:?} align: {:?}", + ty, + sized_size, + sized_align + ); + + // Recurse to get the size of the dynamically sized field (must be + // the last field). + let field_ty = layout.field(&self, layout.fields.count() - 1)?.ty; + let (unsized_size, unsized_align) = + self.size_and_align_of_dst(field_ty, value)?; + + // FIXME (#26403, #27023): We should be adding padding + // to `sized_size` (to accommodate the `unsized_align` + // required of the unsized field that follows) before + // summing it with `sized_size`. (Note that since #26403 + // is unfixed, we do not yet add the necessary padding + // here. But this is where the add would go.) + + // Return the sum of sizes and max of aligns. 
+ let size = sized_size + unsized_size; + + // Choose max of two known alignments (combined value must + // be aligned according to more restrictive of the two). + let align = sized_align.max(unsized_align); + + // Issue #27023: must add any necessary padding to `size` + // (to make it a multiple of `align`) before returning it. + // + // Namely, the returned size should be, in C notation: + // + // `size + ((size & (align-1)) ? align : 0)` + // + // emulated via the semi-standard fast bit trick: + // + // `(size + (align-1)) & -align` + + Ok((size.abi_align(align), align)) + } + ty::TyDynamic(..) => { + let (_, vtable) = self.into_ptr_vtable_pair(value)?; + // the second entry in the vtable is the dynamic size of the object. + self.read_size_and_align_from_vtable(vtable) + } + + ty::TySlice(_) | ty::TyStr => { + let (elem_size, align) = layout.field(&self, 0)?.size_and_align(); + let (_, len) = self.into_slice(value)?; + Ok((elem_size * len, align)) + } + + _ => bug!("size_of_val::<{:?}>", ty), + } + } + } + + pub fn push_stack_frame( + &mut self, + instance: ty::Instance<'tcx>, + span: codemap::Span, + mir: &'tcx mir::Mir<'tcx>, + return_place: Place, + return_to_block: StackPopCleanup, + ) -> EvalResult<'tcx> { + ::log_settings::settings().indentation += 1; + + /// Return the set of locals that have a storage annotation anywhere + fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet { + use rustc::mir::StatementKind::*; + + let mut set = HashSet::new(); + for block in mir.basic_blocks() { + for stmt in block.statements.iter() { + match stmt.kind { + StorageLive(local) | + StorageDead(local) => { + set.insert(local); + } + _ => {} + } + } + } + set + } + + // Subtract 1 because `local_decls` includes the ReturnMemoryPointer, but we don't store a local + // `Value` for that. 
+ let num_locals = mir.local_decls.len() - 1; + + let locals = { + let annotated_locals = collect_storage_annotations(mir); + let mut locals = vec![None; num_locals]; + for i in 0..num_locals { + let local = mir::Local::new(i + 1); + if !annotated_locals.contains(&local) { + locals[i] = Some(Value::ByVal(PrimVal::Undef)); + } + } + locals + }; + + self.stack.push(Frame { + mir, + block: mir::START_BLOCK, + return_to_block, + return_place, + locals, + span, + instance, + stmt: 0, + }); + + self.memory.cur_frame = self.cur_frame(); + + if self.stack.len() > self.stack_limit { + err!(StackFrameLimitReached) + } else { + Ok(()) + } + } + + pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { + ::log_settings::settings().indentation -= 1; + M::end_region(self, None)?; + let frame = self.stack.pop().expect( + "tried to pop a stack frame, but there were none", + ); + if !self.stack.is_empty() { + // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame? + self.memory.cur_frame = self.cur_frame(); + } + match frame.return_to_block { + StackPopCleanup::MarkStatic(mutable) => { + if let Place::Ptr { ptr, .. } = frame.return_place { + // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions + self.memory.mark_static_initalized( + ptr.to_ptr()?.alloc_id, + mutable, + )? 
+ } else { + bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_place); + } + } + StackPopCleanup::Goto(target) => self.goto_block(target), + StackPopCleanup::None => {} + } + // deallocate all locals that are backed by an allocation + for local in frame.locals { + self.deallocate_local(local)?; + } + + Ok(()) + } + + pub fn deallocate_local(&mut self, local: Option) -> EvalResult<'tcx> { + if let Some(Value::ByRef(ptr)) = local { + trace!("deallocating local"); + let ptr = ptr.to_ptr()?; + self.memory.dump_alloc(ptr.alloc_id); + self.memory.deallocate_local(ptr)?; + }; + Ok(()) + } + + /// Evaluate an assignment statement. + /// + /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue + /// type writes its results directly into the memory specified by the place. + pub(super) fn eval_rvalue_into_place( + &mut self, + rvalue: &mir::Rvalue<'tcx>, + place: &mir::Place<'tcx>, + ) -> EvalResult<'tcx> { + let dest = self.eval_place(place)?; + let dest_ty = self.place_ty(place); + + use rustc::mir::Rvalue::*; + match *rvalue { + Use(ref operand) => { + let value = self.eval_operand(operand)?.value; + let valty = ValTy { + value, + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + + BinaryOp(bin_op, ref left, ref right) => { + let left = self.eval_operand(left)?; + let right = self.eval_operand(right)?; + if self.intrinsic_overflowing( + bin_op, + left, + right, + dest, + dest_ty, + )? + { + // There was an overflow in an unchecked binop. Right now, we consider this an error and bail out. + // The rationale is that the reason rustc emits unchecked binops in release mode (vs. the checked binops + // it emits in debug mode) is performance, but it doesn't cost us any performance in miri. + // If, however, the compiler ever starts transforming unchecked intrinsics into unchecked binops, + // we have to go back to just ignoring the overflow here. 
+ return err!(OverflowingMath); + } + } + + CheckedBinaryOp(bin_op, ref left, ref right) => { + let left = self.eval_operand(left)?; + let right = self.eval_operand(right)?; + self.intrinsic_with_overflow( + bin_op, + left, + right, + dest, + dest_ty, + )?; + } + + UnaryOp(un_op, ref operand) => { + let val = self.eval_operand_to_primval(operand)?; + let kind = self.ty_to_primval_kind(dest_ty)?; + self.write_primval( + dest, + operator::unary_op(un_op, val, kind)?, + dest_ty, + )?; + } + + Aggregate(ref kind, ref operands) => { + self.inc_step_counter_and_check_limit(operands.len() as u64)?; + + let (dest, active_field_index) = match **kind { + mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { + self.write_discriminant_value(dest_ty, dest, variant_index)?; + if adt_def.is_enum() { + (self.place_downcast(dest, variant_index)?, active_field_index) + } else { + (dest, active_field_index) + } + } + _ => (dest, None) + }; + + let layout = self.layout_of(dest_ty)?; + for (i, operand) in operands.iter().enumerate() { + let value = self.eval_operand(operand)?; + // Ignore zero-sized fields. 
+ if !self.layout_of(value.ty)?.is_zst() { + let field_index = active_field_index.unwrap_or(i); + let (field_dest, _) = self.place_field(dest, mir::Field::new(field_index), layout)?; + self.write_value(value, field_dest)?; + } + } + } + + Repeat(ref operand, _) => { + let (elem_ty, length) = match dest_ty.sty { + ty::TyArray(elem_ty, n) => (elem_ty, n.val.to_const_int().unwrap().to_u64().unwrap()), + _ => { + bug!( + "tried to assign array-repeat to non-array type {:?}", + dest_ty + ) + } + }; + let elem_size = self.layout_of(elem_ty)?.size.bytes(); + let value = self.eval_operand(operand)?.value; + + let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?); + + // FIXME: speed up repeat filling + for i in 0..length { + let elem_dest = dest.offset(i * elem_size, &self)?; + self.write_value_to_ptr(value, elem_dest, elem_ty)?; + } + } + + Len(ref place) => { + // FIXME(CTFE): don't allow computing the length of arrays in const eval + let src = self.eval_place(place)?; + let ty = self.place_ty(place); + let (_, len) = src.elem_ty_and_len(ty); + self.write_primval( + dest, + PrimVal::from_u128(len as u128), + dest_ty, + )?; + } + + Ref(_, _, ref place) => { + let src = self.eval_place(place)?; + // We ignore the alignment of the place here -- special handling for packed structs ends + // at the `&` operator. + let (ptr, extra) = self.force_allocation(src)?.to_ptr_extra_aligned(); + + let val = match extra { + PlaceExtra::None => ptr.ptr.to_value(), + PlaceExtra::Length(len) => ptr.ptr.to_value_with_len(len), + PlaceExtra::Vtable(vtable) => ptr.ptr.to_value_with_vtable(vtable), + PlaceExtra::DowncastVariant(..) 
=> { + bug!("attempted to take a reference to an enum downcast place") + } + }; + let valty = ValTy { + value: val, + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + + NullaryOp(mir::NullOp::Box, ty) => { + let ty = self.monomorphize(ty, self.substs()); + M::box_alloc(self, ty, dest)?; + } + + NullaryOp(mir::NullOp::SizeOf, ty) => { + let ty = self.monomorphize(ty, self.substs()); + let layout = self.layout_of(ty)?; + assert!(!layout.is_unsized(), + "SizeOf nullary MIR operator called for unsized type"); + self.write_primval( + dest, + PrimVal::from_u128(layout.size.bytes() as u128), + dest_ty, + )?; + } + + Cast(kind, ref operand, cast_ty) => { + debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty); + use rustc::mir::CastKind::*; + match kind { + Unsize => { + let src = self.eval_operand(operand)?; + self.unsize_into(src.value, src.ty, dest, dest_ty)?; + } + + Misc => { + let src = self.eval_operand(operand)?; + if self.type_is_fat_ptr(src.ty) { + match (src.value, self.type_is_fat_ptr(dest_ty)) { + (Value::ByRef { .. 
}, _) | + (Value::ByValPair(..), true) => { + let valty = ValTy { + value: src.value, + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + (Value::ByValPair(data, _), false) => { + let valty = ValTy { + value: Value::ByVal(data), + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + (Value::ByVal(_), _) => bug!("expected fat ptr"), + } + } else { + let src_val = self.value_to_primval(src)?; + let dest_val = self.cast_primval(src_val, src.ty, dest_ty)?; + let valty = ValTy { + value: Value::ByVal(dest_val), + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + } + + ReifyFnPointer => { + match self.eval_operand(operand)?.ty.sty { + ty::TyFnDef(def_id, substs) => { + let instance = self.resolve(def_id, substs)?; + let fn_ptr = self.memory.create_fn_alloc(instance); + let valty = ValTy { + value: Value::ByVal(PrimVal::Ptr(fn_ptr)), + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + ref other => bug!("reify fn pointer on {:?}", other), + } + } + + UnsafeFnPointer => { + match dest_ty.sty { + ty::TyFnPtr(_) => { + let mut src = self.eval_operand(operand)?; + src.ty = dest_ty; + self.write_value(src, dest)?; + } + ref other => bug!("fn to unsafe fn cast on {:?}", other), + } + } + + ClosureFnPointer => { + match self.eval_operand(operand)?.ty.sty { + ty::TyClosure(def_id, substs) => { + let substs = self.tcx.trans_apply_param_substs(self.substs(), &substs); + let instance = ty::Instance::resolve_closure( + self.tcx, + def_id, + substs, + ty::ClosureKind::FnOnce, + ); + let fn_ptr = self.memory.create_fn_alloc(instance); + let valty = ValTy { + value: Value::ByVal(PrimVal::Ptr(fn_ptr)), + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + ref other => bug!("closure fn pointer on {:?}", other), + } + } + } + } + + Discriminant(ref place) => { + let ty = self.place_ty(place); + let place = self.eval_place(place)?; + let discr_val = self.read_discriminant_value(place, ty)?; + if let ty::TyAdt(adt_def, _) = ty.sty { + trace!("Read 
discriminant {}, valid discriminants {:?}", discr_val, adt_def.discriminants(self.tcx).collect::>()); + if adt_def.discriminants(self.tcx).all(|v| { + discr_val != v.to_u128_unchecked() + }) + { + return err!(InvalidDiscriminant); + } + self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?; + } else { + bug!("rustc only generates Rvalue::Discriminant for enums"); + } + } + } + + if log_enabled!(::log::LogLevel::Trace) { + self.dump_local(dest); + } + + Ok(()) + } + + pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool { + match ty.sty { + ty::TyRawPtr(ref tam) | + ty::TyRef(_, ref tam) => !self.type_is_sized(tam.ty), + ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()), + _ => false, + } + } + + pub(super) fn eval_operand_to_primval( + &mut self, + op: &mir::Operand<'tcx>, + ) -> EvalResult<'tcx, PrimVal> { + let valty = self.eval_operand(op)?; + self.value_to_primval(valty) + } + + pub(crate) fn operands_to_args( + &mut self, + ops: &[mir::Operand<'tcx>], + ) -> EvalResult<'tcx, Vec>> { + ops.into_iter() + .map(|op| self.eval_operand(op)) + .collect() + } + + pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> { + use rustc::mir::Operand::*; + let ty = self.monomorphize(op.ty(self.mir(), self.tcx), self.substs()); + match *op { + // FIXME: do some more logic on `move` to invalidate the old location + Copy(ref place) | + Move(ref place) => { + Ok(ValTy { + value: self.eval_and_read_place(place)?, + ty + }) + }, + + Constant(ref constant) => { + use rustc::mir::Literal; + let mir::Constant { ref literal, .. 
} = **constant; + let value = match *literal { + Literal::Value { ref value } => self.const_to_value(&value.val)?, + + Literal::Promoted { index } => { + let cid = GlobalId { + instance: self.frame().instance, + promoted: Some(index), + }; + Value::ByRef(self.tcx.interpret_interner.borrow().get_cached(cid).expect("promoted not cached")) + } + }; + + Ok(ValTy { + value, + ty, + }) + } + } + } + + pub fn read_discriminant_value( + &mut self, + place: Place, + ty: Ty<'tcx>, + ) -> EvalResult<'tcx, u128> { + let layout = self.layout_of(ty)?; + //trace!("read_discriminant_value {:#?}", layout); + + match layout.variants { + layout::Variants::Single { index } => { + return Ok(index as u128); + } + layout::Variants::Tagged { .. } | + layout::Variants::NicheFilling { .. } => {}, + } + + let (discr_place, discr) = self.place_field(place, mir::Field::new(0), layout)?; + let raw_discr = self.value_to_primval(ValTy { + value: self.read_place(discr_place)?, + ty: discr.ty + })?; + let discr_val = match layout.variants { + layout::Variants::Single { .. } => bug!(), + layout::Variants::Tagged { .. } => raw_discr.to_bytes()?, + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. 
+ } => { + let variants_start = niche_variants.start as u128; + let variants_end = niche_variants.end as u128; + match raw_discr { + PrimVal::Ptr(_) => { + assert!(niche_start == 0); + assert!(variants_start == variants_end); + dataful_variant as u128 + }, + PrimVal::Bytes(raw_discr) => { + let discr = raw_discr.wrapping_sub(niche_start) + .wrapping_add(variants_start); + if variants_start <= discr && discr <= variants_end { + discr + } else { + dataful_variant as u128 + } + }, + PrimVal::Undef => return err!(ReadUndefBytes), + } + } + }; + + Ok(discr_val) + } + + + pub(crate) fn write_discriminant_value( + &mut self, + dest_ty: Ty<'tcx>, + dest: Place, + variant_index: usize, + ) -> EvalResult<'tcx> { + let layout = self.layout_of(dest_ty)?; + + match layout.variants { + layout::Variants::Single { index } => { + if index != variant_index { + // If the layout of an enum is `Single`, all + // other variants are necessarily uninhabited. + assert_eq!(layout.for_variant(&self, variant_index).abi, + layout::Abi::Uninhabited); + } + } + layout::Variants::Tagged { .. } => { + let discr_val = dest_ty.ty_adt_def().unwrap() + .discriminant_for_variant(self.tcx, variant_index) + .to_u128_unchecked(); + + let (discr_dest, discr) = self.place_field(dest, mir::Field::new(0), layout)?; + self.write_primval(discr_dest, PrimVal::Bytes(discr_val), discr.ty)?; + } + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. 
+ } => { + if variant_index != dataful_variant { + let (niche_dest, niche) = + self.place_field(dest, mir::Field::new(0), layout)?; + let niche_value = ((variant_index - niche_variants.start) as u128) + .wrapping_add(niche_start); + self.write_primval(niche_dest, PrimVal::Bytes(niche_value), niche.ty)?; + } + } + } + + Ok(()) + } + + pub fn read_global_as_value(&self, gid: GlobalId) -> Value { + Value::ByRef(self.tcx.interpret_interner.borrow().get_cached(gid).expect("global not cached")) + } + + fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> { + let layout = self.layout_of(ty)?; + assert!(!layout.is_unsized(), "cannot copy from an unsized type"); + let size = layout.size.bytes(); + let align = layout.align.abi(); + self.memory.copy(src, dest, size, align, false)?; + Ok(()) + } + + pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> { + let new_place = match place { + Place::Local { frame, local } => { + // -1 since we don't store the return value + match self.stack[frame].locals[local.index() - 1] { + None => return err!(DeadLocal), + Some(Value::ByRef(ptr)) => { + Place::Ptr { + ptr, + extra: PlaceExtra::None, + } + } + Some(val) => { + let ty = self.stack[frame].mir.local_decls[local].ty; + let ty = self.monomorphize(ty, self.stack[frame].instance.substs); + let ptr = self.alloc_ptr(ty)?; + self.stack[frame].locals[local.index() - 1] = + Some(Value::by_ref(ptr.into())); // it stays live + self.write_value_to_ptr(val, ptr.into(), ty)?; + Place::from_ptr(ptr) + } + } + } + Place::Ptr { .. 
} => place, + }; + Ok(new_place) + } + + /// ensures this Value is not a ByRef + pub fn follow_by_ref_value( + &self, + value: Value, + ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Value> { + match value { + Value::ByRef(PtrAndAlign { ptr, aligned }) => { + self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty)) + } + other => Ok(other), + } + } + + pub fn value_to_primval( + &self, + ValTy { value, ty } : ValTy<'tcx>, + ) -> EvalResult<'tcx, PrimVal> { + match self.follow_by_ref_value(value, ty)? { + Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"), + + Value::ByVal(primval) => { + // TODO: Do we really want insta-UB here? + self.ensure_valid_value(primval, ty)?; + Ok(primval) + } + + Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"), + } + } + + pub fn write_ptr(&mut self, dest: Place, val: Pointer, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> { + let valty = ValTy { + value: val.to_value(), + ty: dest_ty, + }; + self.write_value(valty, dest) + } + + pub fn write_primval( + &mut self, + dest: Place, + val: PrimVal, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + let valty = ValTy { + value: Value::ByVal(val), + ty: dest_ty, + }; + self.write_value(valty, dest) + } + + pub fn write_value( + &mut self, + ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>, + dest: Place, + ) -> EvalResult<'tcx> { + //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty); + // Note that it is really important that the type here is the right one, and matches the type things are read at. + // In case `src_val` is a `ByValPair`, we don't do any magic here to handle padding properly, which is only + // correct if we never look at this data with the wrong type. 
+ + match dest { + Place::Ptr { + ptr: PtrAndAlign { ptr, aligned }, + extra, + } => { + assert_eq!(extra, PlaceExtra::None); + self.write_maybe_aligned_mut( + aligned, + |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty), + ) + } + + Place::Local { frame, local } => { + let dest = self.stack[frame].get_local(local)?; + self.write_value_possibly_by_val( + src_val, + |this, val| this.stack[frame].set_local(local, val), + dest, + dest_ty, + ) + } + } + } + + // The cases here can be a bit subtle. Read carefully! + fn write_value_possibly_by_val EvalResult<'tcx>>( + &mut self, + src_val: Value, + write_dest: F, + old_dest_val: Value, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + if let Value::ByRef(PtrAndAlign { + ptr: dest_ptr, + aligned, + }) = old_dest_val + { + // If the value is already `ByRef` (that is, backed by an `Allocation`), + // then we must write the new value into this allocation, because there may be + // other pointers into the allocation. These other pointers are logically + // pointers into the local variable, and must be able to observe the change. + // + // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we + // knew for certain that there were no outstanding pointers to this allocation. + self.write_maybe_aligned_mut(aligned, |ectx| { + ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty) + })?; + + } else if let Value::ByRef(PtrAndAlign { + ptr: src_ptr, + aligned, + }) = src_val + { + // If the value is not `ByRef`, then we know there are no pointers to it + // and we can simply overwrite the `Value` in the locals array directly. + // + // In this specific case, where the source value is `ByRef`, we must duplicate + // the allocation, because this is a by-value operation. It would be incorrect + // if they referred to the same allocation, since then a change to one would + // implicitly change the other. 
+ // + // It is a valid optimization to attempt reading a primitive value out of the + // source and write that into the destination without making an allocation, so + // we do so here. + self.read_maybe_aligned_mut(aligned, |ectx| { + if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) { + write_dest(ectx, src_val)?; + } else { + let dest_ptr = ectx.alloc_ptr(dest_ty)?.into(); + ectx.copy(src_ptr, dest_ptr, dest_ty)?; + write_dest(ectx, Value::by_ref(dest_ptr))?; + } + Ok(()) + })?; + + } else { + // Finally, we have the simple case where neither source nor destination are + // `ByRef`. We may simply copy the source value over the the destintion. + write_dest(self, src_val)?; + } + Ok(()) + } + + pub fn write_value_to_ptr( + &mut self, + value: Value, + dest: Pointer, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + trace!("write_value_to_ptr: {:#?}", value); + match value { + Value::ByRef(PtrAndAlign { ptr, aligned }) => { + self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty)) + } + Value::ByVal(primval) => { + let layout = self.layout_of(dest_ty)?; + if layout.is_zst() { + assert!(primval.is_undef()); + Ok(()) + } else { + // TODO: Do we need signedness? 
+ self.memory.write_maybe_aligned_mut(!layout.is_packed(), |mem| { + mem.write_primval(dest.to_ptr()?, primval, layout.size.bytes(), false) + }) + } + } + Value::ByValPair(a, b) => { + let ptr = dest.to_ptr()?; + let mut layout = self.layout_of(dest_ty)?; + trace!("write_value_to_ptr valpair: {:#?}", layout); + let mut packed = layout.is_packed(); + 'outer: loop { + for i in 0..layout.fields.count() { + let field = layout.field(&self, i)?; + if layout.fields.offset(i).bytes() == 0 && layout.size == field.size { + layout = field; + packed |= layout.is_packed(); + continue 'outer; + } + } + break; + } + trace!("write_value_to_ptr valpair: {:#?}", layout); + assert_eq!(layout.fields.count(), 2); + let field_0 = layout.field(&self, 0)?; + let field_1 = layout.field(&self, 1)?; + trace!("write_value_to_ptr field 0: {:#?}", field_0); + trace!("write_value_to_ptr field 1: {:#?}", field_1); + assert_eq!( + field_0.is_packed(), + field_1.is_packed(), + "the two fields must agree on being packed" + ); + packed |= field_0.is_packed(); + let field_0_ptr = ptr.offset(layout.fields.offset(0).bytes(), &self)?.into(); + let field_1_ptr = ptr.offset(layout.fields.offset(1).bytes(), &self)?.into(); + // TODO: What about signedess? 
+ self.memory.write_maybe_aligned_mut(!packed, |mem| { + mem.write_primval(field_0_ptr, a, field_0.size.bytes(), false)?; + mem.write_primval(field_1_ptr, b, field_1.size.bytes(), false) + })?; + Ok(()) + } + } + } + + pub fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> { + use syntax::ast::FloatTy; + + let kind = match ty.sty { + ty::TyBool => PrimValKind::Bool, + ty::TyChar => PrimValKind::Char, + + ty::TyInt(int_ty) => { + use syntax::ast::IntTy::*; + let size = match int_ty { + I8 => 1, + I16 => 2, + I32 => 4, + I64 => 8, + I128 => 16, + Is => self.memory.pointer_size(), + }; + PrimValKind::from_int_size(size) + } + + ty::TyUint(uint_ty) => { + use syntax::ast::UintTy::*; + let size = match uint_ty { + U8 => 1, + U16 => 2, + U32 => 4, + U64 => 8, + U128 => 16, + Us => self.memory.pointer_size(), + }; + PrimValKind::from_uint_size(size) + } + + ty::TyFloat(FloatTy::F32) => PrimValKind::F32, + ty::TyFloat(FloatTy::F64) => PrimValKind::F64, + + ty::TyFnPtr(_) => PrimValKind::FnPtr, + + ty::TyRef(_, ref tam) | + ty::TyRawPtr(ref tam) if self.type_is_sized(tam.ty) => PrimValKind::Ptr, + + ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr, + + ty::TyAdt(..) => { + match self.layout_of(ty)?.abi { + layout::Abi::Scalar(ref scalar) => { + use rustc::ty::layout::Primitive::*; + match scalar.value { + Int(i, false) => PrimValKind::from_uint_size(i.size().bytes()), + Int(i, true) => PrimValKind::from_int_size(i.size().bytes()), + F32 => PrimValKind::F32, + F64 => PrimValKind::F64, + Pointer => PrimValKind::Ptr, + } + } + + _ => return err!(TypeNotPrimitive(ty)), + } + } + + _ => return err!(TypeNotPrimitive(ty)), + }; + + Ok(kind) + } + + fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> { + match ty.sty { + ty::TyBool if val.to_bytes()? > 1 => err!(InvalidBool), + + ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none() => { + err!(InvalidChar(val.to_bytes()? 
as u32 as u128)) + } + + _ => Ok(()), + } + } + + pub fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { + if let Some(val) = self.try_read_value(ptr, ty)? { + Ok(val) + } else { + bug!("primitive read failed for type: {:?}", ty); + } + } + + pub(crate) fn read_ptr( + &self, + ptr: MemoryPointer, + pointee_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Value> { + let ptr_size = self.memory.pointer_size(); + let p : Pointer = self.memory.read_ptr_sized_unsigned(ptr)?.into(); + if self.type_is_sized(pointee_ty) { + Ok(p.to_value()) + } else { + trace!("reading fat pointer extra of type {}", pointee_ty); + let extra = ptr.offset(ptr_size, self)?; + match self.tcx.struct_tail(pointee_ty).sty { + ty::TyDynamic(..) => Ok(p.to_value_with_vtable( + self.memory.read_ptr_sized_unsigned(extra)?.to_ptr()?, + )), + ty::TySlice(..) | ty::TyStr => Ok( + p.to_value_with_len(self.memory.read_ptr_sized_unsigned(extra)?.to_bytes()? as u64), + ), + _ => bug!("unsized primval ptr read from {:?}", pointee_ty), + } + } + } + + pub fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option> { + use syntax::ast::FloatTy; + + let ptr = ptr.to_ptr()?; + let val = match ty.sty { + ty::TyBool => { + let val = self.memory.read_primval(ptr, 1, false)?; + let val = match val { + PrimVal::Bytes(0) => false, + PrimVal::Bytes(1) => true, + // TODO: This seems a little overeager, should reading at bool type already be insta-UB? + _ => return err!(InvalidBool), + }; + PrimVal::from_bool(val) + } + ty::TyChar => { + let c = self.memory.read_primval(ptr, 4, false)?.to_bytes()? as u32; + match ::std::char::from_u32(c) { + Some(ch) => PrimVal::from_char(ch), + None => return err!(InvalidChar(c as u128)), + } + } + + ty::TyInt(int_ty) => { + use syntax::ast::IntTy::*; + let size = match int_ty { + I8 => 1, + I16 => 2, + I32 => 4, + I64 => 8, + I128 => 16, + Is => self.memory.pointer_size(), + }; + self.memory.read_primval(ptr, size, true)? 
+ } + + ty::TyUint(uint_ty) => { + use syntax::ast::UintTy::*; + let size = match uint_ty { + U8 => 1, + U16 => 2, + U32 => 4, + U64 => 8, + U128 => 16, + Us => self.memory.pointer_size(), + }; + self.memory.read_primval(ptr, size, false)? + } + + ty::TyFloat(FloatTy::F32) => PrimVal::Bytes(self.memory.read_primval(ptr, 4, false)?.to_bytes()?), + ty::TyFloat(FloatTy::F64) => PrimVal::Bytes(self.memory.read_primval(ptr, 8, false)?.to_bytes()?), + + ty::TyFnPtr(_) => self.memory.read_ptr_sized_unsigned(ptr)?, + ty::TyRef(_, ref tam) | + ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, tam.ty).map(Some), + + ty::TyAdt(def, _) => { + if def.is_box() { + return self.read_ptr(ptr, ty.boxed_ty()).map(Some); + } + + if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi { + let mut signed = false; + if let layout::Int(_, s) = scalar.value { + signed = s; + } + let size = scalar.value.size(self).bytes(); + self.memory.read_primval(ptr, size, signed)? + } else { + return Ok(None); + } + } + + _ => return Ok(None), + }; + + Ok(Some(Value::ByVal(val))) + } + + pub fn frame(&self) -> &Frame<'tcx> { + self.stack.last().expect("no call frames exist") + } + + pub fn frame_mut(&mut self) -> &mut Frame<'tcx> { + self.stack.last_mut().expect("no call frames exist") + } + + pub(super) fn mir(&self) -> &'tcx mir::Mir<'tcx> { + self.frame().mir + } + + pub fn substs(&self) -> &'tcx Substs<'tcx> { + if let Some(frame) = self.stack.last() { + frame.instance.substs + } else { + Substs::empty() + } + } + + fn unsize_into_ptr( + &mut self, + src: Value, + src_ty: Ty<'tcx>, + dest: Place, + dest_ty: Ty<'tcx>, + sty: Ty<'tcx>, + dty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + // A -> A conversion + let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty); + + match (&src_pointee_ty.sty, &dest_pointee_ty.sty) { + (&ty::TyArray(_, length), &ty::TySlice(_)) => { + let ptr = self.into_ptr(src)?; + // u64 cast is from usize to u64, which is always good + let valty = 
ValTy { + value: ptr.to_value_with_len(length.val.to_const_int().unwrap().to_u64().unwrap() ), + ty: dest_ty, + }; + self.write_value(valty, dest) + } + (&ty::TyDynamic(..), &ty::TyDynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never actually require an actual + // change to the vtable. + let valty = ValTy { + value: src, + ty: dest_ty, + }; + self.write_value(valty, dest) + } + (_, &ty::TyDynamic(ref data, _)) => { + let trait_ref = data.principal().unwrap().with_self_ty( + self.tcx, + src_pointee_ty, + ); + let trait_ref = self.tcx.erase_regions(&trait_ref); + let vtable = self.get_vtable(src_pointee_ty, trait_ref)?; + let ptr = self.into_ptr(src)?; + let valty = ValTy { + value: ptr.to_value_with_vtable(vtable), + ty: dest_ty, + }; + self.write_value(valty, dest) + } + + _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty), + } + } + + fn unsize_into( + &mut self, + src: Value, + src_ty: Ty<'tcx>, + dst: Place, + dst_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + let src_layout = self.layout_of(src_ty)?; + let dst_layout = self.layout_of(dst_ty)?; + match (&src_ty.sty, &dst_ty.sty) { + (&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) | + (&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) | + (&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => { + self.unsize_into_ptr(src, src_ty, dst, dst_ty, s.ty, d.ty) + } + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { + if def_a.is_box() || def_b.is_box() { + if !def_a.is_box() || !def_b.is_box() { + panic!("invalid unsizing between {:?} -> {:?}", src_ty, dst_ty); + } + return self.unsize_into_ptr( + src, + src_ty, + dst, + dst_ty, + src_ty.boxed_ty(), + dst_ty.boxed_ty(), + ); + } + if self.ty_to_primval_kind(src_ty).is_ok() { + // TODO: We ignore the packed flag here + let sty = src_layout.field(&self, 0)?.ty; + let dty = dst_layout.field(&self, 0)?.ty; + return self.unsize_into(src, sty, dst, dty); + } + // unsizing of generic struct with pointer fields + // Example: `Arc` -> `Arc` 
+ // here we need to increase the size of every &T thin ptr field to a fat ptr + + assert_eq!(def_a, def_b); + + let src_ptr = match src { + Value::ByRef(PtrAndAlign { ptr, aligned: true }) => ptr, + // the entire struct is just a pointer + Value::ByVal(_) => { + for i in 0..src_layout.fields.count() { + let src_field = src_layout.field(&self, i)?; + let dst_field = dst_layout.field(&self, i)?; + if dst_layout.is_zst() { + continue; + } + assert_eq!(src_layout.fields.offset(i).bytes(), 0); + assert_eq!(dst_layout.fields.offset(i).bytes(), 0); + assert_eq!(src_field.size, src_layout.size); + assert_eq!(dst_field.size, dst_layout.size); + return self.unsize_into( + src, + src_field.ty, + dst, + dst_field.ty, + ); + } + bug!("by val unsize into where the value doesn't cover the entire type") + } + // TODO: Is it possible for unaligned pointers to occur here? + _ => bug!("expected aligned pointer, got {:?}", src), + }; + + // FIXME(solson) + let dst = self.force_allocation(dst)?.to_ptr()?; + for i in 0..src_layout.fields.count() { + let src_field = src_layout.field(&self, i)?; + let dst_field = dst_layout.field(&self, i)?; + if dst_field.is_zst() { + continue; + } + let src_field_offset = src_layout.fields.offset(i).bytes(); + let dst_field_offset = dst_layout.fields.offset(i).bytes(); + let src_f_ptr = src_ptr.offset(src_field_offset, &self)?; + let dst_f_ptr = dst.offset(dst_field_offset, &self)?; + if src_field.ty == dst_field.ty { + self.copy(src_f_ptr, dst_f_ptr.into(), src_field.ty)?; + } else { + self.unsize_into( + Value::by_ref(src_f_ptr), + src_field.ty, + Place::from_ptr(dst_f_ptr), + dst_field.ty, + )?; + } + } + Ok(()) + } + _ => { + bug!( + "unsize_into: invalid conversion: {:?} -> {:?}", + src_ty, + dst_ty + ) + } + } + } + + pub fn dump_local(&self, place: Place) { + // Debug output + match place { + Place::Local { frame, local } => { + let mut allocs = Vec::new(); + let mut msg = format!("{:?}", local); + if frame != self.cur_frame() { + write!(msg, " 
({} frames up)", self.cur_frame() - frame).unwrap(); + } + write!(msg, ":").unwrap(); + + match self.stack[frame].get_local(local) { + Err(EvalError { kind: EvalErrorKind::DeadLocal, .. }) => { + write!(msg, " is dead").unwrap(); + } + Err(err) => { + panic!("Failed to access local: {:?}", err); + } + Ok(Value::ByRef(PtrAndAlign { ptr, aligned })) => { + match ptr.into_inner_primval() { + PrimVal::Ptr(ptr) => { + write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " }) + .unwrap(); + allocs.push(ptr.alloc_id); + } + ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(), + } + } + Ok(Value::ByVal(val)) => { + write!(msg, " {:?}", val).unwrap(); + if let PrimVal::Ptr(ptr) = val { + allocs.push(ptr.alloc_id); + } + } + Ok(Value::ByValPair(val1, val2)) => { + write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); + if let PrimVal::Ptr(ptr) = val1 { + allocs.push(ptr.alloc_id); + } + if let PrimVal::Ptr(ptr) = val2 { + allocs.push(ptr.alloc_id); + } + } + } + + trace!("{}", msg); + self.memory.dump_allocs(allocs); + } + Place::Ptr { ptr: PtrAndAlign { ptr, aligned }, .. } => { + match ptr.into_inner_primval() { + PrimVal::Ptr(ptr) => { + trace!("by {}ref:", if aligned { "" } else { "unaligned " }); + self.memory.dump_alloc(ptr.alloc_id); + } + ptr => trace!(" integral by ref: {:?}", ptr), + } + } + } + } + + /// Convenience function to ensure correct usage of locals + pub fn modify_local(&mut self, frame: usize, local: mir::Local, f: F) -> EvalResult<'tcx> + where + F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>, + { + let val = self.stack[frame].get_local(local)?; + let new_val = f(self, val)?; + self.stack[frame].set_local(local, new_val)?; + // FIXME(solson): Run this when setting to Undef? (See previous version of this code.) 
+ // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) { + // self.memory.deallocate(ptr)?; + // } + Ok(()) + } + + pub fn report(&self, e: &mut EvalError) { + if let Some(ref mut backtrace) = e.backtrace { + let mut trace_text = "\n\nAn error occurred in miri:\n".to_string(); + backtrace.resolve(); + write!(trace_text, "backtrace frames: {}\n", backtrace.frames().len()).unwrap(); + 'frames: for (i, frame) in backtrace.frames().iter().enumerate() { + if frame.symbols().is_empty() { + write!(trace_text, "{}: no symbols\n", i).unwrap(); + } + for symbol in frame.symbols() { + write!(trace_text, "{}: ", i).unwrap(); + if let Some(name) = symbol.name() { + write!(trace_text, "{}\n", name).unwrap(); + } else { + write!(trace_text, "\n").unwrap(); + } + write!(trace_text, "\tat ").unwrap(); + if let Some(file_path) = symbol.filename() { + write!(trace_text, "{}", file_path.display()).unwrap(); + } else { + write!(trace_text, "").unwrap(); + } + if let Some(line) = symbol.lineno() { + write!(trace_text, ":{}\n", line).unwrap(); + } else { + write!(trace_text, "\n").unwrap(); + } + } + } + error!("{}", trace_text); + } + if let Some(frame) = self.stack().last() { + let block = &frame.mir.basic_blocks()[frame.block]; + let span = if frame.stmt < block.statements.len() { + block.statements[frame.stmt].source_info.span + } else { + block.terminator().source_info.span + }; + let mut err = self.tcx.sess.struct_span_err(span, &e.to_string()); + for &Frame { instance, span, .. 
} in self.stack().iter().rev() { + if self.tcx.def_key(instance.def_id()).disambiguated_data.data == + DefPathData::ClosureExpr + { + err.span_note(span, "inside call to closure"); + continue; + } + err.span_note(span, &format!("inside call to {}", instance)); + } + err.emit(); + } else { + self.tcx.sess.err(&e.to_string()); + } + } +} + +impl<'tcx> Frame<'tcx> { + pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> { + // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0. + self.locals[local.index() - 1].ok_or(EvalErrorKind::DeadLocal.into()) + } + + fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> { + // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0. + match self.locals[local.index() - 1] { + None => err!(DeadLocal), + Some(ref mut local) => { + *local = value; + Ok(()) + } + } + } + + pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, Option> { + trace!("{:?} is now live", local); + + let old = self.locals[local.index() - 1]; + self.locals[local.index() - 1] = Some(Value::ByVal(PrimVal::Undef)); // StorageLive *always* kills the value that's currently stored + return Ok(old); + } + + /// Returns the old value of the local + pub fn storage_dead(&mut self, local: mir::Local) -> EvalResult<'tcx, Option> { + trace!("{:?} is now dead", local); + + let old = self.locals[local.index() - 1]; + self.locals[local.index() - 1] = None; + return Ok(old); + } +} + +// TODO(solson): Upstream these methods into rustc::ty::layout. 
+ +pub fn resolve_drop_in_place<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, +) -> ty::Instance<'tcx> { + let def_id = tcx.require_lang_item(::rustc::middle::lang_items::DropInPlaceFnLangItem); + let substs = tcx.intern_substs(&[Kind::from(ty)]); + ty::Instance::resolve(tcx, ty::ParamEnv::empty(Reveal::All), def_id, substs).unwrap() +} diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs new file mode 100644 index 0000000000000..47a6bfeb37ba3 --- /dev/null +++ b/src/librustc_mir/interpret/machine.rs @@ -0,0 +1,117 @@ +//! This module contains everything needed to instantiate an interpreter. +//! This separation exists to ensure that no fancy miri features like +//! interpreting common C functions leak into CTFE. + +use rustc::mir::interpret::{EvalResult, PrimVal, MemoryPointer, AccessKind}; +use super::{EvalContext, Place, ValTy, Memory}; + +use rustc::mir; +use rustc::ty::{self, Ty}; +use syntax::codemap::Span; +use syntax::ast::Mutability; + +/// Methods of this trait signifies a point where CTFE evaluation would fail +/// and some use case dependent behaviour can instead be applied +pub trait Machine<'tcx>: Sized { + /// Additional data that can be accessed via the Memory + type MemoryData; + + /// Additional memory kinds a machine wishes to distinguish from the builtin ones + type MemoryKinds: ::std::fmt::Debug + PartialEq + Copy + Clone; + + /// Entry point to all function calls. + /// + /// Returns Ok(true) when the function was handled completely + /// e.g. due to missing mir + /// + /// Returns Ok(false) if a new stack frame was pushed + fn eval_fn_call<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + destination: Option<(Place, mir::BasicBlock)>, + args: &[ValTy<'tcx>], + span: Span, + sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool>; + + /// directly process an intrinsic without pushing a stack frame. 
+ fn call_intrinsic<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + args: &[ValTy<'tcx>], + dest: Place, + dest_layout: ty::layout::TyLayout<'tcx>, + target: mir::BasicBlock, + ) -> EvalResult<'tcx>; + + /// Called for all binary operations except on float types. + /// + /// Returns `None` if the operation should be handled by the integer + /// op code in order to share more code between machines + /// + /// Returns a (value, overflowed) pair if the operation succeeded + fn try_ptr_op<'a>( + ecx: &EvalContext<'a, 'tcx, Self>, + bin_op: mir::BinOp, + left: PrimVal, + left_ty: Ty<'tcx>, + right: PrimVal, + right_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Option<(PrimVal, bool)>>; + + /// Called when trying to mark machine defined `MemoryKinds` as static + fn mark_static_initialized(m: Self::MemoryKinds) -> EvalResult<'tcx>; + + /// Heap allocations via the `box` keyword + /// + /// Returns a pointer to the allocated memory + fn box_alloc<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + ty: Ty<'tcx>, + dest: Place, + ) -> EvalResult<'tcx>; + + /// Called when trying to access a global declared with a `linkage` attribute + fn global_item_with_linkage<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + mutability: Mutability, + ) -> EvalResult<'tcx>; + + fn check_locks<'a>( + _mem: &Memory<'a, 'tcx, Self>, + _ptr: MemoryPointer, + _size: u64, + _access: AccessKind, + ) -> EvalResult<'tcx> { + Ok(()) + } + + fn add_lock<'a>( + _mem: &mut Memory<'a, 'tcx, Self>, + _id: u64, + ) {} + + fn free_lock<'a>( + _mem: &mut Memory<'a, 'tcx, Self>, + _id: u64, + _len: u64, + ) -> EvalResult<'tcx> { + Ok(()) + } + + fn end_region<'a>( + _ecx: &mut EvalContext<'a, 'tcx, Self>, + _reg: Option<::rustc::middle::region::Scope>, + ) -> EvalResult<'tcx> { + Ok(()) + } + + fn validation_op<'a>( + _ecx: &mut EvalContext<'a, 'tcx, Self>, + _op: ::rustc::mir::ValidationOp, + _operand: &::rustc::mir::ValidationOperand<'tcx, 
::rustc::mir::Place<'tcx>>, + ) -> EvalResult<'tcx> { + Ok(()) + } +} diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs new file mode 100644 index 0000000000000..490ac0e0fb767 --- /dev/null +++ b/src/librustc_mir/interpret/memory.rs @@ -0,0 +1,1141 @@ +use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian}; +use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque}; +use std::{ptr, mem, io}; +use std::cell::Cell; + +use rustc::ty::{Instance, TyCtxt}; +use rustc::ty::layout::{self, TargetDataLayout}; +use syntax::ast::Mutability; + +use rustc::mir::interpret::{MemoryPointer, AllocId, Allocation, AccessKind, UndefMask, PtrAndAlign, Value, Pointer, + EvalResult, PrimVal, EvalErrorKind}; + +use super::{EvalContext, Machine}; + +//////////////////////////////////////////////////////////////////////////////// +// Allocations and pointers +//////////////////////////////////////////////////////////////////////////////// + +#[derive(Debug, PartialEq, Copy, Clone)] +pub enum MemoryKind { + /// Error if deallocated except during a stack pop + Stack, + /// A mutable Static. All the others are interned in the tcx + MutableStatic, // FIXME: move me into the machine, rustc const eval doesn't need them + /// Additional memory kinds a machine wishes to distinguish from the builtin ones + Machine(T), +} + +//////////////////////////////////////////////////////////////////////////////// +// Top-level interpreter memory +//////////////////////////////////////////////////////////////////////////////// + +pub struct Memory<'a, 'tcx: 'a, M: Machine<'tcx>> { + /// Additional data required by the Machine + pub data: M::MemoryData, + + /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate` + alloc_kind: HashMap>, + + /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations). 
+ alloc_map: HashMap, + + /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations). + /// + /// Stores statics while they are being processed, before they are interned and thus frozen + uninitialized_statics: HashMap, + + /// Number of virtual bytes allocated. + memory_usage: u64, + + /// Maximum number of virtual bytes that may be allocated. + memory_size: u64, + + /// To avoid having to pass flags to every single memory access, we have some global state saying whether + /// alignment checking is currently enforced for read and/or write accesses. + reads_are_aligned: Cell, + writes_are_aligned: Cell, + + /// The current stack frame. Used to check accesses against locks. + pub cur_frame: usize, + + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, +} + +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, max_memory: u64, data: M::MemoryData) -> Self { + Memory { + data, + alloc_kind: HashMap::new(), + alloc_map: HashMap::new(), + uninitialized_statics: HashMap::new(), + tcx, + memory_size: max_memory, + memory_usage: 0, + reads_are_aligned: Cell::new(true), + writes_are_aligned: Cell::new(true), + cur_frame: usize::max_value(), + } + } + + pub fn allocations<'x>( + &'x self, + ) -> impl Iterator { + self.alloc_map.iter().map(|(&id, alloc)| (AllocId(id), alloc)) + } + + pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> MemoryPointer { + let id = self.tcx.interpret_interner.borrow_mut().create_fn_alloc(instance); + MemoryPointer::new(AllocId(id), 0) + } + + pub fn allocate_cached(&mut self, bytes: &[u8]) -> MemoryPointer { + let id = self.tcx.allocate_cached(bytes); + MemoryPointer::new(AllocId(id), 0) + } + + /// kind is `None` for statics + pub fn allocate( + &mut self, + size: u64, + align: u64, + kind: Option>, + ) -> EvalResult<'tcx, MemoryPointer> { + assert_ne!(align, 0); + assert!(align.is_power_of_two()); + + if self.memory_size - self.memory_usage < size { + return err!(OutOfMemory 
{ + allocation_size: size, + memory_size: self.memory_size, + memory_usage: self.memory_usage, + }); + } + self.memory_usage += size; + assert_eq!(size as usize as u64, size); + let alloc = Allocation { + bytes: vec![0; size as usize], + relocations: BTreeMap::new(), + undef_mask: UndefMask::new(size), + align, + }; + let id = self.tcx.interpret_interner.borrow_mut().reserve(); + M::add_lock(self, id); + match kind { + Some(kind @ MemoryKind::Stack) | + Some(kind @ MemoryKind::Machine(_)) => { + self.alloc_map.insert(id, alloc); + self.alloc_kind.insert(id, kind); + }, + None => { + self.uninitialized_statics.insert(id, alloc); + }, + Some(MemoryKind::MutableStatic) => bug!("don't allocate mutable statics directly") + } + Ok(MemoryPointer::new(AllocId(id), 0)) + } + + pub fn reallocate( + &mut self, + ptr: MemoryPointer, + old_size: u64, + old_align: u64, + new_size: u64, + new_align: u64, + kind: MemoryKind, + ) -> EvalResult<'tcx, MemoryPointer> { + use std::cmp::min; + + if ptr.offset != 0 { + return err!(ReallocateNonBasePtr); + } + if self.alloc_map.contains_key(&ptr.alloc_id.0) { + let alloc_kind = self.alloc_kind[&ptr.alloc_id.0]; + if alloc_kind != kind { + return err!(ReallocatedWrongMemoryKind( + format!("{:?}", alloc_kind), + format!("{:?}", kind), + )); + } + } + + // For simplicities' sake, we implement reallocate as "alloc, copy, dealloc" + let new_ptr = self.allocate(new_size, new_align, Some(kind))?; + self.copy( + ptr.into(), + new_ptr.into(), + min(old_size, new_size), + min(old_align, new_align), + /*nonoverlapping*/ + true, + )?; + self.deallocate(ptr, Some((old_size, old_align)), kind)?; + + Ok(new_ptr) + } + + pub fn deallocate_local(&mut self, ptr: MemoryPointer) -> EvalResult<'tcx> { + match self.alloc_kind.get(&ptr.alloc_id.0).cloned() { + // for a constant like `const FOO: &i32 = &1;` the local containing + // the `1` is referred to by the global. 
We transitively marked everything + // the global refers to as static itself, so we don't free it here + Some(MemoryKind::MutableStatic) => Ok(()), + Some(MemoryKind::Stack) => self.deallocate(ptr, None, MemoryKind::Stack), + // Happens if the memory was interned into immutable memory + None => Ok(()), + other => bug!("local contained non-stack memory: {:?}", other), + } + } + + pub fn deallocate( + &mut self, + ptr: MemoryPointer, + size_and_align: Option<(u64, u64)>, + kind: MemoryKind, + ) -> EvalResult<'tcx> { + if ptr.offset != 0 { + return err!(DeallocateNonBasePtr); + } + + let alloc = match self.alloc_map.remove(&ptr.alloc_id.0) { + Some(alloc) => alloc, + None => if self.uninitialized_statics.contains_key(&ptr.alloc_id.0) { + return err!(DeallocatedWrongMemoryKind( + "uninitializedstatic".to_string(), + format!("{:?}", kind), + )) + } else if self.tcx.interpret_interner.borrow().get_fn(ptr.alloc_id.0).is_some() { + return err!(DeallocatedWrongMemoryKind( + "function".to_string(), + format!("{:?}", kind), + )) + } else if self.tcx.interpret_interner.borrow().get_alloc(ptr.alloc_id.0).is_some() { + return err!(DeallocatedWrongMemoryKind( + "static".to_string(), + format!("{:?}", kind), + )) + } else { + return err!(DoubleFree) + }, + }; + + let alloc_kind = self.alloc_kind.remove(&ptr.alloc_id.0).expect("alloc_map out of sync with alloc_kind"); + + // It is okay for us to still holds locks on deallocation -- for example, we could store data we own + // in a local, and the local could be deallocated (from StorageDead) before the function returns. + // However, we should check *something*. For now, we make sure that there is no conflicting write + // lock by another frame. We *have* to permit deallocation if we hold a read lock. + // TODO: Figure out the exact rules here. 
+ M::free_lock(self, ptr.alloc_id.0, alloc.bytes.len() as u64)?; + + if alloc_kind != kind { + return err!(DeallocatedWrongMemoryKind( + format!("{:?}", alloc_kind), + format!("{:?}", kind), + )); + } + if let Some((size, align)) = size_and_align { + if size != alloc.bytes.len() as u64 || align != alloc.align { + return err!(IncorrectAllocationInformation(size, alloc.bytes.len(), align, alloc.align)); + } + } + + self.memory_usage -= alloc.bytes.len() as u64; + debug!("deallocated : {}", ptr.alloc_id); + + Ok(()) + } + + pub fn pointer_size(&self) -> u64 { + self.tcx.data_layout.pointer_size.bytes() + } + + pub fn endianess(&self) -> layout::Endian { + self.tcx.data_layout.endian + } + + /// Check that the pointer is aligned AND non-NULL. + pub fn check_align(&self, ptr: Pointer, align: u64, access: Option) -> EvalResult<'tcx> { + // Check non-NULL/Undef, extract offset + let (offset, alloc_align) = match ptr.into_inner_primval() { + PrimVal::Ptr(ptr) => { + let alloc = self.get(ptr.alloc_id)?; + (ptr.offset, alloc.align) + } + PrimVal::Bytes(bytes) => { + let v = ((bytes as u128) % (1 << self.pointer_size())) as u64; + if v == 0 { + return err!(InvalidNullPointerUsage); + } + (v, align) // the base address if the "integer allocation" is 0 and hence always aligned + } + PrimVal::Undef => return err!(ReadUndefBytes), + }; + // See if alignment checking is disabled + let enforce_alignment = match access { + Some(AccessKind::Read) => self.reads_are_aligned.get(), + Some(AccessKind::Write) => self.writes_are_aligned.get(), + None => true, + }; + if !enforce_alignment { + return Ok(()); + } + // Check alignment + if alloc_align < align { + return err!(AlignmentCheckFailed { + has: alloc_align, + required: align, + }); + } + if offset % align == 0 { + Ok(()) + } else { + err!(AlignmentCheckFailed { + has: offset % align, + required: align, + }) + } + } + + pub fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> { + let alloc = 
self.get(ptr.alloc_id)?; + let allocation_size = alloc.bytes.len() as u64; + if ptr.offset > allocation_size { + return err!(PointerOutOfBounds { + ptr, + access, + allocation_size, + }); + } + Ok(()) + } +} + +/// Allocation accessors +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> { + // normal alloc? + match self.alloc_map.get(&id.0) { + Some(alloc) => Ok(alloc), + // uninitialized static alloc? + None => match self.uninitialized_statics.get(&id.0) { + Some(alloc) => Ok(alloc), + None => { + let int = self.tcx.interpret_interner.borrow(); + // static alloc? + int.get_alloc(id.0) + // no alloc? produce an error + .ok_or_else(|| if int.get_fn(id.0).is_some() { + EvalErrorKind::DerefFunctionPointer.into() + } else { + EvalErrorKind::DanglingPointerDeref.into() + }) + }, + }, + } + } + + fn get_mut( + &mut self, + id: AllocId, + ) -> EvalResult<'tcx, &mut Allocation> { + // normal alloc? + match self.alloc_map.get_mut(&id.0) { + Some(alloc) => Ok(alloc), + // uninitialized static alloc? + None => match self.uninitialized_statics.get_mut(&id.0) { + Some(alloc) => Ok(alloc), + None => { + let int = self.tcx.interpret_interner.borrow(); + // no alloc or immutable alloc? produce an error + if int.get_alloc(id.0).is_some() { + err!(ModifiedConstantMemory) + } else if int.get_fn(id.0).is_some() { + err!(DerefFunctionPointer) + } else { + err!(DanglingPointerDeref) + } + }, + }, + } + } + + pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Instance<'tcx>> { + if ptr.offset != 0 { + return err!(InvalidFunctionPointer); + } + debug!("reading fn ptr: {}", ptr.alloc_id); + self.tcx + .interpret_interner + .borrow() + .get_fn(ptr.alloc_id.0) + .ok_or(EvalErrorKind::ExecuteMemory.into()) + } + + /// For debugging, print an allocation and all allocations it points to, recursively. 
+ pub fn dump_alloc(&self, id: AllocId) { + self.dump_allocs(vec![id]); + } + + /// For debugging, print a list of allocations and all allocations they point to, recursively. + pub fn dump_allocs(&self, mut allocs: Vec) { + use std::fmt::Write; + allocs.sort(); + allocs.dedup(); + let mut allocs_to_print = VecDeque::from(allocs); + let mut allocs_seen = HashSet::new(); + + while let Some(id) = allocs_to_print.pop_front() { + let mut msg = format!("Alloc {:<5} ", format!("{}:", id)); + let prefix_len = msg.len(); + let mut relocations = vec![]; + + let (alloc, immutable) = + // normal alloc? + match self.alloc_map.get(&id.0) { + Some(a) => (a, match self.alloc_kind[&id.0] { + MemoryKind::Stack => " (stack)".to_owned(), + MemoryKind::Machine(m) => format!(" ({:?})", m), + MemoryKind::MutableStatic => " (static mut)".to_owned(), + }), + // uninitialized static alloc? + None => match self.uninitialized_statics.get(&id.0) { + Some(a) => (a, " (static in the process of initialization)".to_owned()), + None => { + let int = self.tcx.interpret_interner.borrow(); + // static alloc? + match int.get_alloc(id.0) { + Some(a) => (a, "(immutable)".to_owned()), + None => if let Some(func) = int.get_fn(id.0) { + trace!("{} {}", msg, func); + continue; + } else { + trace!("{} (deallocated)", msg); + continue; + }, + } + }, + }, + }; + + for i in 0..(alloc.bytes.len() as u64) { + if let Some(&target_id) = alloc.relocations.get(&i) { + if allocs_seen.insert(target_id) { + allocs_to_print.push_back(target_id); + } + relocations.push((i, target_id)); + } + if alloc.undef_mask.is_range_defined(i, i + 1) { + // this `as usize` is fine, since `i` came from a `usize` + write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap(); + } else { + msg.push_str("__ "); + } + } + + trace!( + "{}({} bytes, alignment {}){}", + msg, + alloc.bytes.len(), + alloc.align, + immutable + ); + + if !relocations.is_empty() { + msg.clear(); + write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces. 
+ let mut pos = 0; + let relocation_width = (self.pointer_size() - 1) * 3; + for (i, target_id) in relocations { + // this `as usize` is fine, since we can't print more chars than `usize::MAX` + write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap(); + let target = format!("({})", target_id); + // this `as usize` is fine, since we can't print more chars than `usize::MAX` + write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap(); + pos = i + self.pointer_size(); + } + trace!("{}", msg); + } + } + } + + pub fn leak_report(&self) -> usize { + trace!("### LEAK REPORT ###"); + let kinds = &self.alloc_kind; + let leaks: Vec<_> = self.alloc_map + .keys() + .filter_map(|key| if kinds[key] != MemoryKind::MutableStatic { + Some(AllocId(*key)) + } else { + None + }) + .collect(); + let n = leaks.len(); + self.dump_allocs(leaks); + n + } +} + +/// Byte accessors +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + fn get_bytes_unchecked( + &self, + ptr: MemoryPointer, + size: u64, + align: u64, + ) -> EvalResult<'tcx, &[u8]> { + // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL + self.check_align(ptr.into(), align, Some(AccessKind::Read))?; + if size == 0 { + return Ok(&[]); + } + M::check_locks(self, ptr, size, AccessKind::Read)?; + self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) + let alloc = self.get(ptr.alloc_id)?; + assert_eq!(ptr.offset as usize as u64, ptr.offset); + assert_eq!(size as usize as u64, size); + let offset = ptr.offset as usize; + Ok(&alloc.bytes[offset..offset + size as usize]) + } + + fn get_bytes_unchecked_mut( + &mut self, + ptr: MemoryPointer, + size: u64, + align: u64, + ) -> EvalResult<'tcx, &mut [u8]> { + // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL + self.check_align(ptr.into(), align, Some(AccessKind::Write))?; + if size == 0 { + return 
Ok(&mut []); + } + M::check_locks(self, ptr, size, AccessKind::Write)?; + self.check_bounds(ptr.offset(size, &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) + let alloc = self.get_mut(ptr.alloc_id)?; + assert_eq!(ptr.offset as usize as u64, ptr.offset); + assert_eq!(size as usize as u64, size); + let offset = ptr.offset as usize; + Ok(&mut alloc.bytes[offset..offset + size as usize]) + } + + fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> { + assert_ne!(size, 0); + if self.relocations(ptr, size)?.count() != 0 { + return err!(ReadPointerAsBytes); + } + self.check_defined(ptr, size)?; + self.get_bytes_unchecked(ptr, size, align) + } + + fn get_bytes_mut( + &mut self, + ptr: MemoryPointer, + size: u64, + align: u64, + ) -> EvalResult<'tcx, &mut [u8]> { + assert_ne!(size, 0); + self.clear_relocations(ptr, size)?; + self.mark_definedness(ptr.into(), size, true)?; + self.get_bytes_unchecked_mut(ptr, size, align) + } +} + +/// Reading and writing +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + /// mark an allocation pointed to by a static as static and initialized + fn mark_inner_allocation_initialized( + &mut self, + alloc: AllocId, + mutability: Mutability, + ) -> EvalResult<'tcx> { + match self.alloc_kind.get(&alloc.0) { + // do not go into immutable statics + None | + // or mutable statics + Some(&MemoryKind::MutableStatic) => Ok(()), + // just locals and machine allocs + Some(_) => self.mark_static_initalized(alloc, mutability), + } + } + + /// mark an allocation as static and initialized, either mutable or not + pub fn mark_static_initalized( + &mut self, + alloc_id: AllocId, + mutability: Mutability, + ) -> EvalResult<'tcx> { + trace!( + "mark_static_initalized {:?}, mutability: {:?}", + alloc_id, + mutability + ); + if mutability == Mutability::Immutable { + let alloc = self.alloc_map.remove(&alloc_id.0); + let kind = self.alloc_kind.remove(&alloc_id.0); + 
assert_ne!(kind, Some(MemoryKind::MutableStatic)); + let uninit = self.uninitialized_statics.remove(&alloc_id.0); + if let Some(alloc) = alloc.or(uninit) { + let alloc = self.tcx.intern_const_alloc(alloc); + self.tcx.interpret_interner.borrow_mut().intern_at_reserved(alloc_id.0, alloc); + // recurse into inner allocations + for &alloc in alloc.relocations.values() { + self.mark_inner_allocation_initialized(alloc, mutability)?; + } + } + return Ok(()); + } + // We are marking the static as initialized, so move it out of the uninit map + if let Some(uninit) = self.uninitialized_statics.remove(&alloc_id.0) { + self.alloc_map.insert(alloc_id.0, uninit); + } + // do not use `self.get_mut(alloc_id)` here, because we might have already marked a + // sub-element or have circular pointers (e.g. `Rc`-cycles) + let relocations = match self.alloc_map.get_mut(&alloc_id.0) { + Some(&mut Allocation { + ref mut relocations, + .. + }) => { + match self.alloc_kind.get(&alloc_id.0) { + // const eval results can refer to "locals". + // E.g. 
`const Foo: &u32 = &1;` refers to the temp local that stores the `1` + None | + Some(&MemoryKind::Stack) => {}, + Some(&MemoryKind::Machine(m)) => M::mark_static_initialized(m)?, + Some(&MemoryKind::MutableStatic) => { + trace!("mark_static_initalized: skipping already initialized static referred to by static currently being initialized"); + return Ok(()); + }, + } + // overwrite or insert + self.alloc_kind.insert(alloc_id.0, MemoryKind::MutableStatic); + // take out the relocations vector to free the borrow on self, so we can call + // mark recursively + mem::replace(relocations, Default::default()) + } + None => return err!(DanglingPointerDeref), + }; + // recurse into inner allocations + for &alloc in relocations.values() { + self.mark_inner_allocation_initialized(alloc, mutability)?; + } + // put back the relocations + self.alloc_map + .get_mut(&alloc_id.0) + .expect("checked above") + .relocations = relocations; + Ok(()) + } + + pub fn copy( + &mut self, + src: Pointer, + dest: Pointer, + size: u64, + align: u64, + nonoverlapping: bool, + ) -> EvalResult<'tcx> { + // Empty accesses don't need to be valid pointers, but they should still be aligned + self.check_align(src, align, Some(AccessKind::Read))?; + self.check_align(dest, align, Some(AccessKind::Write))?; + if size == 0 { + return Ok(()); + } + let src = src.to_ptr()?; + let dest = dest.to_ptr()?; + self.check_relocation_edges(src, size)?; + + // first copy the relocations to a temporary buffer, because + // `get_bytes_mut` will clear the relocations, which is correct, + // since we don't want to keep any relocations at the target. + + let relocations: Vec<_> = self.relocations(src, size)? + .map(|(&offset, &alloc_id)| { + // Update relocation offsets for the new positions in the destination allocation. 
+ (offset + dest.offset - src.offset, alloc_id) + }) + .collect(); + + let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr(); + let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr(); + + // SAFE: The above indexing would have panicked if there weren't at least `size` bytes + // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and + // `dest` could possibly overlap. + unsafe { + assert_eq!(size as usize as u64, size); + if src.alloc_id == dest.alloc_id { + if nonoverlapping { + if (src.offset <= dest.offset && src.offset + size > dest.offset) || + (dest.offset <= src.offset && dest.offset + size > src.offset) + { + return err!(Intrinsic( + format!("copy_nonoverlapping called on overlapping ranges"), + )); + } + } + ptr::copy(src_bytes, dest_bytes, size as usize); + } else { + ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize); + } + } + + self.copy_undef_mask(src, dest, size)?; + // copy back the relocations + self.get_mut(dest.alloc_id)?.relocations.extend(relocations); + + Ok(()) + } + + pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> { + let alloc = self.get(ptr.alloc_id)?; + assert_eq!(ptr.offset as usize as u64, ptr.offset); + let offset = ptr.offset as usize; + match alloc.bytes[offset..].iter().position(|&c| c == 0) { + Some(size) => { + if self.relocations(ptr, (size + 1) as u64)?.count() != 0 { + return err!(ReadPointerAsBytes); + } + self.check_defined(ptr, (size + 1) as u64)?; + M::check_locks(self, ptr, (size + 1) as u64, AccessKind::Read)?; + Ok(&alloc.bytes[offset..offset + size]) + } + None => err!(UnterminatedCString(ptr)), + } + } + + pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> { + // Empty accesses don't need to be valid pointers, but they should still be non-NULL + self.check_align(ptr, 1, Some(AccessKind::Read))?; + if size == 0 { + return Ok(&[]); + } + self.get_bytes(ptr.to_ptr()?, size, 1) + } + + pub fn 
write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> { + // Empty accesses don't need to be valid pointers, but they should still be non-NULL + self.check_align(ptr, 1, Some(AccessKind::Write))?; + if src.is_empty() { + return Ok(()); + } + let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?; + bytes.clone_from_slice(src); + Ok(()) + } + + pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> { + // Empty accesses don't need to be valid pointers, but they should still be non-NULL + self.check_align(ptr, 1, Some(AccessKind::Write))?; + if count == 0 { + return Ok(()); + } + let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?; + for b in bytes { + *b = val; + } + Ok(()) + } + + pub fn read_primval(&self, ptr: MemoryPointer, size: u64, signed: bool) -> EvalResult<'tcx, PrimVal> { + self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer + let endianess = self.endianess(); + let bytes = self.get_bytes_unchecked(ptr, size, self.int_align(size))?; + // Undef check happens *after* we established that the alignment is correct. + // We must not return Ok() for unaligned pointers! + if self.check_defined(ptr, size).is_err() { + return Ok(PrimVal::Undef.into()); + } + // Now we do the actual reading + let bytes = if signed { + read_target_int(endianess, bytes).unwrap() as u128 + } else { + read_target_uint(endianess, bytes).unwrap() + }; + // See if we got a pointer + if size != self.pointer_size() { + if self.relocations(ptr, size)?.count() != 0 { + return err!(ReadPointerAsBytes); + } + } else { + let alloc = self.get(ptr.alloc_id)?; + match alloc.relocations.get(&ptr.offset) { + Some(&alloc_id) => return Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, bytes as u64))), + None => {}, + } + } + // We don't. Just return the bytes. 
+ Ok(PrimVal::Bytes(bytes)) + } + + pub fn read_ptr_sized_unsigned(&self, ptr: MemoryPointer) -> EvalResult<'tcx, PrimVal> { + self.read_primval(ptr, self.pointer_size(), false) + } + + pub fn write_primval(&mut self, ptr: MemoryPointer, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> { + let endianess = self.endianess(); + + let bytes = match val { + PrimVal::Ptr(val) => { + assert_eq!(size, self.pointer_size()); + val.offset as u128 + } + + PrimVal::Bytes(bytes) => { + // We need to mask here, or the byteorder crate can die when given a u64 larger + // than fits in an integer of the requested size. + let mask = match size { + 1 => !0u8 as u128, + 2 => !0u16 as u128, + 4 => !0u32 as u128, + 8 => !0u64 as u128, + 16 => !0, + n => bug!("unexpected PrimVal::Bytes size: {}", n), + }; + bytes & mask + } + + PrimVal::Undef => { + self.mark_definedness(PrimVal::Ptr(ptr).into(), size, false)?; + return Ok(()); + } + }; + + { + let align = self.int_align(size); + let dst = self.get_bytes_mut(ptr, size, align)?; + if signed { + write_target_int(endianess, dst, bytes as i128).unwrap(); + } else { + write_target_uint(endianess, dst, bytes).unwrap(); + } + } + + // See if we have to also write a relocation + match val { + PrimVal::Ptr(val) => { + self.get_mut(ptr.alloc_id)?.relocations.insert( + ptr.offset, + val.alloc_id, + ); + } + _ => {} + } + + Ok(()) + } + + pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, val: PrimVal) -> EvalResult<'tcx> { + let ptr_size = self.pointer_size(); + self.write_primval(ptr, val, ptr_size, false) + } + + fn int_align(&self, size: u64) -> u64 { + // We assume pointer-sized integers have the same alignment as pointers. + // We also assume signed and unsigned integers of the same size have the same alignment. 
+ match size { + 1 => self.tcx.data_layout.i8_align.abi(), + 2 => self.tcx.data_layout.i16_align.abi(), + 4 => self.tcx.data_layout.i32_align.abi(), + 8 => self.tcx.data_layout.i64_align.abi(), + 16 => self.tcx.data_layout.i128_align.abi(), + _ => bug!("bad integer size: {}", size), + } + } +} + +/// Relocations +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + fn relocations( + &self, + ptr: MemoryPointer, + size: u64, + ) -> EvalResult<'tcx, btree_map::Range> { + let start = ptr.offset.saturating_sub(self.pointer_size() - 1); + let end = ptr.offset + size; + Ok(self.get(ptr.alloc_id)?.relocations.range(start..end)) + } + + fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> { + // Find all relocations overlapping the given range. + let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect(); + if keys.is_empty() { + return Ok(()); + } + + // Find the start and end of the given range and its outermost relocations. + let start = ptr.offset; + let end = start + size; + let first = *keys.first().unwrap(); + let last = *keys.last().unwrap() + self.pointer_size(); + + let alloc = self.get_mut(ptr.alloc_id)?; + + // Mark parts of the outermost relocations as undefined if they partially fall outside the + // given range. + if first < start { + alloc.undef_mask.set_range(first, start, false); + } + if last > end { + alloc.undef_mask.set_range(end, last, false); + } + + // Forget all the relocations. 
+ for k in keys { + alloc.relocations.remove(&k); + } + + Ok(()) + } + + fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> { + let overlapping_start = self.relocations(ptr, 0)?.count(); + let overlapping_end = self.relocations(ptr.offset(size, self)?, 0)?.count(); + if overlapping_start + overlapping_end != 0 { + return err!(ReadPointerAsBytes); + } + Ok(()) + } +} + +/// Undefined bytes +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + // FIXME(solson): This is a very naive, slow version. + fn copy_undef_mask( + &mut self, + src: MemoryPointer, + dest: MemoryPointer, + size: u64, + ) -> EvalResult<'tcx> { + // The bits have to be saved locally before writing to dest in case src and dest overlap. + assert_eq!(size as usize as u64, size); + let mut v = Vec::with_capacity(size as usize); + for i in 0..size { + let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i); + v.push(defined); + } + for (i, defined) in v.into_iter().enumerate() { + self.get_mut(dest.alloc_id)?.undef_mask.set( + dest.offset + + i as u64, + defined, + ); + } + Ok(()) + } + + fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> { + let alloc = self.get(ptr.alloc_id)?; + if !alloc.undef_mask.is_range_defined( + ptr.offset, + ptr.offset + size, + ) + { + return err!(ReadUndefBytes); + } + Ok(()) + } + + pub fn mark_definedness( + &mut self, + ptr: Pointer, + size: u64, + new_state: bool, + ) -> EvalResult<'tcx> { + if size == 0 { + return Ok(()); + } + let ptr = ptr.to_ptr()?; + let alloc = self.get_mut(ptr.alloc_id)?; + alloc.undef_mask.set_range( + ptr.offset, + ptr.offset + size, + new_state, + ); + Ok(()) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Methods to access integers in the target endianess +//////////////////////////////////////////////////////////////////////////////// + +fn write_target_uint( + endianess: layout::Endian, + mut target: &mut [u8], + data: 
u128, +) -> Result<(), io::Error> { + let len = target.len(); + match endianess { + layout::Endian::Little => target.write_uint128::(data, len), + layout::Endian::Big => target.write_uint128::(data, len), + } +} +fn write_target_int( + endianess: layout::Endian, + mut target: &mut [u8], + data: i128, +) -> Result<(), io::Error> { + let len = target.len(); + match endianess { + layout::Endian::Little => target.write_int128::(data, len), + layout::Endian::Big => target.write_int128::(data, len), + } +} + +fn read_target_uint(endianess: layout::Endian, mut source: &[u8]) -> Result { + match endianess { + layout::Endian::Little => source.read_uint128::(source.len()), + layout::Endian::Big => source.read_uint128::(source.len()), + } +} + +fn read_target_int(endianess: layout::Endian, mut source: &[u8]) -> Result { + match endianess { + layout::Endian::Little => source.read_int128::(source.len()), + layout::Endian::Big => source.read_int128::(source.len()), + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Unaligned accesses +//////////////////////////////////////////////////////////////////////////////// + +pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> { + fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M>; + fn memory(&self) -> &Memory<'a, 'tcx, M>; + + // These are not supposed to be overriden. + fn read_maybe_aligned(&self, aligned: bool, f: F) -> EvalResult<'tcx, T> + where + F: FnOnce(&Self) -> EvalResult<'tcx, T>, + { + let old = self.memory().reads_are_aligned.get(); + // Do alignment checking if *all* nested calls say it has to be aligned. 
+ self.memory().reads_are_aligned.set(old && aligned); + let t = f(self); + self.memory().reads_are_aligned.set(old); + t + } + + fn read_maybe_aligned_mut(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T> + where + F: FnOnce(&mut Self) -> EvalResult<'tcx, T>, + { + let old = self.memory().reads_are_aligned.get(); + // Do alignment checking if *all* nested calls say it has to be aligned. + self.memory().reads_are_aligned.set(old && aligned); + let t = f(self); + self.memory().reads_are_aligned.set(old); + t + } + + fn write_maybe_aligned_mut(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T> + where + F: FnOnce(&mut Self) -> EvalResult<'tcx, T>, + { + let old = self.memory().writes_are_aligned.get(); + // Do alignment checking if *all* nested calls say it has to be aligned. + self.memory().writes_are_aligned.set(old && aligned); + let t = f(self); + self.memory().writes_are_aligned.set(old); + t + } + + /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef, + /// this may have to perform a load. + fn into_ptr( + &self, + value: Value, + ) -> EvalResult<'tcx, Pointer> { + Ok(match value { + Value::ByRef(PtrAndAlign { ptr, aligned }) => { + self.memory().read_maybe_aligned(aligned, |mem| mem.read_ptr_sized_unsigned(ptr.to_ptr()?))? 
+ } + Value::ByVal(ptr) | + Value::ByValPair(ptr, _) => ptr, + }.into()) + } + + fn into_ptr_vtable_pair( + &self, + value: Value, + ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> { + match value { + Value::ByRef(PtrAndAlign { + ptr: ref_ptr, + aligned, + }) => { + self.memory().read_maybe_aligned(aligned, |mem| { + let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into(); + let vtable = mem.read_ptr_sized_unsigned( + ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, + )?.to_ptr()?; + Ok((ptr, vtable)) + }) + } + + Value::ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)), + + Value::ByVal(PrimVal::Undef) => err!(ReadUndefBytes), + _ => bug!("expected ptr and vtable, got {:?}", value), + } + } + + fn into_slice( + &self, + value: Value, + ) -> EvalResult<'tcx, (Pointer, u64)> { + match value { + Value::ByRef(PtrAndAlign { + ptr: ref_ptr, + aligned, + }) => { + self.memory().read_maybe_aligned(aligned, |mem| { + let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into(); + let len = mem.read_ptr_sized_unsigned( + ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, + )?.to_bytes()? 
as u64; + Ok((ptr, len)) + }) + } + Value::ByValPair(ptr, val) => { + let len = val.to_u128()?; + assert_eq!(len as u64 as u128, len); + Ok((ptr.into(), len as u64)) + } + Value::ByVal(PrimVal::Undef) => err!(ReadUndefBytes), + Value::ByVal(_) => bug!("expected ptr and length, got {:?}", value), + } + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> HasMemory<'a, 'tcx, M> for Memory<'a, 'tcx, M> { + #[inline] + fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> { + self + } + + #[inline] + fn memory(&self) -> &Memory<'a, 'tcx, M> { + self + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> HasMemory<'a, 'tcx, M> for EvalContext<'a, 'tcx, M> { + #[inline] + fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> { + &mut self.memory + } + + #[inline] + fn memory(&self) -> &Memory<'a, 'tcx, M> { + &self.memory + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout for &'a Memory<'a, 'tcx, M> { + #[inline] + fn data_layout(&self) -> &TargetDataLayout { + &self.tcx.data_layout + } +} diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs new file mode 100644 index 0000000000000..fee62c8a82e2f --- /dev/null +++ b/src/librustc_mir/interpret/mod.rs @@ -0,0 +1,23 @@ +//! 
An interpreter for MIR used in CTFE and by miri + +mod cast; +mod const_eval; +mod eval_context; +mod place; +mod machine; +mod memory; +mod operator; +mod step; +mod terminator; +mod traits; + +pub use self::eval_context::{EvalContext, Frame, ResourceLimits, StackPopCleanup, + TyAndPacked, ValTy}; + +pub use self::place::{Place, PlaceExtra}; + +pub use self::memory::{Memory, MemoryKind, HasMemory}; + +pub use self::const_eval::{eval_body_as_integer, eval_body, CompileTimeEvaluator, const_eval_provider}; + +pub use self::machine::Machine; diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs new file mode 100644 index 0000000000000..6ab1aec38b863 --- /dev/null +++ b/src/librustc_mir/interpret/operator.rs @@ -0,0 +1,267 @@ +use rustc::mir; +use rustc::ty::Ty; +use rustc_const_math::ConstFloat; +use syntax::ast::FloatTy; +use std::cmp::Ordering; + +use super::{EvalContext, Place, Machine, ValTy}; + +use rustc::mir::interpret::{EvalResult, PrimVal, PrimValKind, Value, bytes_to_f32, bytes_to_f64}; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + fn binop_with_overflow( + &mut self, + op: mir::BinOp, + left: ValTy<'tcx>, + right: ValTy<'tcx>, + ) -> EvalResult<'tcx, (PrimVal, bool)> { + let left_val = self.value_to_primval(left)?; + let right_val = self.value_to_primval(right)?; + self.binary_op(op, left_val, left.ty, right_val, right.ty) + } + + /// Applies the binary operation `op` to the two operands and writes a tuple of the result + /// and a boolean signifying the potential overflow to the destination. 
+ pub fn intrinsic_with_overflow( + &mut self, + op: mir::BinOp, + left: ValTy<'tcx>, + right: ValTy<'tcx>, + dest: Place, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + let (val, overflowed) = self.binop_with_overflow(op, left, right)?; + let val = Value::ByValPair(val, PrimVal::from_bool(overflowed)); + let valty = ValTy { + value: val, + ty: dest_ty, + }; + self.write_value(valty, dest) + } + + /// Applies the binary operation `op` to the arguments and writes the result to the + /// destination. Returns `true` if the operation overflowed. + pub fn intrinsic_overflowing( + &mut self, + op: mir::BinOp, + left: ValTy<'tcx>, + right: ValTy<'tcx>, + dest: Place, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, bool> { + let (val, overflowed) = self.binop_with_overflow(op, left, right)?; + self.write_primval(dest, val, dest_ty)?; + Ok(overflowed) + } +} + +macro_rules! overflow { + ($op:ident, $l:expr, $r:expr) => ({ + let (val, overflowed) = $l.$op($r); + let primval = PrimVal::Bytes(val as u128); + Ok((primval, overflowed)) + }) +} + +macro_rules! int_arithmetic { + ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({ + let l = $l; + let r = $r; + use rustc::mir::interpret::PrimValKind::*; + match $kind { + I8 => overflow!($int_op, l as i8, r as i8), + I16 => overflow!($int_op, l as i16, r as i16), + I32 => overflow!($int_op, l as i32, r as i32), + I64 => overflow!($int_op, l as i64, r as i64), + I128 => overflow!($int_op, l as i128, r as i128), + U8 => overflow!($int_op, l as u8, r as u8), + U16 => overflow!($int_op, l as u16, r as u16), + U32 => overflow!($int_op, l as u32, r as u32), + U64 => overflow!($int_op, l as u64, r as u64), + U128 => overflow!($int_op, l as u128, r as u128), + _ => bug!("int_arithmetic should only be called on int primvals"), + } + }) +} + +macro_rules! 
int_shift { + ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({ + let l = $l; + let r = $r; + let r_wrapped = r as u32; + match $kind { + I8 => overflow!($int_op, l as i8, r_wrapped), + I16 => overflow!($int_op, l as i16, r_wrapped), + I32 => overflow!($int_op, l as i32, r_wrapped), + I64 => overflow!($int_op, l as i64, r_wrapped), + I128 => overflow!($int_op, l as i128, r_wrapped), + U8 => overflow!($int_op, l as u8, r_wrapped), + U16 => overflow!($int_op, l as u16, r_wrapped), + U32 => overflow!($int_op, l as u32, r_wrapped), + U64 => overflow!($int_op, l as u64, r_wrapped), + U128 => overflow!($int_op, l as u128, r_wrapped), + _ => bug!("int_shift should only be called on int primvals"), + }.map(|(val, over)| (val, over || r != r_wrapped as u128)) + }) +} + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + /// Returns the result of the specified operation and whether it overflowed. + pub fn binary_op( + &self, + bin_op: mir::BinOp, + left: PrimVal, + left_ty: Ty<'tcx>, + right: PrimVal, + right_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, (PrimVal, bool)> { + use rustc::mir::BinOp::*; + use rustc::mir::interpret::PrimValKind::*; + + let left_kind = self.ty_to_primval_kind(left_ty)?; + let right_kind = self.ty_to_primval_kind(right_ty)?; + //trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); + + // I: Handle operations that support pointers + if !left_kind.is_float() && !right_kind.is_float() { + if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? { + return Ok(handled); + } + } + + // II: From now on, everything must be bytes, no pointers + let l = left.to_bytes()?; + let r = right.to_bytes()?; + + // These ops can have an RHS with a different numeric type. 
+ if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) { + return match bin_op { + Shl => int_shift!(left_kind, overflowing_shl, l, r), + Shr => int_shift!(left_kind, overflowing_shr, l, r), + _ => bug!("it has already been checked that this is a shift op"), + }; + } + + if left_kind != right_kind { + let msg = format!( + "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", + bin_op, + left, + left_kind, + right, + right_kind + ); + return err!(Unimplemented(msg)); + } + + let float_op = |op, l, r, ty| { + let l = ConstFloat { + bits: l, + ty, + }; + let r = ConstFloat { + bits: r, + ty, + }; + match op { + Eq => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Equal), + Ne => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Equal), + Lt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Less), + Le => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Greater), + Gt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Greater), + Ge => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Less), + Add => PrimVal::Bytes((l + r).unwrap().bits), + Sub => PrimVal::Bytes((l - r).unwrap().bits), + Mul => PrimVal::Bytes((l * r).unwrap().bits), + Div => PrimVal::Bytes((l / r).unwrap().bits), + Rem => PrimVal::Bytes((l % r).unwrap().bits), + _ => bug!("invalid float op: `{:?}`", op), + } + }; + + let val = match (bin_op, left_kind) { + (_, F32) => float_op(bin_op, l, r, FloatTy::F32), + (_, F64) => float_op(bin_op, l, r, FloatTy::F64), + + + (Eq, _) => PrimVal::from_bool(l == r), + (Ne, _) => PrimVal::from_bool(l != r), + + (Lt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) < (r as i128)), + (Lt, _) => PrimVal::from_bool(l < r), + (Le, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) <= (r as i128)), + (Le, _) => PrimVal::from_bool(l <= r), + (Gt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) > (r as i128)), + (Gt, _) => PrimVal::from_bool(l > r), + (Ge, k) if k.is_signed_int() => PrimVal::from_bool((l as 
i128) >= (r as i128)), + (Ge, _) => PrimVal::from_bool(l >= r), + + (BitOr, _) => PrimVal::Bytes(l | r), + (BitAnd, _) => PrimVal::Bytes(l & r), + (BitXor, _) => PrimVal::Bytes(l ^ r), + + (Add, k) if k.is_int() => return int_arithmetic!(k, overflowing_add, l, r), + (Sub, k) if k.is_int() => return int_arithmetic!(k, overflowing_sub, l, r), + (Mul, k) if k.is_int() => return int_arithmetic!(k, overflowing_mul, l, r), + (Div, k) if k.is_int() => return int_arithmetic!(k, overflowing_div, l, r), + (Rem, k) if k.is_int() => return int_arithmetic!(k, overflowing_rem, l, r), + + _ => { + let msg = format!( + "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", + bin_op, + left, + left_kind, + right, + right_kind + ); + return err!(Unimplemented(msg)); + } + }; + + Ok((val, false)) + } +} + +pub fn unary_op<'tcx>( + un_op: mir::UnOp, + val: PrimVal, + val_kind: PrimValKind, +) -> EvalResult<'tcx, PrimVal> { + use rustc::mir::UnOp::*; + use rustc::mir::interpret::PrimValKind::*; + + let bytes = val.to_bytes()?; + + let result_bytes = match (un_op, val_kind) { + (Not, Bool) => !val.to_bool()? 
as u128, + + (Not, U8) => !(bytes as u8) as u128, + (Not, U16) => !(bytes as u16) as u128, + (Not, U32) => !(bytes as u32) as u128, + (Not, U64) => !(bytes as u64) as u128, + (Not, U128) => !bytes, + + (Not, I8) => !(bytes as i8) as u128, + (Not, I16) => !(bytes as i16) as u128, + (Not, I32) => !(bytes as i32) as u128, + (Not, I64) => !(bytes as i64) as u128, + (Not, I128) => !(bytes as i128) as u128, + + (Neg, I8) => -(bytes as i8) as u128, + (Neg, I16) => -(bytes as i16) as u128, + (Neg, I32) => -(bytes as i32) as u128, + (Neg, I64) => -(bytes as i64) as u128, + (Neg, I128) => -(bytes as i128) as u128, + + (Neg, F32) => (-bytes_to_f32(bytes)).bits, + (Neg, F64) => (-bytes_to_f64(bytes)).bits, + + _ => { + let msg = format!("unimplemented unary op: {:?}, {:?}", un_op, val); + return err!(Unimplemented(msg)); + } + }; + + Ok(PrimVal::Bytes(result_bytes)) +} diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs new file mode 100644 index 0000000000000..0e44b414d7fe5 --- /dev/null +++ b/src/librustc_mir/interpret/place.rs @@ -0,0 +1,428 @@ +use rustc::mir; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{LayoutOf, TyLayout}; +use rustc_data_structures::indexed_vec::Idx; +use rustc::mir::interpret::{GlobalId, PtrAndAlign}; + +use rustc::mir::interpret::{Value, PrimVal, EvalResult, Pointer, MemoryPointer}; +use super::{EvalContext, Machine, ValTy}; +use interpret::memory::HasMemory; + +#[derive(Copy, Clone, Debug)] +pub enum Place { + /// An place referring to a value allocated in the `Memory` system. + Ptr { + /// An place may have an invalid (integral or undef) pointer, + /// since it might be turned back into a reference + /// before ever being dereferenced. + ptr: PtrAndAlign, + extra: PlaceExtra, + }, + + /// An place referring to a value on the stack. Represented by a stack frame index paired with + /// a Mir local index. 
+ Local { frame: usize, local: mir::Local }, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum PlaceExtra { + None, + Length(u64), + Vtable(MemoryPointer), + DowncastVariant(usize), +} + +impl<'tcx> Place { + /// Produces an Place that will error if attempted to be read from + pub fn undef() -> Self { + Self::from_primval_ptr(PrimVal::Undef.into()) + } + + pub fn from_primval_ptr(ptr: Pointer) -> Self { + Place::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: PlaceExtra::None, + } + } + + pub fn from_ptr(ptr: MemoryPointer) -> Self { + Self::from_primval_ptr(ptr.into()) + } + + pub fn to_ptr_extra_aligned(self) -> (PtrAndAlign, PlaceExtra) { + match self { + Place::Ptr { ptr, extra } => (ptr, extra), + _ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self), + + } + } + + pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + let (ptr, extra) = self.to_ptr_extra_aligned(); + // At this point, we forget about the alignment information -- the place has been turned into a reference, + // and no matter where it came from, it now must be aligned. + assert_eq!(extra, PlaceExtra::None); + ptr.to_ptr() + } + + pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) { + match ty.sty { + ty::TyArray(elem, n) => (elem, n.val.to_const_int().unwrap().to_u64().unwrap() as u64), + + ty::TySlice(elem) => { + match self { + Place::Ptr { extra: PlaceExtra::Length(len), .. 
} => (elem, len), + _ => { + bug!( + "elem_ty_and_len of a TySlice given non-slice place: {:?}", + self + ) + } + } + } + + _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty), + } + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + /// Reads a value from the place without going through the intermediate step of obtaining + /// a `miri::Place` + pub fn try_read_place( + &mut self, + place: &mir::Place<'tcx>, + ) -> EvalResult<'tcx, Option<Value>> { + use rustc::mir::Place::*; + match *place { + // Might allow this in the future, right now there's no way to do this from Rust code anyway + Local(mir::RETURN_PLACE) => err!(ReadFromReturnPointer), + // Directly reading a local will always succeed + Local(local) => self.frame().get_local(local).map(Some), + // Directly reading a static will always succeed + Static(ref static_) => { + let instance = ty::Instance::mono(self.tcx, static_.def_id); + let cid = GlobalId { + instance, + promoted: None, + }; + Ok(Some(Value::ByRef( + self.tcx.interpret_interner.borrow().get_cached(cid).expect("global not cached"), + ))) + } + Projection(ref proj) => self.try_read_place_projection(proj), + } + } + + fn try_read_place_projection( + &mut self, + proj: &mir::PlaceProjection<'tcx>, + ) -> EvalResult<'tcx, Option<Value>> { + use rustc::mir::ProjectionElem::*; + let base = match self.try_read_place(&proj.base)? { + Some(base) => base, + None => return Ok(None), + }; + let base_ty = self.place_ty(&proj.base); + match proj.elem { + Field(field, _) => { + let base_layout = self.layout_of(base_ty)?; + let field_index = field.index(); + let field = base_layout.field(&self, field_index)?; + let offset = base_layout.fields.offset(field_index); + match base { + // the field covers the entire type + Value::ByValPair(..) | + Value::ByVal(_) if offset.bytes() == 0 && field.size == base_layout.size => Ok(Some(base)), + // split fat pointers, 2 element tuples, ...
+ Value::ByValPair(a, b) if base_layout.fields.count() == 2 => { + let val = [a, b][field_index]; + Ok(Some(Value::ByVal(val))) + }, + _ => Ok(None), + } + }, + // The NullablePointer cases should work fine, need to take care for normal enums + Downcast(..) | + Subslice { .. } | + // reading index 0 or index 1 from a ByVal or ByVal pair could be optimized + ConstantIndex { .. } | Index(_) | + // No way to optimize this projection any better than the normal place path + Deref => Ok(None), + } + } + + /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses. + pub(super) fn eval_and_read_place( + &mut self, + place: &mir::Place<'tcx>, + ) -> EvalResult<'tcx, Value> { + // Shortcut for things like accessing a fat pointer's field, + // which would otherwise (in the `eval_place` path) require moving a `ByValPair` to memory + // and returning an `Place::Ptr` to it + if let Some(val) = self.try_read_place(place)? { + return Ok(val); + } + let place = self.eval_place(place)?; + self.read_place(place) + } + + pub fn read_place(&self, place: Place) -> EvalResult<'tcx, Value> { + match place { + Place::Ptr { ptr, extra } => { + assert_eq!(extra, PlaceExtra::None); + Ok(Value::ByRef(ptr)) + } + Place::Local { frame, local } => self.stack[frame].get_local(local), + } + } + + pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, Place> { + use rustc::mir::Place::*; + let place = match *mir_place { + Local(mir::RETURN_PLACE) => self.frame().return_place, + Local(local) => Place::Local { + frame: self.cur_frame(), + local, + }, + + Static(ref static_) => { + let instance = ty::Instance::mono(self.tcx, static_.def_id); + let gid = GlobalId { + instance, + promoted: None, + }; + Place::Ptr { + ptr: self.tcx.interpret_interner.borrow().get_cached(gid).expect("uncached global"), + extra: PlaceExtra::None, + } + } + + Projection(ref proj) => { + let ty = self.place_ty(&proj.base); + let place = self.eval_place(&proj.base)?; + 
return self.eval_place_projection(place, ty, &proj.elem); + } + }; + + if log_enabled!(::log::LogLevel::Trace) { + self.dump_local(place); + } + + Ok(place) + } + + pub fn place_field( + &mut self, + base: Place, + field: mir::Field, + mut base_layout: TyLayout<'tcx>, + ) -> EvalResult<'tcx, (Place, TyLayout<'tcx>)> { + match base { + Place::Ptr { extra: PlaceExtra::DowncastVariant(variant_index), .. } => { + base_layout = base_layout.for_variant(&self, variant_index); + } + _ => {} + } + let field_index = field.index(); + let field = base_layout.field(&self, field_index)?; + let offset = base_layout.fields.offset(field_index); + + // Do not allocate in trivial cases + let (base_ptr, base_extra) = match base { + Place::Ptr { ptr, extra } => (ptr, extra), + Place::Local { frame, local } => { + match self.stack[frame].get_local(local)? { + // in case the field covers the entire type, just return the value + Value::ByVal(_) if offset.bytes() == 0 && + field.size == base_layout.size => { + return Ok((base, field)); + } + Value::ByRef { .. } | + Value::ByValPair(..) | + Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(), + } + } + }; + + let offset = match base_extra { + PlaceExtra::Vtable(tab) => { + let (_, align) = self.size_and_align_of_dst( + base_layout.ty, + base_ptr.ptr.to_value_with_vtable(tab), + )?; + offset.abi_align(align).bytes() + } + _ => offset.bytes(), + }; + + let mut ptr = base_ptr.offset(offset, &self)?; + // if we were unaligned, stay unaligned + // no matter what we were, if we are packed, we must not be aligned anymore + ptr.aligned &= !base_layout.is_packed(); + + let extra = if !field.is_unsized() { + PlaceExtra::None + } else { + match base_extra { + PlaceExtra::None => bug!("expected fat pointer"), + PlaceExtra::DowncastVariant(..) 
=> { + bug!("Rust doesn't support unsized fields in enum variants") + } + PlaceExtra::Vtable(_) | + PlaceExtra::Length(_) => {} + } + base_extra + }; + + Ok((Place::Ptr { ptr, extra }, field)) + } + + pub fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> { + Ok(match self.tcx.struct_tail(ty).sty { + ty::TyDynamic(..) => { + let (ptr, vtable) = self.into_ptr_vtable_pair(val)?; + Place::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: PlaceExtra::Vtable(vtable), + } + } + ty::TyStr | ty::TySlice(_) => { + let (ptr, len) = self.into_slice(val)?; + Place::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: PlaceExtra::Length(len), + } + } + _ => Place::from_primval_ptr(self.into_ptr(val)?), + }) + } + + pub fn place_index( + &mut self, + base: Place, + outer_ty: Ty<'tcx>, + n: u64, + ) -> EvalResult<'tcx, Place> { + // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length. + let base = self.force_allocation(base)?; + let (base_ptr, _) = base.to_ptr_extra_aligned(); + + let (elem_ty, len) = base.elem_ty_and_len(outer_ty); + let elem_size = self.layout_of(elem_ty)?.size.bytes(); + assert!( + n < len, + "Tried to access element {} of array/slice with length {}", + n, + len + ); + let ptr = base_ptr.offset(n * elem_size, &*self)?; + Ok(Place::Ptr { + ptr, + extra: PlaceExtra::None, + }) + } + + pub(super) fn place_downcast( + &mut self, + base: Place, + variant: usize, + ) -> EvalResult<'tcx, Place> { + // FIXME(solson) + let base = self.force_allocation(base)?; + let (ptr, _) = base.to_ptr_extra_aligned(); + let extra = PlaceExtra::DowncastVariant(variant); + Ok(Place::Ptr { ptr, extra }) + } + + pub fn eval_place_projection( + &mut self, + base: Place, + base_ty: Ty<'tcx>, + proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>, + ) -> EvalResult<'tcx, Place> { + use rustc::mir::ProjectionElem::*; + let (ptr, extra) = match *proj_elem { + Field(field, _) => { + let 
layout = self.layout_of(base_ty)?; + return Ok(self.place_field(base, field, layout)?.0); + } + + Downcast(_, variant) => { + return self.place_downcast(base, variant); + } + + Deref => { + let val = self.read_place(base)?; + + let pointee_type = match base_ty.sty { + ty::TyRawPtr(ref tam) | + ty::TyRef(_, ref tam) => tam.ty, + ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(), + _ => bug!("can only deref pointer types"), + }; + + trace!("deref to {} on {:?}", pointee_type, val); + + return self.val_to_place(val, pointee_type); + } + + Index(local) => { + let value = self.frame().get_local(local)?; + let ty = self.tcx.types.usize; + let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?; + return self.place_index(base, base_ty, n); + } + + ConstantIndex { + offset, + min_length, + from_end, + } => { + // FIXME(solson) + let base = self.force_allocation(base)?; + let (base_ptr, _) = base.to_ptr_extra_aligned(); + + let (elem_ty, n) = base.elem_ty_and_len(base_ty); + let elem_size = self.layout_of(elem_ty)?.size.bytes(); + assert!(n >= min_length as u64); + + let index = if from_end { + n - u64::from(offset) + } else { + u64::from(offset) + }; + + let ptr = base_ptr.offset(index * elem_size, &self)?; + (ptr, PlaceExtra::None) + } + + Subslice { from, to } => { + // FIXME(solson) + let base = self.force_allocation(base)?; + let (base_ptr, _) = base.to_ptr_extra_aligned(); + + let (elem_ty, n) = base.elem_ty_and_len(base_ty); + let elem_size = self.layout_of(elem_ty)?.size.bytes(); + assert!(u64::from(from) <= n - u64::from(to)); + let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?; + // sublicing arrays produces arrays + let extra = if self.type_is_sized(base_ty) { + PlaceExtra::None + } else { + PlaceExtra::Length(n - u64::from(to) - u64::from(from)) + }; + (ptr, extra) + } + }; + + Ok(Place::Ptr { ptr, extra }) + } + + pub fn place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> { + self.monomorphize( + place.ty(self.mir(), 
self.tcx).to_ty(self.tcx), + self.substs(), + ) + } +} diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs new file mode 100644 index 0000000000000..352e151e3a195 --- /dev/null +++ b/src/librustc_mir/interpret/step.rs @@ -0,0 +1,349 @@ +//! This module contains the `EvalContext` methods for executing a single step of the interpreter. +//! +//! The main entry point is the `step` method. + +use rustc::hir; +use rustc::mir::visit::{Visitor, PlaceContext}; +use rustc::mir; +use rustc::ty::{self, Instance}; +use rustc::ty::layout::LayoutOf; +use rustc::middle::const_val::ConstVal; +use rustc::mir::interpret::{PtrAndAlign, GlobalId}; + +use rustc::mir::interpret::{EvalResult, EvalErrorKind}; +use super::{EvalContext, StackPopCleanup, Place, Machine}; + +use syntax::codemap::Span; +use syntax::ast::Mutability; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub fn inc_step_counter_and_check_limit(&mut self, n: u64) -> EvalResult<'tcx> { + self.steps_remaining = self.steps_remaining.saturating_sub(n); + if self.steps_remaining > 0 { + Ok(()) + } else { + err!(ExecutionTimeLimitReached) + } + } + + /// Returns true as long as there are more things to do. + pub fn step(&mut self) -> EvalResult<'tcx, bool> { + self.inc_step_counter_and_check_limit(1)?; + if self.stack.is_empty() { + return Ok(false); + } + + let block = self.frame().block; + let stmt_id = self.frame().stmt; + let mir = self.mir(); + let basic_block = &mir.basic_blocks()[block]; + + let old_frames = self.cur_frame(); + + if let Some(stmt) = basic_block.statements.get(stmt_id) { + let mut new = Ok(false); + ConstantExtractor { + span: stmt.source_info.span, + instance: self.frame().instance, + ecx: self, + mir, + new_constant: &mut new, + }.visit_statement( + block, + stmt, + mir::Location { + block, + statement_index: stmt_id, + }, + ); + // if ConstantExtractor added a new frame, we don't execute anything here + // but await the next call to step + if !new? 
{ + assert_eq!(old_frames, self.cur_frame()); + self.statement(stmt)?; + } + return Ok(true); + } + + let terminator = basic_block.terminator(); + let mut new = Ok(false); + ConstantExtractor { + span: terminator.source_info.span, + instance: self.frame().instance, + ecx: self, + mir, + new_constant: &mut new, + }.visit_terminator( + block, + terminator, + mir::Location { + block, + statement_index: stmt_id, + }, + ); + // if ConstantExtractor added a new frame, we don't execute anything here + // but await the next call to step + if !new? { + assert_eq!(old_frames, self.cur_frame()); + self.terminator(terminator)?; + } + Ok(true) + } + + fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> { + trace!("{:?}", stmt); + + use rustc::mir::StatementKind::*; + + // Some statements (e.g. box) push new stack frames. We have to record the stack frame number + // *before* executing the statement. + let frame_idx = self.cur_frame(); + + match stmt.kind { + Assign(ref place, ref rvalue) => self.eval_rvalue_into_place(rvalue, place)?, + + SetDiscriminant { + ref place, + variant_index, + } => { + let dest = self.eval_place(place)?; + let dest_ty = self.place_ty(place); + self.write_discriminant_value(dest_ty, dest, variant_index)?; + } + + // Mark locals as alive + StorageLive(local) => { + let old_val = self.frame_mut().storage_live(local)?; + self.deallocate_local(old_val)?; + } + + // Mark locals as dead + StorageDead(local) => { + let old_val = self.frame_mut().storage_dead(local)?; + self.deallocate_local(old_val)?; + } + + // Validity checks. + Validate(op, ref places) => { + for operand in places { + M::validation_op(self, op, operand)?; + } + } + EndRegion(ce) => { + M::end_region(self, Some(ce))?; + } + + // Defined to do nothing. These are added by optimization passes, to avoid changing the + // size of MIR constantly. + Nop => {} + + InlineAsm { .. 
} => return err!(InlineAsm), + } + + self.stack[frame_idx].stmt += 1; + Ok(()) + } + + fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> { + trace!("{:?}", terminator.kind); + self.eval_terminator(terminator)?; + if !self.stack.is_empty() { + trace!("// {:?}", self.frame().block); + } + Ok(()) + } + + /// returns `true` if a stackframe was pushed + fn global_item( + &mut self, + instance: Instance<'tcx>, + span: Span, + mutability: Mutability, + ) -> EvalResult<'tcx, bool> { + debug!("global_item: {:?}", instance); + let cid = GlobalId { + instance, + promoted: None, + }; + if self.tcx.interpret_interner.borrow().get_cached(cid).is_some() { + return Ok(false); + } + if self.tcx.has_attr(instance.def_id(), "linkage") { + M::global_item_with_linkage(self, cid.instance, mutability)?; + return Ok(false); + } + // FIXME(eddyb) use `Instance::ty` when it becomes available. + let instance_ty = + self.monomorphize(instance.def.def_ty(self.tcx), instance.substs); + let layout = self.layout_of(instance_ty)?; + assert!(!layout.is_unsized()); + let ptr = self.memory.allocate( + layout.size.bytes(), + layout.align.abi(), + None, + )?; + self.tcx.interpret_interner.borrow_mut().cache( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned: !layout.is_packed(), + }, + ); + let internally_mutable = !layout.ty.is_freeze(self.tcx, self.param_env, span); + let mutability = if mutability == Mutability::Mutable || internally_mutable { + Mutability::Mutable + } else { + Mutability::Immutable + }; + let cleanup = StackPopCleanup::MarkStatic(mutability); + let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id())); + trace!("pushing stack frame for global: {}", name); + let mir = self.load_mir(instance.def)?; + self.push_stack_frame( + instance, + span, + mir, + Place::from_ptr(ptr), + cleanup, + )?; + Ok(true) + } +} + +struct ConstantExtractor<'a, 'b: 'a, 'tcx: 'b, M: Machine<'tcx> + 'a> { + span: Span, + ecx: &'a mut EvalContext<'b, 'tcx, M>, + 
mir: &'tcx mir::Mir<'tcx>, + instance: ty::Instance<'tcx>, + // Whether a stackframe for a new constant has been pushed + new_constant: &'a mut EvalResult<'tcx, bool>, +} + +impl<'a, 'b, 'tcx, M: Machine<'tcx>> ConstantExtractor<'a, 'b, 'tcx, M> { + fn try<F: FnOnce(&mut Self) -> EvalResult<'tcx, bool>>(&mut self, f: F) { + match *self.new_constant { + // already computed a constant, don't do more than one per iteration + Ok(true) => {}, + // no constants computed yet + Ok(false) => *self.new_constant = f(self), + // error happened, abort the visitor traversing + Err(_) => {}, + } + } +} + +impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx, M> { + fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Location) { + self.super_constant(constant, location); + self.try(|this| { + match constant.literal { + // already computed by rustc + mir::Literal::Value { value: &ty::Const { val: ConstVal::Unevaluated(def_id, substs), .. } } => { + debug!("global_item: {:?}, {:#?}", def_id, substs); + let substs = this.ecx.tcx.trans_apply_param_substs(this.instance.substs, &substs); + debug!("global_item_new_substs: {:#?}", substs); + debug!("global_item_param_env: {:#?}", this.ecx.param_env); + let instance = Instance::resolve( + this.ecx.tcx, + this.ecx.param_env, + def_id, + substs, + ).ok_or(EvalErrorKind::TypeckError)?; // turn error prop into a panic to expose associated type in const issue + this.ecx.global_item( + instance, + constant.span, + Mutability::Immutable, + ) + } + mir::Literal::Value { ..
} => Ok(false), + mir::Literal::Promoted { index } => { + let cid = GlobalId { + instance: this.instance, + promoted: Some(index), + }; + if this.ecx.tcx.interpret_interner.borrow().get_cached(cid).is_some() { + return Ok(false); + } + let mir = &this.mir.promoted[index]; + let ty = this.ecx.monomorphize(mir.return_ty(), this.instance.substs); + let layout = this.ecx.layout_of(ty)?; + assert!(!layout.is_unsized()); + let ptr = this.ecx.memory.allocate( + layout.size.bytes(), + layout.align.abi(), + None, + )?; + this.ecx.tcx.interpret_interner.borrow_mut().cache( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned: !layout.is_packed(), + }, + ); + trace!("pushing stack frame for {:?}", index); + this.ecx.push_stack_frame( + this.instance, + constant.span, + mir, + Place::from_ptr(ptr), + StackPopCleanup::MarkStatic(Mutability::Immutable), + )?; + Ok(true) + } + } + }); + } + + fn visit_place( + &mut self, + place: &mir::Place<'tcx>, + context: PlaceContext<'tcx>, + location: mir::Location, + ) { + self.super_place(place, context, location); + self.try(|this| { + if let mir::Place::Static(ref static_) = *place { + let def_id = static_.def_id; + let span = this.span; + if let Some(node_item) = this.ecx.tcx.hir.get_if_local(def_id) { + if let hir::map::Node::NodeItem(&hir::Item { ref node, .. 
}) = node_item { + if let hir::ItemStatic(_, m, _) = *node { + let instance = Instance::mono(this.ecx.tcx, def_id); + this.ecx.global_item( + instance, + span, + if m == hir::MutMutable { + Mutability::Mutable + } else { + Mutability::Immutable + }, + ) + } else { + bug!("static def id doesn't point to static"); + } + } else { + bug!("static def id doesn't point to item"); + } + } else { + let def = this.ecx.tcx.describe_def(def_id).expect("static not found"); + if let hir::def::Def::Static(_, mutable) = def { + let instance = Instance::mono(this.ecx.tcx, def_id); + this.ecx.global_item( + instance, + span, + if mutable { + Mutability::Mutable + } else { + Mutability::Immutable + }, + ) + } else { + bug!("static found but isn't a static: {:?}", def); + } + } + } else { + Ok(false) + } + }); + } +} diff --git a/src/librustc_mir/interpret/terminator/drop.rs b/src/librustc_mir/interpret/terminator/drop.rs new file mode 100644 index 0000000000000..5db46149834d2 --- /dev/null +++ b/src/librustc_mir/interpret/terminator/drop.rs @@ -0,0 +1,83 @@ +use rustc::mir::BasicBlock; +use rustc::ty::{self, Ty}; +use syntax::codemap::Span; + +use rustc::mir::interpret::{EvalResult, PrimVal, Value}; +use interpret::{Machine, ValTy, EvalContext, Place, PlaceExtra}; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub(crate) fn drop_place( + &mut self, + place: Place, + instance: ty::Instance<'tcx>, + ty: Ty<'tcx>, + span: Span, + target: BasicBlock, + ) -> EvalResult<'tcx> { + trace!("drop_place: {:#?}", place); + // We take the address of the object. This may well be unaligned, which is fine for us here. + // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared + // by rustc. + let val = match self.force_allocation(place)? 
{ + Place::Ptr { + ptr, + extra: PlaceExtra::Vtable(vtable), + } => ptr.ptr.to_value_with_vtable(vtable), + Place::Ptr { + ptr, + extra: PlaceExtra::Length(len), + } => ptr.ptr.to_value_with_len(len), + Place::Ptr { + ptr, + extra: PlaceExtra::None, + } => ptr.ptr.to_value(), + _ => bug!("force_allocation broken"), + }; + self.drop(val, instance, ty, span, target) + } + + fn drop( + &mut self, + arg: Value, + instance: ty::Instance<'tcx>, + ty: Ty<'tcx>, + span: Span, + target: BasicBlock, + ) -> EvalResult<'tcx> { + trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def); + + let instance = match ty.sty { + ty::TyDynamic(..) => { + let vtable = match arg { + Value::ByValPair(_, PrimVal::Ptr(vtable)) => vtable, + _ => bug!("expected fat ptr, got {:?}", arg), + }; + match self.read_drop_type_from_vtable(vtable)? { + Some(func) => func, + // no drop fn -> bail out + None => { + self.goto_block(target); + return Ok(()) + }, + } + } + _ => instance, + }; + + // the drop function expects a reference to the value + let valty = ValTy { + value: arg, + ty: self.tcx.mk_mut_ptr(ty), + }; + + let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone(); + + self.eval_fn_call( + instance, + Some((Place::undef(), target)), + &vec![valty], + span, + fn_sig, + ) + } +} diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs new file mode 100644 index 0000000000000..1cdfe1ff9ceac --- /dev/null +++ b/src/librustc_mir/interpret/terminator/mod.rs @@ -0,0 +1,420 @@ +use rustc::mir; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::LayoutOf; +use syntax::codemap::Span; +use syntax::abi::Abi; + +use rustc::mir::interpret::{PtrAndAlign, EvalResult, PrimVal, Value}; +use super::{EvalContext, eval_context, + Place, Machine, ValTy}; + +use rustc_data_structures::indexed_vec::Idx; +use interpret::memory::HasMemory; + +mod drop; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub fn goto_block(&mut self, target: 
mir::BasicBlock) { + self.frame_mut().block = target; + self.frame_mut().stmt = 0; + } + + pub(super) fn eval_terminator( + &mut self, + terminator: &mir::Terminator<'tcx>, + ) -> EvalResult<'tcx> { + use rustc::mir::TerminatorKind::*; + match terminator.kind { + Return => { + self.dump_local(self.frame().return_place); + self.pop_stack_frame()? + } + + Goto { target } => self.goto_block(target), + + SwitchInt { + ref discr, + ref values, + ref targets, + .. + } => { + // FIXME(CTFE): forbid branching + let discr_val = self.eval_operand(discr)?; + let discr_prim = self.value_to_primval(discr_val)?; + + // Branch to the `otherwise` case by default, if no match is found. + let mut target_block = targets[targets.len() - 1]; + + for (index, const_int) in values.iter().enumerate() { + let prim = PrimVal::Bytes(const_int.to_u128_unchecked()); + if discr_prim.to_bytes()? == prim.to_bytes()? { + target_block = targets[index]; + break; + } + } + + self.goto_block(target_block); + } + + Call { + ref func, + ref args, + ref destination, + .. + } => { + let destination = match *destination { + Some((ref lv, target)) => Some((self.eval_place(lv)?, target)), + None => None, + }; + + let func = self.eval_operand(func)?; + let (fn_def, sig) = match func.ty.sty { + ty::TyFnPtr(sig) => { + let fn_ptr = self.value_to_primval(func)?.to_ptr()?; + let instance = self.memory.get_fn(fn_ptr)?; + // FIXME(eddyb) use `Instance::ty` when it becomes available. + let instance_ty = + self.monomorphize(instance.def.def_ty(self.tcx), instance.substs); + match instance_ty.sty { + ty::TyFnDef(..) => { + let real_sig = instance_ty.fn_sig(self.tcx); + let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig); + let real_sig = self.tcx.erase_late_bound_regions_and_normalize(&real_sig); + if !self.check_sig_compat(sig, real_sig)? 
{ + return err!(FunctionPointerTyMismatch(real_sig, sig)); + } + } + ref other => bug!("instance def ty: {:?}", other), + } + (instance, sig) + } + ty::TyFnDef(def_id, substs) => ( + self.resolve(def_id, substs)?, + func.ty.fn_sig(self.tcx), + ), + _ => { + let msg = format!("can't handle callee of type {:?}", func.ty); + return err!(Unimplemented(msg)); + } + }; + let args = self.operands_to_args(args)?; + let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig); + self.eval_fn_call( + fn_def, + destination, + &args, + terminator.source_info.span, + sig, + )?; + } + + Drop { + ref location, + target, + .. + } => { + // FIXME(CTFE): forbid drop in const eval + let place = self.eval_place(location)?; + let ty = self.place_ty(location); + let ty = self.tcx.trans_apply_param_substs(self.substs(), &ty); + trace!("TerminatorKind::drop: {:?}, type {}", location, ty); + + let instance = eval_context::resolve_drop_in_place(self.tcx, ty); + self.drop_place( + place, + instance, + ty, + terminator.source_info.span, + target, + )?; + } + + Assert { + ref cond, + expected, + ref msg, + target, + .. + } => { + let cond_val = self.eval_operand_to_primval(cond)?.to_bool()?; + if expected == cond_val { + self.goto_block(target); + } else { + use rustc::mir::AssertMessage::*; + return match *msg { + BoundsCheck { ref len, ref index } => { + let span = terminator.source_info.span; + let len = self.eval_operand_to_primval(len) + .expect("can't eval len") + .to_u64()?; + let index = self.eval_operand_to_primval(index) + .expect("can't eval index") + .to_u64()?; + err!(ArrayIndexOutOfBounds(span, len, index)) + } + Math(ref err) => { + err!(Math(terminator.source_info.span, err.clone())) + } + GeneratorResumedAfterReturn | + GeneratorResumedAfterPanic => unimplemented!(), + }; + } + } + + Yield { .. } => unimplemented!("{:#?}", terminator.kind), + GeneratorDrop => unimplemented!(), + DropAndReplace { .. } => unimplemented!(), + Resume => unimplemented!(), + FalseEdges { .. 
} => bug!("should have been eliminated by `simplify_branches` mir pass"), + Unreachable => return err!(Unreachable), + } + + Ok(()) + } + + /// Decides whether it is okay to call the method with signature `real_sig` using signature `sig`. + /// FIXME: This should take into account the platform-dependent ABI description. + fn check_sig_compat( + &mut self, + sig: ty::FnSig<'tcx>, + real_sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool> { + fn check_ty_compat<'tcx>(ty: Ty<'tcx>, real_ty: Ty<'tcx>) -> bool { + if ty == real_ty { + return true; + } // This is actually a fast pointer comparison + return match (&ty.sty, &real_ty.sty) { + // Permit changing the pointer type of raw pointers and references as well as + // mutability of raw pointers. + // TODO: Should not be allowed when fat pointers are involved. + (&ty::TyRawPtr(_), &ty::TyRawPtr(_)) => true, + (&ty::TyRef(_, _), &ty::TyRef(_, _)) => { + ty.is_mutable_pointer() == real_ty.is_mutable_pointer() + } + // rule out everything else + _ => false, + }; + } + + if sig.abi == real_sig.abi && sig.variadic == real_sig.variadic && + sig.inputs_and_output.len() == real_sig.inputs_and_output.len() && + sig.inputs_and_output + .iter() + .zip(real_sig.inputs_and_output) + .all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) + { + // Definitely good. + return Ok(true); + } + + if sig.variadic || real_sig.variadic { + // We're not touching this + return Ok(false); + } + + // We need to allow what comes up when a non-capturing closure is cast to a fn(). + match (sig.abi, real_sig.abi) { + (Abi::Rust, Abi::RustCall) // check the ABIs. This makes the test here non-symmetric. 
+ if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => { + // First argument of real_sig must be a ZST + let fst_ty = real_sig.inputs_and_output[0]; + if self.layout_of(fst_ty)?.is_zst() { + // Second argument must be a tuple matching the argument list of sig + let snd_ty = real_sig.inputs_and_output[1]; + match snd_ty.sty { + ty::TyTuple(tys, _) if sig.inputs().len() == tys.len() => + if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) { + return Ok(true) + }, + _ => {} + } + } + } + _ => {} + }; + + // Nope, this doesn't work. + return Ok(false); + } + + fn eval_fn_call( + &mut self, + instance: ty::Instance<'tcx>, + destination: Option<(Place, mir::BasicBlock)>, + args: &[ValTy<'tcx>], + span: Span, + sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx> { + trace!("eval_fn_call: {:#?}", instance); + match instance.def { + ty::InstanceDef::Intrinsic(..) => { + let (ret, target) = match destination { + Some(dest) => dest, + _ => return err!(Unreachable), + }; + let ty = sig.output(); + let layout = self.layout_of(ty)?; + M::call_intrinsic(self, instance, args, ret, layout, target)?; + self.dump_local(ret); + Ok(()) + } + // FIXME: figure out why we can't just go through the shim + ty::InstanceDef::ClosureOnceShim { .. } => { + if M::eval_fn_call(self, instance, destination, args, span, sig)? 
{ + return Ok(()); + } + let mut arg_locals = self.frame().mir.args_iter(); + match sig.abi { + // closure as closure once + Abi::RustCall => { + for (arg_local, &valty) in arg_locals.zip(args) { + let dest = self.eval_place(&mir::Place::Local(arg_local))?; + self.write_value(valty, dest)?; + } + } + // non capture closure as fn ptr + // need to inject zst ptr for closure object (aka do nothing) + // and need to pack arguments + Abi::Rust => { + trace!( + "arg_locals: {:?}", + self.frame().mir.args_iter().collect::<Vec<_>>() + ); + trace!("args: {:?}", args); + let local = arg_locals.nth(1).unwrap(); + for (i, &valty) in args.into_iter().enumerate() { + let dest = self.eval_place(&mir::Place::Local(local).field( + mir::Field::new(i), + valty.ty, + ))?; + self.write_value(valty, dest)?; + } + } + _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi), + } + Ok(()) + } + ty::InstanceDef::FnPtrShim(..) | + ty::InstanceDef::DropGlue(..) | + ty::InstanceDef::CloneShim(..) | + ty::InstanceDef::Item(_) => { + // Push the stack frame, and potentially be entirely done if the call got hooked + if M::eval_fn_call(self, instance, destination, args, span, sig)? { + return Ok(()); + } + + // Pass the arguments + let mut arg_locals = self.frame().mir.args_iter(); + trace!("ABI: {:?}", sig.abi); + trace!( + "arg_locals: {:?}", + self.frame().mir.args_iter().collect::<Vec<_>>() + ); + trace!("args: {:?}", args); + match sig.abi { + Abi::RustCall => { + assert_eq!(args.len(), 2); + + { + // write first argument + let first_local = arg_locals.next().unwrap(); + let dest = self.eval_place(&mir::Place::Local(first_local))?; + self.write_value(args[0], dest)?; + } + + // unpack and write all other args + let layout = self.layout_of(args[1].ty)?; + if let ty::TyTuple(..) 
= args[1].ty.sty { + if self.frame().mir.args_iter().count() == layout.fields.count() + 1 { + match args[1].value { + Value::ByRef(PtrAndAlign { ptr, aligned }) => { + assert!( + aligned, + "Unaligned ByRef-values cannot occur as function arguments" + ); + for (i, arg_local) in arg_locals.enumerate() { + let field = layout.field(&self, i)?; + let offset = layout.fields.offset(i).bytes(); + let arg = Value::by_ref(ptr.offset(offset, &self)?); + let dest = + self.eval_place(&mir::Place::Local(arg_local))?; + trace!( + "writing arg {:?} to {:?} (type: {})", + arg, + dest, + field.ty + ); + let valty = ValTy { + value: arg, + ty: field.ty, + }; + self.write_value(valty, dest)?; + } + } + Value::ByVal(PrimVal::Undef) => {} + other => { + trace!("{:#?}, {:#?}", other, layout); + let mut layout = layout; + 'outer: loop { + for i in 0..layout.fields.count() { + let field = layout.field(&self, i)?; + if layout.fields.offset(i).bytes() == 0 && layout.size == field.size { + layout = field; + continue 'outer; + } + } + break; + } + let dest = self.eval_place(&mir::Place::Local( + arg_locals.next().unwrap(), + ))?; + let valty = ValTy { + value: other, + ty: layout.ty, + }; + self.write_value(valty, dest)?; + } + } + } else { + trace!("manual impl of rust-call ABI"); + // called a manual impl of a rust-call function + let dest = self.eval_place( + &mir::Place::Local(arg_locals.next().unwrap()), + )?; + self.write_value(args[1], dest)?; + } + } else { + bug!( + "rust-call ABI tuple argument was {:#?}, {:#?}", + args[1].ty, + layout + ); + } + } + _ => { + for (arg_local, &valty) in arg_locals.zip(args) { + let dest = self.eval_place(&mir::Place::Local(arg_local))?; + self.write_value(valty, dest)?; + } + } + } + Ok(()) + } + // cannot use the shim here, because that will only result in infinite recursion + ty::InstanceDef::Virtual(_, idx) => { + let ptr_size = self.memory.pointer_size(); + let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?; + let fn_ptr = 
self.memory.read_ptr_sized_unsigned( + vtable.offset(ptr_size * (idx as u64 + 3), &self)? + )?.to_ptr()?; + let instance = self.memory.get_fn(fn_ptr)?; + let mut args = args.to_vec(); + let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty; + args[0].ty = ty; + args[0].value = ptr.to_value(); + // recurse with concrete function + self.eval_fn_call(instance, destination, &args, span, sig) + } + } + } +} diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs new file mode 100644 index 0000000000000..c73b95c717c3d --- /dev/null +++ b/src/librustc_mir/interpret/traits.rs @@ -0,0 +1,86 @@ +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{Size, Align, LayoutOf}; +use syntax::ast::Mutability; + +use rustc::mir::interpret::{PrimVal, Value, MemoryPointer, EvalResult}; +use super::{EvalContext, eval_context, + Machine}; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + /// Creates a dynamic vtable for the given type and vtable origin. This is used only for + /// objects. + /// + /// The `trait_ref` encodes the erased self type. Hence if we are + /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then + /// `trait_ref` would map `T:Trait`. 
+ pub fn get_vtable( + &mut self, + ty: Ty<'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>, + ) -> EvalResult<'tcx, MemoryPointer> { + debug!("get_vtable(trait_ref={:?})", trait_ref); + + let layout = self.layout_of(trait_ref.self_ty())?; + assert!(!layout.is_unsized(), "can't create a vtable for an unsized type"); + let size = layout.size.bytes(); + let align = layout.align.abi(); + + let ptr_size = self.memory.pointer_size(); + let methods = self.tcx.vtable_methods(trait_ref); + let vtable = self.memory.allocate( + ptr_size * (3 + methods.len() as u64), + ptr_size, + None, + )?; + + let drop = eval_context::resolve_drop_in_place(self.tcx, ty); + let drop = self.memory.create_fn_alloc(drop); + self.memory.write_ptr_sized_unsigned(vtable, PrimVal::Ptr(drop))?; + + let size_ptr = vtable.offset(ptr_size, &self)?; + self.memory.write_ptr_sized_unsigned(size_ptr, PrimVal::Bytes(size as u128))?; + let align_ptr = vtable.offset(ptr_size * 2, &self)?; + self.memory.write_ptr_sized_unsigned(align_ptr, PrimVal::Bytes(align as u128))?; + + for (i, method) in methods.iter().enumerate() { + if let Some((def_id, substs)) = *method { + let instance = self.resolve(def_id, substs)?; + let fn_ptr = self.memory.create_fn_alloc(instance); + let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; + self.memory.write_ptr_sized_unsigned(method_ptr, PrimVal::Ptr(fn_ptr))?; + } + } + + self.memory.mark_static_initalized( + vtable.alloc_id, + Mutability::Mutable, + )?; + + Ok(vtable) + } + + pub fn read_drop_type_from_vtable( + &self, + vtable: MemoryPointer, + ) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> { + // we don't care about the pointee type, we just want a pointer + match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? 
{ + // some values don't need to call a drop impl, so the value is null + Value::ByVal(PrimVal::Bytes(0)) => Ok(None), + Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), + _ => err!(ReadBytesAsPointer), + } + } + + pub fn read_size_and_align_from_vtable( + &self, + vtable: MemoryPointer, + ) -> EvalResult<'tcx, (Size, Align)> { + let pointer_size = self.memory.pointer_size(); + let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?)?.to_bytes()? as u64; + let align = self.memory.read_ptr_sized_unsigned( + vtable.offset(pointer_size * 2, self)? + )?.to_bytes()? as u64; + Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap())) + } +} diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index 53f9b885ac6c6..e7dd94f75e5b4 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -25,7 +25,10 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! #![feature(decl_macro)] #![feature(i128_type)] #![feature(inclusive_range_syntax)] +#![feature(inclusive_range)] +#![feature(macro_vis_matcher)] #![feature(match_default_bindings)] +#![feature(never_type)] #![feature(range_contains)] #![feature(rustc_diagnostic_macros)] #![feature(placement_in_syntax)] @@ -48,6 +51,9 @@ extern crate syntax_pos; extern crate rustc_const_math; extern crate rustc_const_eval; extern crate core; // for NonZero +extern crate log_settings; +extern crate rustc_apfloat; +extern crate byteorder; mod diagnostics; @@ -58,6 +64,7 @@ mod hair; mod shim; pub mod transform; pub mod util; +pub mod interpret; use rustc::ty::maps::Providers; @@ -65,6 +72,7 @@ pub fn provide(providers: &mut Providers) { borrow_check::provide(providers); shim::provide(providers); transform::provide(providers); + providers.const_eval = interpret::const_eval_provider; } __build_diagnostic_array! 
{ librustc_mir, DIAGNOSTICS } diff --git a/src/libstd/sys/redox/fast_thread_local.rs b/src/libstd/sys/redox/fast_thread_local.rs index 9f0eee024d56f..6a007e98827b6 100644 --- a/src/libstd/sys/redox/fast_thread_local.rs +++ b/src/libstd/sys/redox/fast_thread_local.rs @@ -81,7 +81,7 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) { unsafe extern fn run_dtors(mut ptr: *mut u8) { while !ptr.is_null() { let list: Box<List> = Box::from_raw(ptr as *mut List); - for &(ptr, dtor) in list.iter() { + for (ptr, dtor) in list.into_iter() { dtor(ptr); } ptr = DTORS.get(); diff --git a/src/libstd/sys_common/thread_local.rs b/src/libstd/sys_common/thread_local.rs index 87ffd304e1a33..a4aa3d96d25c0 100644 --- a/src/libstd/sys_common/thread_local.rs +++ b/src/libstd/sys_common/thread_local.rs @@ -262,7 +262,7 @@ pub unsafe fn register_dtor_fallback(t: *mut u8, unsafe extern fn run_dtors(mut ptr: *mut u8) { while !ptr.is_null() { let list: Box<List> = Box::from_raw(ptr as *mut List); - for &(ptr, dtor) in list.iter() { + for (ptr, dtor) in list.into_iter() { dtor(ptr); } ptr = DTORS.get(); diff --git a/src/test/compile-fail/E0080.rs b/src/test/compile-fail/E0080.rs index 0329209d44bc7..2f199c48e46e7 100644 --- a/src/test/compile-fail/E0080.rs +++ b/src/test/compile-fail/E0080.rs @@ -10,7 +10,9 @@ enum Enum { X = (1 << 500), //~ ERROR E0080 + //~| WARNING shift left with overflow Y = (1 / 0) //~ ERROR E0080 + //~| WARNING divide by zero } fn main() { diff --git a/src/test/compile-fail/const-eval-overflow-4.rs b/src/test/compile-fail/const-eval-overflow-4.rs index 06b7d0206b128..2b1c1017b5b00 100644 --- a/src/test/compile-fail/const-eval-overflow-4.rs +++ b/src/test/compile-fail/const-eval-overflow-4.rs @@ -23,6 +23,8 @@ const A_I8_T : [u32; (i8::MAX as i8 + 1i8) as usize] //~^ ERROR constant evaluation error //~^^ NOTE attempt to add with overflow + //~| WARNING constant evaluation error + //~| NOTE on by default = [0; (i8::MAX as usize) + 1]; fn main() { diff --git 
a/src/test/compile-fail/const-fn-error.rs b/src/test/compile-fail/const-fn-error.rs index 385daef44dfe5..baf836b4dad1b 100644 --- a/src/test/compile-fail/const-fn-error.rs +++ b/src/test/compile-fail/const-fn-error.rs @@ -13,8 +13,9 @@ const X : usize = 2; const fn f(x: usize) -> usize { - let mut sum = 0; - for i in 0..x { + let mut sum = 0; //~ ERROR blocks in constant functions are limited + for i in 0..x { //~ ERROR calls in constant functions + //~| ERROR constant function contains unimplemented sum += i; } sum //~ ERROR E0080 @@ -24,4 +25,6 @@ const fn f(x: usize) -> usize { #[allow(unused_variables)] fn main() { let a : [i32; f(X)]; //~ NOTE for constant expression here + //~| WARNING constant evaluation error: non-constant path + //~| on by default } diff --git a/src/test/compile-fail/const-len-underflow-separate-spans.rs b/src/test/compile-fail/const-len-underflow-separate-spans.rs index 3c84810554214..eaad9e7e92bab 100644 --- a/src/test/compile-fail/const-len-underflow-separate-spans.rs +++ b/src/test/compile-fail/const-len-underflow-separate-spans.rs @@ -17,6 +17,8 @@ const TWO: usize = 2; const LEN: usize = ONE - TWO; //~^ ERROR E0080 //~| attempt to subtract with overflow +//~| NOTE attempt to subtract with overflow +//~| NOTE on by default fn main() { let a: [i8; LEN] = unimplemented!(); diff --git a/src/test/compile-fail/union/union-const-eval.rs b/src/test/compile-fail/union/union-const-eval.rs index ee4d9fe99eeb8..73b7743fc45c7 100644 --- a/src/test/compile-fail/union/union-const-eval.rs +++ b/src/test/compile-fail/union/union-const-eval.rs @@ -20,5 +20,7 @@ fn main() { let a: [u8; C.a]; // OK let b: [u8; C.b]; //~ ERROR constant evaluation error //~^ NOTE nonexistent struct field + //~| WARNING constant evaluation error + //~| NOTE on by default } } diff --git a/src/tools/miri b/src/tools/miri index 6dbfe23c4d1af..bde093fa140cb 160000 --- a/src/tools/miri +++ b/src/tools/miri @@ -1 +1 @@ -Subproject commit 6dbfe23c4d1af109c894ff9d7d5da97c025584e5 
+Subproject commit bde093fa140cbf95023482a94b92b0b16af4b521 diff --git a/src/tools/tidy/src/lib.rs b/src/tools/tidy/src/lib.rs index bd49f288eb2eb..3fd844f326184 100644 --- a/src/tools/tidy/src/lib.rs +++ b/src/tools/tidy/src/lib.rs @@ -66,6 +66,8 @@ fn filter_dirs(path: &Path) -> bool { "src/tools/rust-installer", "src/tools/rustfmt", "src/tools/miri", + "src/librustc/mir/interpret", + "src/librustc_mir/interpret", ]; skip.iter().any(|p| path.ends_with(p)) } diff --git a/src/tools/toolstate.toml b/src/tools/toolstate.toml index 744a0f96ad734..08783cf145815 100644 --- a/src/tools/toolstate.toml +++ b/src/tools/toolstate.toml @@ -23,7 +23,7 @@ # Each tool has a list of people to ping # ping @oli-obk @RalfJung @eddyb -miri = "Broken" +miri = "Testing" # ping @Manishearth @llogiq @mcarton @oli-obk clippy = "Testing"