Commit f906c545 authored by Oliver Schneider, committed by GitHub

Merge pull request #272 from oli-obk/mir-validate

Mir validate
......@@ -6,8 +6,10 @@ dependencies = [
"cargo_metadata 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"compiletest_rs 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
......@@ -18,6 +20,14 @@ dependencies = [
"memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "aho-corasick"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "byteorder"
version = "1.0.0"
......@@ -101,6 +111,14 @@ dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "memchr"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-traits"
version = "0.1.37"
......@@ -123,11 +141,28 @@ dependencies = [
"utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex-syntax"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "regex-syntax"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc-serialize"
version = "0.3.24"
......@@ -203,16 +238,43 @@ dependencies = [
"thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "thread_local"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "unicode-xid"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "unreachable"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "utf8-ranges"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "utf8-ranges"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "void"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi"
version = "0.2.8"
......@@ -225,6 +287,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ca972c2ea5f742bfce5687b9aef75506a764f61d37f8f649047846a9686ddb66"
"checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699"
"checksum byteorder 1.0.0 (git+https://github.com/BurntSushi/byteorder)" = "<none>"
"checksum cargo_metadata 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5d84cb53c78e573aa126a4b9f963fdb2629f8183b26e235da08bb36dc7381162"
"checksum compiletest_rs 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "617b23d0ed4f57b3bcff6b5fe0a78f0010f1efb636298317665a960b6dbc0533"
......@@ -237,10 +300,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5141eca02775a762cc6cd564d8d2c50f67c0ea3a372cbf1c51592b3e029e10ad"
"checksum log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3d382732ea0fbc09790c4899db3255bdea0fc78b54bf234bd18a63bb603915b6"
"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4"
"checksum num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "e1cbfa3781f3fe73dc05321bed52a06d2d491eaa764c52335cf4399f046ece99"
"checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
"checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f"
"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"
"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957"
"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db"
"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
"checksum serde 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3b46a59dd63931010fdb1d88538513f3279090d88b5c22ef4fe8440cfffcc6e3"
"checksum serde_derive 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6c06b68790963518008b8ae0152d48be4bbbe77015d2c717f6282eea1824be9a"
......@@ -250,7 +316,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6"
"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03"
"checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5"
"checksum thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1697c4b57aeeb7a536b647165a2825faddffb1d3bad386d507709bd51a90bb14"
"checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc"
"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f"
"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122"
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
......@@ -32,6 +32,8 @@ env_logger = "0.3.3"
log = "0.3.6"
log_settings = "0.1.1"
cargo_metadata = "0.2"
regex = "0.2.2"
lazy_static = "0.2.8"
[dev-dependencies]
compiletest_rs = "0.2.6"
......@@ -2,7 +2,7 @@
use std::fmt;
use rustc::mir;
use rustc::ty::{FnSig, Ty, layout};
use memory::{MemoryPointer, Kind};
use memory::{MemoryPointer, LockInfo, AccessKind, Kind};
use rustc_const_math::ConstMathErr;
use syntax::codemap::Span;
......@@ -51,6 +51,30 @@ pub enum EvalError<'tcx> {
required: u64,
has: u64,
},
MemoryLockViolation {
ptr: MemoryPointer,
len: u64,
frame: usize,
access: AccessKind,
lock: LockInfo,
},
MemoryAcquireConflict {
ptr: MemoryPointer,
len: u64,
kind: AccessKind,
lock: LockInfo,
},
InvalidMemoryLockRelease {
ptr: MemoryPointer,
len: u64,
frame: usize,
lock: LockInfo,
},
DeallocatedLockedMemory {
ptr: MemoryPointer,
lock: LockInfo,
},
ValidationFailure(String),
CalledClosureAsFunction,
VtableForArgumentlessMethod,
ModifiedConstantMemory,
......@@ -97,6 +121,16 @@ fn description(&self) -> &str {
"pointer offset outside bounds of allocation",
InvalidNullPointerUsage =>
"invalid use of NULL pointer",
MemoryLockViolation { .. } =>
"memory access conflicts with lock",
MemoryAcquireConflict { .. } =>
"new memory lock conflicts with existing lock",
ValidationFailure(..) =>
"type validation failed",
InvalidMemoryLockRelease { .. } =>
"invalid attempt to release write lock",
DeallocatedLockedMemory { .. } =>
"tried to deallocate memory in conflict with a lock",
ReadPointerAsBytes =>
"a raw memory access tried to access part of a pointer value as raw bytes",
ReadBytesAsPointer =>
......@@ -196,6 +230,25 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if access { "memory access" } else { "pointer computed" },
ptr.offset, ptr.alloc_id, allocation_size)
},
MemoryLockViolation { ptr, len, frame, access, ref lock } => {
write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}",
access, frame, ptr, len, lock)
}
MemoryAcquireConflict { ptr, len, kind, ref lock } => {
write!(f, "new {:?} lock at {:?}, size {}, is in conflict with lock {:?}",
kind, ptr, len, lock)
}
InvalidMemoryLockRelease { ptr, len, frame, ref lock } => {
write!(f, "frame {} tried to release memory write lock at {:?}, size {}, but cannot release lock {:?}",
frame, ptr, len, lock)
}
DeallocatedLockedMemory { ptr, ref lock } => {
write!(f, "tried to deallocate memory at {:?} in conflict with lock {:?}",
ptr, lock)
}
ValidationFailure(ref err) => {
write!(f, "type validation failed: {}", err)
}
NoMirFor(ref func) => write!(f, "no mir for `{}`", func),
FunctionPointerTyMismatch(sig, got) =>
write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got),
......
......@@ -4,6 +4,7 @@
use rustc::hir::def_id::DefId;
use rustc::hir::map::definitions::DefPathData;
use rustc::middle::const_val::ConstVal;
use rustc::middle::region::CodeExtent;
use rustc::mir;
use rustc::traits::Reveal;
use rustc::ty::layout::{self, Layout, Size};
......@@ -21,6 +22,7 @@
use memory::Kind as MemoryKind;
use operator;
use value::{PrimVal, PrimValKind, Value, Pointer};
use validation::ValidationQuery;
pub struct EvalContext<'a, 'tcx: 'a> {
/// The results of the type checker, from rustc.
......@@ -29,6 +31,11 @@ pub struct EvalContext<'a, 'tcx: 'a> {
/// The virtual memory system.
pub(crate) memory: Memory<'a, 'tcx>,
#[allow(dead_code)]
// FIXME(@RalfJung): validation branch
/// Lvalues that were suspended by the validation subsystem, and will be recovered later
pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
/// Precomputed statics, constants and promoteds.
pub(crate) globals: HashMap<GlobalId<'tcx>, Global<'tcx>>,
......@@ -112,6 +119,12 @@ pub enum StackPopCleanup {
None,
}
/// A lifetime determined dynamically during evaluation: identified by the
/// stack frame that owns it, plus an optional region within that frame.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DynamicLifetime {
    /// Index of the owning stack frame.
    pub frame: usize,
    pub region: Option<CodeExtent>, // "None" indicates "until the function ends"
}
#[derive(Copy, Clone, Debug)]
pub struct ResourceLimits {
pub memory_size: u64,
......@@ -134,6 +147,7 @@ pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, limits: ResourceLimits) -> Self {
EvalContext {
tcx,
memory: Memory::new(&tcx.data_layout, limits.memory_size),
suspended: HashMap::new(),
globals: HashMap::new(),
stack: Vec::new(),
stack_limit: limits.stack_limit,
......@@ -169,6 +183,12 @@ pub fn stack(&self) -> &[Frame<'tcx>] {
&self.stack
}
/// Returns the index of the topmost (currently executing) stack frame.
///
/// Panics if the stack is empty, i.e. no frame is currently executing.
#[inline]
pub fn cur_frame(&self) -> usize {
    // Idiomatic emptiness check (clippy `len_zero`) instead of `len() > 0`.
    assert!(!self.stack.is_empty());
    self.stack.len() - 1
}
/// Returns true if the current frame or any parent frame is part of a ctfe.
///
/// Used to disable features in const eval, which do not have a rfc enabling
......@@ -336,6 +356,9 @@ fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet<mir::
stmt: 0,
});
let cur_frame = self.cur_frame();
self.memory.set_cur_frame(cur_frame);
if self.stack.len() > self.stack_limit {
Err(EvalError::StackFrameLimitReached)
} else {
......@@ -345,7 +368,13 @@ fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet<mir::
pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
::log_settings::settings().indentation -= 1;
self.memory.locks_lifetime_ended(None);
let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
if !self.stack.is_empty() {
// TODO: IS this the correct time to start considering these accesses as originating from the returned-to stack frame?
let cur_frame = self.cur_frame();
self.memory.set_cur_frame(cur_frame);
}
match frame.return_to_block {
StackPopCleanup::MarkStatic(mutable) => if let Lvalue::Global(id) = frame.return_lvalue {
let global_value = self.globals.get_mut(&id)
......@@ -1551,9 +1580,8 @@ pub(super) fn dump_local(&self, lvalue: Lvalue<'tcx>) {
if let Lvalue::Local { frame, local } = lvalue {
let mut allocs = Vec::new();
let mut msg = format!("{:?}", local);
let last_frame = self.stack.len() - 1;
if frame != last_frame {
write!(msg, " ({} frames up)", last_frame - frame).unwrap();
if frame != self.cur_frame() {
write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
}
write!(msg, ":").unwrap();
......
......@@ -7,7 +7,7 @@
use error::{EvalError, EvalResult};
use eval_context::EvalContext;
use memory::MemoryPointer;
use value::{PrimVal, Value, Pointer};
use value::{PrimVal, Pointer, Value};
#[derive(Copy, Clone, Debug)]
pub enum Lvalue<'tcx> {
......@@ -211,7 +211,7 @@ pub(super) fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResu
use rustc::mir::Lvalue::*;
let lvalue = match *mir_lvalue {
Local(mir::RETURN_POINTER) => self.frame().return_lvalue,
Local(local) => Lvalue::Local { frame: self.stack.len() - 1, local },
Local(local) => Lvalue::Local { frame: self.cur_frame(), local },
Static(ref static_) => {
let instance = ty::Instance::mono(self.tcx, static_.def_id);
......@@ -349,7 +349,33 @@ pub fn lvalue_field(
Ok(Lvalue::Ptr { ptr, extra, aligned: aligned && !packed })
}
fn eval_lvalue_projection(
/// Turn a `Value` holding a (possibly fat) pointer into an `Lvalue`.
///
/// `ty` is the type the pointer points to; its "struct tail" determines
/// whether the value carries extra data (a vtable or a slice length).
pub(super) fn val_to_lvalue(&mut self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
    Ok(match self.tcx.struct_tail(ty).sty {
        // Trait object: the value is a (data ptr, vtable ptr) pair.
        ty::TyDynamic(..) => {
            let (ptr, vtable) = val.into_ptr_vtable_pair(&mut self.memory)?;
            Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: true }
        },
        // str / slice: the value is a (data ptr, length) pair.
        ty::TyStr | ty::TySlice(_) => {
            let (ptr, len) = val.into_slice(&mut self.memory)?;
            Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: true }
        },
        // Sized pointee: a thin pointer, no extra data.
        _ => Lvalue::Ptr { ptr: val.into_ptr(&mut self.memory)?, extra: LvalueExtra::None, aligned: true },
    })
}
/// Project to element `n` of an array or slice lvalue.
pub(super) fn lvalue_index(&mut self, base: Lvalue<'tcx>, outer_ty: Ty<'tcx>, n: u64) -> EvalResult<'tcx, Lvalue<'tcx>> {
    // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
    // Force the base into memory so we have a pointer we can offset into.
    let allocated = self.force_allocation(base)?;
    let (base_ptr, _, aligned) = allocated.to_ptr_extra_aligned();
    let (elem_ty, len) = allocated.elem_ty_and_len(outer_ty);
    let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
    assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len);
    let elem_ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
    Ok(Lvalue::Ptr { ptr: elem_ptr, extra: LvalueExtra::None, aligned: aligned })
}
pub(super) fn eval_lvalue_projection(
&mut self,
base: Lvalue<'tcx>,
base_ty: Ty<'tcx>,
......@@ -388,32 +414,15 @@ fn eval_lvalue_projection(
trace!("deref to {} on {:?}", pointee_type, val);
match self.tcx.struct_tail(pointee_type).sty {
ty::TyDynamic(..) => {
let (ptr, vtable) = val.into_ptr_vtable_pair(&mut self.memory)?;
(ptr, LvalueExtra::Vtable(vtable), true)
},
ty::TyStr | ty::TySlice(_) => {
let (ptr, len) = val.into_slice(&mut self.memory)?;
(ptr, LvalueExtra::Length(len), true)
},
_ => (val.into_ptr(&mut self.memory)?, LvalueExtra::None, true),
}
return self.val_to_lvalue(val, pointee_type);
}
Index(ref operand) => {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
let (elem_ty, len) = base.elem_ty_and_len(base_ty);
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
let n_ptr = self.eval_operand(operand)?;
let usize = self.tcx.types.usize;
let n = self.value_to_primval(n_ptr, usize)?.to_u64()?;
assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len);
let ptr = base_ptr.offset(n * elem_size, &self)?;
(ptr, LvalueExtra::None, aligned)
return self.lvalue_index(base, base_ty, n);
}
ConstantIndex { offset, min_length, from_end } => {
......
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
use std::{fmt, iter, ptr, mem, io};
use std::{fmt, iter, ptr, mem, io, ops};
use rustc::ty;
use rustc::ty::layout::{self, TargetDataLayout, HasDataLayout};
use syntax::ast::Mutability;
use rustc::middle::region::CodeExtent;
use error::{EvalError, EvalResult};
use value::{PrimVal, Pointer};
use eval_context::EvalContext;
use eval_context::{EvalContext, DynamicLifetime};
////////////////////////////////////////////////////////////////////////////////
// Locks
////////////////////////////////////////////////////////////////////////////////
mod range {
    use super::*;

    // The derived `Ord` impl sorts first by the first field, then, if the fields are the same,
    // by the second field.
    // This is exactly what we need for our purposes, since a range query on a BTreeSet/BTreeMap will give us all
    // `MemoryRange`s whose `start` is <= than the one we're looking for, but not > the end of the range we're checking.
    // At the same time the `end` is irrelevant for the sorting and range searching, but used for the check.
    // This kind of search breaks, if `end < start`, so don't do that!
    /// A byte range `[start, end)` inside one allocation, used as the key for
    /// per-allocation lock bookkeeping.
    #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
    pub struct MemoryRange {
        start: u64,
        end: u64, // exclusive (see `new`: end = offset + len)
    }

    impl MemoryRange {
        #[allow(dead_code)]
        // FIXME(@RalfJung): validation branch
        /// The range covering `len` bytes starting at `offset`. `len` must be positive.
        pub fn new(offset: u64, len: u64) -> MemoryRange {
            assert!(len > 0);
            MemoryRange {
                start: offset,
                end: offset + len,
            }
        }

        /// Build the key range fed to `BTreeMap::range` so that every stored
        /// `MemoryRange` that could overlap `[offset, offset+len)` is visited.
        /// This is only a conservative pre-filter; callers still apply `overlaps`.
        pub fn range(offset: u64, len: u64) -> ops::Range<MemoryRange> {
            assert!(len > 0);
            // We select all elements that are within
            // the range given by the offset into the allocation and the length.
            // This is sound if "self.contains() || self.overlaps() == true" implies that self is in-range.
            let left = MemoryRange {
                start: 0,
                end: offset,
            };
            let right = MemoryRange {
                start: offset + len + 1,
                end: 0,
            };
            left..right
        }

        #[allow(dead_code)]
        // FIXME(@RalfJung): validation branch
        /// Whether this range lies entirely within `[offset, offset+len)`.
        pub fn contained_in(&self, offset: u64, len: u64) -> bool {
            assert!(len > 0);
            offset <= self.start && self.end <= (offset + len)
        }

        /// Whether this range shares at least one byte with `[offset, offset+len)`.
        pub fn overlaps(&self, offset: u64, len: u64) -> bool {
            assert!(len > 0);
            //let non_overlap = (offset + len) <= self.start || self.end <= offset;
            (offset + len) > self.start && self.end > offset
        }
    }
}
use self::range::*;
/// The kind of memory access being performed, or that a lock guards against.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum AccessKind {
    Read,
    Write,
}
/// Information about a lock that is currently held.
#[derive(Clone, Debug)]
pub enum LockInfo {
    /// An exclusive lock, held by exactly one dynamic lifetime.
    WriteLock(DynamicLifetime),
    /// A shared lock, held by any number of dynamic lifetimes.
    ReadLock(Vec<DynamicLifetime>), // This should never be empty -- that would be a read lock held and nobody there to release it...
}
use self::LockInfo::*;
impl LockInfo {
fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
use self::AccessKind::*;
match (self, access) {
(&ReadLock(_), Read) => true, // Read access to read-locked region is okay, no matter who's holding the read lock.
(&WriteLock(ref lft), _) if Some(lft.frame) == frame => true, // All access is okay when we hold the write lock.
_ => false, // Nothing else is okay.
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Allocations and pointers
......@@ -41,6 +129,35 @@ pub struct Allocation {
/// allocation is modified or deallocated in the future.
/// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
pub kind: Kind,
/// Memory regions that are locked by some function
locks: BTreeMap<MemoryRange, LockInfo>,
}
impl Allocation {
    /// Iterate over all locks whose range overlaps `[offset, offset+len)`.
    fn iter_locks<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=(&'a MemoryRange, &'a LockInfo)> + 'a {
        // `MemoryRange::range` is a conservative pre-filter; `overlaps` does the real check.
        self.locks.range(MemoryRange::range(offset, len))
            .filter(move |&(candidate, _)| candidate.overlaps(offset, len))
    }

    #[allow(dead_code)]
    // FIXME(@RalfJung): validation branch
    /// Mutable version of `iter_locks`.
    fn iter_locks_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=(&'a MemoryRange, &'a mut LockInfo)> + 'a {
        self.locks.range_mut(MemoryRange::range(offset, len))
            .filter(move |&(candidate, _)| candidate.overlaps(offset, len))
    }

    /// Check whether `frame` may perform `access` on the given byte range.
    /// Returns the first conflicting lock on failure; zero-length accesses always succeed.
    fn check_locks<'tcx>(&self, frame: Option<usize>, offset: u64, len: u64, access: AccessKind) -> Result<(), LockInfo> {
        if len == 0 {
            return Ok(())
        }
        // Find the first overlapping lock that forbids this access, if any.
        match self.iter_locks(offset, len).find(|&(_, lock)| !lock.access_permitted(frame, access)) {
            Some((_, conflicting)) => Err(conflicting.clone()),
            None => Ok(()),
        }
    }
}
#[derive(Debug, PartialEq, Copy, Clone)]
......@@ -96,6 +213,10 @@ pub(crate) fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx,
}
}
////////////////////////////////////////////////////////////////////////////////
// Top-level interpreter memory
////////////////////////////////////////////////////////////////////////////////
pub type TlsKey = usize;
#[derive(Copy, Clone, Debug)]
......@@ -104,10 +225,6 @@ pub struct TlsEntry<'tcx> {
dtor: Option<ty::Instance<'tcx>>,
}
////////////////////////////////////////////////////////////////////////////////
// Top-level interpreter memory
////////////////////////////////////////////////////////////////////////////////
pub struct Memory<'a, 'tcx> {
/// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
alloc_map: HashMap<AllocId, Allocation>,
......@@ -151,6 +268,9 @@ pub struct Memory<'a, 'tcx> {
/// alignment checking is currently enforced for read and/or write accesses.
reads_are_aligned: bool,
writes_are_aligned: bool,
/// The current stack frame. Used to check accesses against locks.
cur_frame: usize,
}
impl<'a, 'tcx> Memory<'a, 'tcx> {
......@@ -169,6 +289,7 @@ pub fn new(layout: &'a TargetDataLayout, max_memory: u64) -> Self {
next_thread_local: 0,
reads_are_aligned: true,
writes_are_aligned: true,
cur_frame: usize::max_value(),
}
}
......@@ -220,6 +341,7 @@ pub fn allocate(&mut self, size: u64, align: u64, kind: Kind) -> EvalResult<'tcx
align,
kind,
mutable: Mutability::Mutable,
locks: BTreeMap::new(),
};
let id = self.next_id;
self.next_id.0 += 1;
......@@ -257,6 +379,14 @@ pub fn deallocate(&mut self, ptr: MemoryPointer, size_and_align: Option<(u64, u6
None => return Err(EvalError::DoubleFree),
};
// It is okay for us to still holds locks on deallocation -- for example, we could store data we own
// in a local, and the local could be deallocated (from StorageDead) before the function returns.
// However, we should check *something*. For now, we make sure that there is no conflicting write
// lock by another frame. We *have* to permit deallocation if we hold a read lock.
// TODO: Figure out the exact rules here.
alloc.check_locks(Some(self.cur_frame), 0, alloc.bytes.len() as u64, AccessKind::Read)
.map_err(|lock| EvalError::DeallocatedLockedMemory { ptr, lock })?;
if alloc.kind != kind {
return Err(EvalError::DeallocatedWrongMemoryKind(alloc.kind, kind));
}
......@@ -280,7 +410,7 @@ pub fn endianess(&self) -> layout::Endian {
self.layout.endian
}
/// Check that the pointer is aligned and non-NULL
/// Check that the pointer is aligned AND non-NULL.
pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> {
let offset = match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
......@@ -321,6 +451,10 @@ pub(crate) fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResul
Ok(())
}
/// Record which stack frame is currently executing, so that subsequent
/// memory accesses can be checked against the locks held by that frame.
pub(crate) fn set_cur_frame(&mut self, cur_frame: usize) {
    self.cur_frame = cur_frame;
}
pub(crate) fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey {
let new_key = self.next_thread_local;
self.next_thread_local += 1;
......@@ -397,6 +531,129 @@ pub(crate) fn fetch_tls_dtor(&mut self, key: Option<TlsKey>) -> EvalResult<'tcx,
}
}
/// Locking
impl<'a, 'tcx> Memory<'a, 'tcx> {
    /// Check that the *current* frame may perform `access` on the `len` bytes at `ptr`.
    /// Zero-length accesses trivially succeed.
    pub(crate) fn check_locks(&self, ptr: MemoryPointer, len: u64, access: AccessKind) -> EvalResult<'tcx> {
        if len == 0 {
            return Ok(())
        }
        let alloc = self.get(ptr.alloc_id)?;
        let frame = self.cur_frame;
        alloc.check_locks(Some(frame), ptr.offset, len, access)
            .map_err(|lock| EvalError::MemoryLockViolation { ptr, len, frame, access, lock })
    }

    #[allow(dead_code)]
    // FIXME(@RalfJung): validation branch
    /// Acquire the lock for the given lifetime.
    ///
    /// Fails with `MemoryAcquireConflict` if any existing lock conflicts with the
    /// requested one (the conflict check passes `None` as the frame, so even our
    /// own write lock on the same range counts as a conflict).
    pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Option<CodeExtent>, kind: AccessKind) -> EvalResult<'tcx> {
        use std::collections::btree_map::Entry::*;
        let frame = self.cur_frame;
        assert!(len > 0);
        trace!("Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}", frame, kind, ptr, len, region);
        self.check_bounds(ptr.offset(len, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
        let alloc = self.get_mut_unchecked(ptr.alloc_id)?;

        // Check if this conflicts with other locks
        alloc.check_locks(None, ptr.offset, len, kind)
            .map_err(|lock| EvalError::MemoryAcquireConflict { ptr, len, kind, lock })?;

        let lifetime = DynamicLifetime { frame, region };
        // Either insert a fresh lock for this exact range, or (for reads) join an
        // existing read lock on the exact same range.
        match (alloc.locks.entry(MemoryRange::new(ptr.offset, len)), kind) {
            (Vacant(entry), AccessKind::Read) => { entry.insert(ReadLock(vec![lifetime])); },
            (Vacant(entry), AccessKind::Write) => { entry.insert(WriteLock(lifetime)); },
            (Occupied(mut entry), AccessKind::Read) =>
                match *entry.get_mut() {
                    ReadLock(ref mut lifetimes) => lifetimes.push(lifetime),
                    WriteLock(_) => bug!("We already checked that there is no conflicting write lock"),
                },
            (Occupied(_), AccessKind::Write) => bug!("We already checked that there is no conflicting lock"),
        };
        Ok(())
    }

    #[allow(dead_code)]
    // FIXME(@RalfJung): validation branch
    /// Release a write lock prematurely. If there's a read lock or someone else's lock, fail.
    pub(crate) fn release_write_lock(&mut self, ptr: MemoryPointer, len: u64) -> EvalResult<'tcx> {
        assert!(len > 0);
        let cur_frame = self.cur_frame;
        let alloc = self.get_mut_unchecked(ptr.alloc_id)?;

        // Collect ranges to remove; we cannot mutate `alloc.locks` while iterating over it.
        let mut remove_list : Vec<MemoryRange> = Vec::new();
        for (range, lock) in alloc.iter_locks_mut(ptr.offset, len) {
            match *lock {
                WriteLock(ref lft) => {
                    // Make sure we can release this lock: only the frame that holds it may do so.
                    if lft.frame != cur_frame {
                        return Err(EvalError::InvalidMemoryLockRelease { ptr, len, frame: cur_frame, lock: lock.clone() });
                    }
                    // Releasing only *part* of a write-locked region is not supported.
                    if !range.contained_in(ptr.offset, len) {
                        return Err(EvalError::Unimplemented(format!("miri does not support releasing part of a write-locked region")));
                    }
                    // Release it later. We cannot do this now.
                    remove_list.push(*range);
                }
                ReadLock(_) => {
                    // Abort here and bubble the error outwards so that we do not even register a suspension.
                    return Err(EvalError::InvalidMemoryLockRelease { ptr, len, frame: cur_frame, lock: lock.clone() });
                },
            }
        }

        // Perform the delayed removal.
        for range in remove_list {
            trace!("Releasing {:?}", alloc.locks[&range]);
            alloc.locks.remove(&range);
        }

        // TODO: Test that we actually released a write lock for the entire covered region.
        Ok(())
    }

    /// Drop all of the current frame's locks whose lifetime ends now.
    ///
    /// `ending_region == None` means the frame is returning, so *all* of its locks end.
    pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option<CodeExtent>) {
        let cur_frame = self.cur_frame;
        trace!("Releasing frame {} locks that expire at {:?}", cur_frame, ending_region);
        let has_ended = |lifetime: &DynamicLifetime| -> bool {
            if lifetime.frame != cur_frame {
                return false;
            }
            match ending_region {
                None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks
                              // when it returns, that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the
                              // end of a function. Same for a function still having recoveries.
                Some(ending_region) => lifetime.region == Some(ending_region),
            }
        };

        for alloc in self.alloc_map.values_mut() {
            // Collect things for removal as we cannot remove while iterating
            let mut remove_list : Vec<MemoryRange> = Vec::new();
            for (range, lock) in alloc.locks.iter_mut() {
                // Delete everything that ends now -- i.e., keep only all the other lifetimes.
                match *lock {
                    WriteLock(ref lft) => {
                        if has_ended(lft) {
                            remove_list.push(*range);
                        }
                    }
                    ReadLock(ref mut lfts) => {
                        // A read lock may be held by several lifetimes; drop only the expired ones,
                        // and remove the whole entry once nobody holds it any more.
                        lfts.retain(|lft| !has_ended(lft));
                        if lfts.is_empty() {
                            remove_list.push(*range);
                        }
                    },
                }
            }
            // Perform delayed removal
            for range in remove_list {
                alloc.locks.remove(&range);
            }
        }
    }
}
/// Allocation accessors
impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
......@@ -408,14 +665,10 @@ pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
}
}
}
pub fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> {
fn get_mut_unchecked(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> {
match self.alloc_map.get_mut(&id) {
Some(alloc) => if alloc.mutable == Mutability::Mutable {
Ok(alloc)
} else {
Err(EvalError::ModifiedConstantMemory)
},
Some(alloc) => Ok(alloc),
None => match self.functions.get(&id) {
Some(_) => Err(EvalError::DerefFunctionPointer),
None => Err(EvalError::DanglingPointerDeref),
......@@ -423,6 +676,15 @@ pub fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> {
}
}
/// Fetch a mutable reference to an allocation, refusing to hand out
/// immutable (constant) memory for mutation.
pub fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> {
    // Resolve the id first, then enforce mutability as a guard clause.
    let alloc = self.get_mut_unchecked(id)?;
    if alloc.mutable != Mutability::Mutable {
        return Err(EvalError::ModifiedConstantMemory);
    }
    Ok(alloc)
}
pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, ty::Instance<'tcx>> {
if ptr.offset != 0 {
return Err(EvalError::InvalidFunctionPointer);
......@@ -540,6 +802,7 @@ fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> Eval
if size == 0 {
return Ok(&[]);
}
self.check_locks(ptr, size, AccessKind::Read)?;
self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let alloc = self.get(ptr.alloc_id)?;
assert_eq!(ptr.offset as usize as u64, ptr.offset);
......@@ -556,6 +819,7 @@ fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64)
if size == 0 {
return Ok(&mut []);
}
self.check_locks(ptr, size, AccessKind::Write)?;
self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let alloc = self.get_mut(ptr.alloc_id)?;
assert_eq!(ptr.offset as usize as u64, ptr.offset);
......@@ -694,6 +958,7 @@ pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> {
return Err(EvalError::ReadPointerAsBytes);
}
self.check_defined(ptr, (size + 1) as u64)?;
self.check_locks(ptr, (size + 1) as u64, AccessKind::Read)?;
Ok(&alloc.bytes[offset..offset + size])
},
None => Err(EvalError::UnterminatedCString(ptr)),
......
#![feature(
i128_type,
rustc_private,
conservative_impl_trait,
)]
// From rustc.
......@@ -15,12 +16,16 @@
// From crates.io.
extern crate byteorder;
#[macro_use]
extern crate lazy_static;
extern crate regex;
mod cast;
mod const_eval;
mod error;
mod eval_context;
mod lvalue;
mod validation;
mod memory;
mod operator;
mod step;
......
......@@ -7,8 +7,9 @@
use rustc::mir::visit::{Visitor, LvalueContext};
use rustc::mir;
use rustc::traits::Reveal;
use rustc::ty;
use rustc::ty::layout::Layout;
use rustc::ty::{subst, self};
use rustc::ty::subst::Substs;
use error::{EvalResult, EvalError};
use eval_context::{EvalContext, StackPopCleanup};
......@@ -116,7 +117,7 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
// Mark locals as dead or alive.
StorageLive(ref lvalue) | StorageDead(ref lvalue)=> {
let (frame, local) = match self.eval_lvalue(lvalue)? {
Lvalue::Local{ frame, local } if self.stack.len() == frame+1 => (frame, local),
Lvalue::Local{ frame, local } if self.cur_frame() == frame => (frame, local),
_ => return Err(EvalError::Unimplemented("Storage annotations must refer to locals of the topmost stack frame.".to_owned())) // FIXME maybe this should get its own error type
};
let old_val = match stmt.kind {
......@@ -127,8 +128,8 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
self.deallocate_local(old_val)?;
}
// Just a borrowck thing
EndRegion(..) => {}
// NOPs for now.
EndRegion(_ce) => {}
// Defined to do nothing. These are added by optimization passes, to avoid changing the
// size of MIR constantly.
......@@ -167,7 +168,7 @@ impl<'a, 'b, 'tcx> ConstantExtractor<'a, 'b, 'tcx> {
fn global_item(
&mut self,
def_id: DefId,
substs: &'tcx subst::Substs<'tcx>,
substs: &'tcx Substs<'tcx>,
span: Span,
mutability: Mutability,
) {
......
......@@ -467,7 +467,7 @@ fn eval_fn_call_inner(
pub fn read_discriminant_value(&self, adt_ptr: MemoryPointer, adt_ty: Ty<'tcx>) -> EvalResult<'tcx, u128> {
use rustc::ty::layout::Layout::*;
let adt_layout = self.type_layout(adt_ty)?;
trace!("read_discriminant_value {:#?}", adt_layout);
//trace!("read_discriminant_value {:#?}", adt_layout);
let discr_val = match *adt_layout {
General { discr, .. } | CEnum { discr, signed: false, .. } => {
......
// code for @RalfJung's validation branch is dead for now
#![allow(dead_code)]
use rustc::hir::Mutability;
use rustc::hir::Mutability::*;
use rustc::mir;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::subst::Subst;
use rustc::traits::Reveal;
use rustc::infer::TransNormalize;
use rustc::middle::region::CodeExtent;
use error::{EvalError, EvalResult};
use eval_context::{EvalContext, DynamicLifetime};
use memory::{AccessKind, LockInfo};
use value::{PrimVal, Value};
use lvalue::{Lvalue, LvalueExtra};
// FIXME remove this once it lands in rustc
/// The validation operation carried by a MIR `Validate` statement, as emitted
/// by the `AddValidation` pass. Local copy of the rustc definition (see FIXME).
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum ValidationOp {
    /// Assert validity of the operand and take out the corresponding locks.
    Acquire,
    /// Release the locks held for the operand.
    Release,
    /// Release like above, but re-acquire when the given code extent ends
    /// (used when the data is lent out for the duration of a region).
    Suspend(CodeExtent),
}
/// The operand of a validation statement: what to validate, at which type,
/// optionally restricted to a region, and with which mutability.
/// Generic over the lvalue representation so the same shape serves both the
/// raw MIR operand (`mir::Lvalue`) and the evaluated one (`Lvalue`).
#[derive(Clone, Debug)]
pub struct ValidationOperand<'tcx, T> {
    // The lvalue being validated.
    pub lval: T,
    // The type at which `lval` is validated.
    pub ty: Ty<'tcx>,
    // If set, validity is only required until this code extent ends.
    pub re: Option<CodeExtent>,
    // Mutable operands get write locks, immutable ones read locks.
    pub mutbl: Mutability,
}
// FIXME end

/// A validation request as processed by the interpreter: the operand with the
/// lvalue already evaluated.
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, Lvalue<'tcx>>;
/// Internal mode driving `validate`: whether we are acquiring locks,
/// re-acquiring them after a suspension, or releasing them.
#[derive(Copy, Clone, Debug)]
enum ValidationMode {
    /// Take out fresh locks for the data being validated.
    Acquire,
    /// Recover because the given region ended
    Recover(CodeExtent),
    /// Release write locks (shared data is not released).
    Release
}
impl ValidationMode {
fn acquiring(self) -> bool {
use self::ValidationMode::*;
match self {
Acquire | Recover(_) => true,
Release => false,
}
}
}
// Validity checks
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
    /// Entry point for a MIR `Validate` statement: evaluates the operand and
    /// dispatches to `validate` with the mode matching `op`. On `Suspend`,
    /// additionally records the query so `end_region` can recover it later.
    pub(crate) fn validation_op(&mut self, op: ValidationOp, operand: &ValidationOperand<'tcx, mir::Lvalue<'tcx>>) -> EvalResult<'tcx> {
        // HACK: Determine if this method is whitelisted and hence we do not perform any validation.
        // These are known-unsafe(ish) library internals that legitimately break the locking
        // discipline, so validation is skipped for them entirely.
        {
            // The regexp we use for filtering
            use regex::Regex;
            lazy_static! {
                static ref RE: Regex = Regex::new("^(\
                    std::mem::swap::|\
                    std::mem::uninitialized::|\
                    std::ptr::read::|\
                    std::panicking::try::do_call::|\
                    std::slice::from_raw_parts_mut::|\
                    <std::heap::Heap as std::heap::Alloc>::|\
                    <std::mem::ManuallyDrop<T>><std::heap::AllocErr>::new$|\
                    <std::mem::ManuallyDrop<T> as std::ops::DerefMut><std::heap::AllocErr>::deref_mut$|\
                    std::sync::atomic::AtomicBool::get_mut$|\
                    <std::boxed::Box<T>><[a-zA-Z0-9_\\[\\]]+>::from_raw|\
                    <[a-zA-Z0-9_:<>]+ as std::slice::SliceIndex<[a-zA-Z0-9_\\[\\]]+>><[a-zA-Z0-9_\\[\\]]+>::get_unchecked_mut$|\
                    <alloc::raw_vec::RawVec<T, std::heap::Heap>><[a-zA-Z0-9_\\[\\]]+>::into_box$|\
                    <std::vec::Vec<T>><[a-zA-Z0-9_\\[\\]]+>::into_boxed_slice$\
                )").unwrap();
            }
            // Now test
            let name = self.stack[self.cur_frame()].instance.to_string();
            if RE.is_match(&name) {
                return Ok(())
            }
        }

        // We need to monomorphize ty *without* erasing lifetimes
        let ty = operand.ty.subst(self.tcx, self.substs());
        let lval = self.eval_lvalue(&operand.lval)?;
        let query = ValidationQuery { lval, ty, re: operand.re, mutbl: operand.mutbl };

        // `Suspend` behaves like `Release` now; the re-acquisition is registered below.
        let mode = match op {
            ValidationOp::Acquire => ValidationMode::Acquire,
            ValidationOp::Release => ValidationMode::Release,
            ValidationOp::Suspend(_) => ValidationMode::Release,
        };
        match self.validate(query.clone(), mode) {
            Err(EvalError::InvalidMemoryLockRelease { lock: LockInfo::ReadLock(_), .. }) => {
                // HACK: When &x is used while x is already borrowed read-only, AddValidation still
                // emits suspension. This code is legit, so just ignore the error *and*
                // do NOT register a suspension.
                // TODO: Integrate AddValidation better with borrowck so that we cannot emit
                // these wrong validation statements. This is all pretty fragile right now.
                return Ok(());
            }
            res => res,
        }?;

        // Now that we are here, we know things went well. Time to register the suspension.
        // Only mutable data is suspended: immutable data is never released above, so
        // there is nothing to re-acquire for it.
        match op {
            ValidationOp::Suspend(ce) => {
                if query.mutbl == MutMutable {
                    let lft = DynamicLifetime { frame: self.cur_frame(), region: Some(ce) };
                    trace!("Suspending {:?} until {:?}", query, ce);
                    self.suspended.entry(lft).or_insert_with(Vec::new).push(query);
                }
            }
            _ => {}
        };
        Ok(())
    }

    /// Called when the code extent `ce` ends in the current frame: drops all
    /// locks tied to that region and re-validates (in `Recover` mode) every
    /// query that was suspended until the end of `ce`.
    pub(crate) fn end_region(&mut self, ce: CodeExtent) -> EvalResult<'tcx> {
        self.memory.locks_lifetime_ended(Some(ce));
        // Recover suspended lvals
        let lft = DynamicLifetime { frame: self.cur_frame(), region: Some(ce) };
        if let Some(queries) = self.suspended.remove(&lft) {
            for query in queries {
                trace!("Recovering {:?} from suspension", query);
                self.validate(query, ValidationMode::Recover(ce))?;
            }
        }
        Ok(())
    }

    /// Validates every field of `variant` (a struct's single variant or one
    /// enum variant), recursing with the same region/mutability context.
    fn validate_variant(
        &mut self,
        query: ValidationQuery<'tcx>,
        variant: &ty::VariantDef,
        subst: &ty::subst::Substs<'tcx>,
        mode: ValidationMode,
    ) -> EvalResult<'tcx> {
        // TODO: Maybe take visibility/privacy into account.
        for (idx, field) in variant.fields.iter().enumerate() {
            let field_ty = field.ty(self.tcx, subst);
            let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
            self.validate(ValidationQuery { lval: field_lvalue, ty: field_ty, ..query }, mode)?;
        }
        Ok(())
    }

    /// Validates the pointee of a reference/box: checks alignment and
    /// non-NULLness of the pointer itself, then recursively validates the
    /// memory it points to at `pointee_ty`.
    fn validate_ptr(&mut self, val: Value, pointee_ty: Ty<'tcx>, re: Option<CodeExtent>, mutbl: Mutability, mode: ValidationMode) -> EvalResult<'tcx> {
        // Check alignment and non-NULLness
        let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
        let ptr = val.into_ptr(&mut self.memory)?;
        self.memory.check_align(ptr, align)?;

        // Recurse
        let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?;
        self.validate(ValidationQuery { lval: pointee_lvalue, ty: pointee_ty, re, mutbl }, mode)
    }

    /// Validate the lvalue at the given type. If `acquire` is false, just do a release of all write locks.
    /// Thin wrapper around `try_validate` that downgrades certain errors to
    /// success when merely releasing (see comments below).
    #[inline]
    fn validate(&mut self, query: ValidationQuery<'tcx>, mode: ValidationMode) -> EvalResult<'tcx>
    {
        match self.try_validate(query, mode) {
            // HACK: If, during releasing, we hit memory we cannot use, we just ignore that.
            // This can happen because releases are added before drop elaboration.
            // TODO: Fix the MIR so that these releases do not happen.
            res @ Err(EvalError::DanglingPointerDeref) | res @ Err(EvalError::ReadUndefBytes) => {
                if let ValidationMode::Release = mode {
                    return Ok(());
                }
                res
            }
            res => res,
        }
    }

    /// Core of validation: first handles the lock bookkeeping for "owning"
    /// types, then checks/recurses per type constructor. May fail with
    /// `ValidationFailure` or lock errors; some of those are filtered by
    /// `validate` above.
    fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMode) -> EvalResult<'tcx>
    {
        use rustc::ty::TypeVariants::*;
        use rustc::ty::RegionKind::*;
        use rustc::ty::AdtKind;

        // No point releasing shared stuff.
        if !mode.acquiring() && query.mutbl == MutImmutable {
            return Ok(());
        }
        // When we recover, we may see data whose validity *just* ended. Do not acquire it.
        if let ValidationMode::Recover(ce) = mode {
            if Some(ce) == query.re {
                return Ok(());
            }
        }

        // HACK: For now, bail out if we hit a dead local during recovery (can happen because sometimes we have
        // StorageDead before EndRegion).
        // TODO: We should rather fix the MIR.
        // HACK: Releasing on dead/undef local variables is a NOP. This can happen because of releases being added
        // before drop elaboration.
        // TODO: Fix the MIR so that these releases do not happen.
        match query.lval {
            Lvalue::Local { frame, local } => {
                let res = self.stack[frame].get_local(local);
                match (res, mode) {
                    (Err(EvalError::DeadLocal), ValidationMode::Recover(_)) |
                    (Err(EvalError::DeadLocal), ValidationMode::Release) |
                    (Ok(Value::ByVal(PrimVal::Undef)), ValidationMode::Release) => {
                        return Ok(());
                    }
                    _ => {},
                }
            },
            _ => {}
        }

        // This is essentially a copy of normalize_associated_type, but without erasure
        // (we must keep lifetimes intact so the region-based locking works).
        if query.ty.has_projection_types() {
            let param_env = ty::ParamEnv::empty(Reveal::All);
            let old_ty = query.ty;
            query.ty = self.tcx.infer_ctxt().enter(move |infcx| {
                old_ty.trans_normalize(&infcx, param_env)
            })
        }
        trace!("{:?} on {:?}", mode, query);

        // Decide whether this type *owns* the memory it covers (like integers), or whether it
        // just assembles pieces (that each own their memory) together to a larger whole.
        // TODO: Currently, we don't acquire locks for padding and discriminants. We should.
        let is_owning = match query.ty.sty {
            TyInt(_) | TyUint(_) | TyRawPtr(_) |
            TyBool | TyFloat(_) | TyChar | TyStr |
            TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true,
            TyAdt(adt, _) if adt.is_box() => true,
            TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) | TyDynamic(..) => false,
            TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => bug!("I got an incomplete/unnormalized type for validation"),
        };
        if is_owning {
            // Owning types back their own memory: acquire/release the lock covering it.
            match query.lval {
                Lvalue::Ptr { ptr, extra, aligned: _ } => {
                    // Determine the size
                    // FIXME: Can we reuse size_and_align_of_dst for Lvalues?
                    let len = match self.type_size(query.ty)? {
                        Some(size) => {
                            assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type");
                            size
                        }
                        None => {
                            // The only unsized type we consider "owning" is TyStr.
                            assert_eq!(query.ty.sty, TyStr, "Found a surprising unsized owning type");
                            // The extra must be the length, in bytes.
                            match extra {
                                LvalueExtra::Length(len) => len,
                                _ => bug!("TyStr must have a length as extra"),
                            }
                        }
                    };
                    // Handle locking
                    if len > 0 {
                        let ptr = ptr.to_ptr()?;
                        let access = match query.mutbl { MutMutable => AccessKind::Write, MutImmutable => AccessKind::Read };
                        if mode.acquiring() {
                            self.memory.acquire_lock(ptr, len, query.re, access)?;
                        } else {
                            // Releasing never touches read locks; see `validation_op` for the
                            // handling of the resulting InvalidMemoryLockRelease errors.
                            self.memory.release_write_lock(ptr, len)?;
                        }
                    }
                }
                Lvalue::Local { .. } | Lvalue::Global(..) => {
                    // These are not backed by memory, so we have nothing to do.
                }
            }
        }

        // Per-type validation and recursion into components.
        match query.ty.sty {
            TyInt(_) | TyUint(_) | TyRawPtr(_) => {
                // TODO: Make sure these are not undef.
                // We could do a bounds-check and other sanity checks on the lvalue, but it would be a bug in miri for this to ever fail.
                Ok(())
            }
            TyBool | TyFloat(_) | TyChar | TyStr => {
                // TODO: Check if these are valid bool/float/codepoint/UTF-8, respectively (and in particular, not undef).
                Ok(())
            }
            TyNever => {
                Err(EvalError::ValidationFailure(format!("The empty type is never valid.")))
            }
            TyRef(region, ty::TypeAndMut { ty: pointee_ty, mutbl }) => {
                let val = self.read_lvalue(query.lval)?;
                // Sharing restricts our context
                if mutbl == MutImmutable {
                    query.mutbl = MutImmutable;
                }
                // Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet,
                // we record the region of this borrow to the context.
                if query.re == None {
                    match *region {
                        ReScope(ce) => query.re = Some(ce),
                        // It is possible for us to encounter erased lifetimes here because the lifetimes in
                        // this functions' Subst will be erased.
                        _ => {},
                    }
                }
                self.validate_ptr(val, pointee_ty, query.re, query.mutbl, mode)
            }
            TyAdt(adt, _) if adt.is_box() => {
                let val = self.read_lvalue(query.lval)?;
                self.validate_ptr(val, query.ty.boxed_ty(), query.re, query.mutbl, mode)
            }
            TyFnPtr(_sig) => {
                let ptr = self.read_lvalue(query.lval)?.into_ptr(&mut self.memory)?.to_ptr()?;
                self.memory.get_fn(ptr)?;
                // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
                Ok(())
            }
            TyFnDef(..) => {
                // This is a zero-sized type with all relevant data sitting in the type.
                // There is nothing to validate.
                Ok(())
            }

            // Compound types
            TySlice(elem_ty) => {
                // Slices carry their length in the lvalue's extra data.
                let len = match query.lval {
                    Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len,
                    _ => bug!("acquire_valid of a TySlice given non-slice lvalue: {:?}", query.lval),
                };
                for i in 0..len {
                    let inner_lvalue = self.lvalue_index(query.lval, query.ty, i)?;
                    self.validate(ValidationQuery { lval: inner_lvalue, ty: elem_ty, ..query }, mode)?;
                }
                Ok(())
            }
            TyArray(elem_ty, len) => {
                for i in 0..len {
                    let inner_lvalue = self.lvalue_index(query.lval, query.ty, i as u64)?;
                    self.validate(ValidationQuery { lval: inner_lvalue, ty: elem_ty, ..query }, mode)?;
                }
                Ok(())
            }
            TyDynamic(_data, _region) => {
                // Check that this is a valid vtable
                let vtable = match query.lval {
                    Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable,
                    _ => bug!("acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}", query.lval),
                };
                self.read_size_and_align_from_vtable(vtable)?;
                // TODO: Check that the vtable contains all the function pointers we expect it to have.
                // Trait objects cannot have any operations performed
                // on them directly. We cannot, in general, even acquire any locks as the trait object *could*
                // contain an UnsafeCell. If we call functions to get access to data, we will validate
                // their return values. So, it doesn't seem like there's anything else to do.
                Ok(())
            }
            TyAdt(adt, subst) => {
                if Some(adt.did) == self.tcx.lang_items.unsafe_cell_type() && query.mutbl == MutImmutable {
                    // No locks for shared unsafe cells. Also no other validation, the only field is private anyway.
                    return Ok(());
                }

                match adt.adt_kind() {
                    AdtKind::Enum => {
                        // TODO: Can we get the discriminant without forcing an allocation?
                        let ptr = self.force_allocation(query.lval)?.to_ptr()?;
                        let discr = self.read_discriminant_value(ptr, query.ty)?;

                        // Get variant index for discriminant
                        let variant_idx = adt.discriminants(self.tcx)
                            .position(|variant_discr| variant_discr.to_u128_unchecked() == discr)
                            .ok_or(EvalError::InvalidDiscriminant)?;
                        let variant = &adt.variants[variant_idx];

                        if variant.fields.len() > 0 {
                            // Downcast to this variant, if needed
                            let lval = if adt.variants.len() > 1 {
                                self.eval_lvalue_projection(query.lval, query.ty, &mir::ProjectionElem::Downcast(adt, variant_idx))?
                            } else {
                                query.lval
                            };

                            // Recursively validate the fields
                            self.validate_variant(ValidationQuery { lval, ..query }, variant, subst, mode)
                        } else {
                            // No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum.
                            Ok(())
                        }
                    }
                    AdtKind::Struct => {
                        self.validate_variant(query, adt.struct_variant(), subst, mode)
                    }
                    AdtKind::Union => {
                        // No guarantees are provided for union types.
                        // TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
                        Ok(())
                    }
                }
            }
            TyTuple(ref types, _) => {
                for (idx, field_ty) in types.iter().enumerate() {
                    let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
                    self.validate(ValidationQuery { lval: field_lvalue, ty: field_ty, ..query }, mode)?;
                }
                Ok(())
            }
            TyClosure(def_id, ref closure_substs) => {
                for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() {
                    let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
                    self.validate(ValidationQuery { lval: field_lvalue, ty: field_ty, ..query }, mode)?;
                }
                // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
                // Is there other things we can/should check? Like vtable pointers?
                Ok(())
            }
            _ => bug!("We already establishd that this is a type we support.")
        }
    }
}
......@@ -55,4 +55,5 @@ fn main() {
let bar_ref : *const BarS<[u32]> = foo_to_bar(u);
let z : &BarS<[u32]> = unsafe{&*bar_ref};
assert_eq!(&z.0, &[0,1,2]);
// If validation fails here, that's likely because an immutable suspension is recovered mutably.
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册