Commit 85fd3f8e authored by Oliver Schneider, committed by GitHub

Merge pull request #297 from RalfJung/mir-validate

Validation update
......@@ -16,7 +16,7 @@ script:
- |
# Test plain miri
cargo build --release --features "cargo_miri" &&
cargo test --release &&
cargo test --release --all &&
cargo install --features "cargo_miri"
- |
# Test cargo miri
......
......@@ -8,7 +8,6 @@ version = "0.1.0"
workspace = "../.."
[lib]
test = false
path = "lib.rs"
[dependencies]
......
......@@ -29,19 +29,14 @@ pub enum AccessKind {
/// Information about a lock that is currently held.
#[derive(Clone, Debug)]
struct LockInfo {
suspended: Vec<SuspendedWriteLock>,
/// Stores for which lifetimes (of the original write lock) we got
/// which suspensions.
suspended: HashMap<DynamicLifetime, Vec<CodeExtent>>,
/// The current state of the lock that's actually effective.
active: Lock,
}
#[derive(Clone, Debug)]
struct SuspendedWriteLock {
/// Original lifetime of the lock that is now suspended
lft: DynamicLifetime,
/// Regions that all have to end to reenable this suspension
suspensions: Vec<CodeExtent>,
}
#[derive(Clone, Debug)]
#[derive(Clone, Debug, PartialEq)]
pub enum Lock {
NoLock,
WriteLock(DynamicLifetime),
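To make the new bookkeeping concrete, here is a minimal self-contained sketch of the suspension map, with `DynamicLifetime` and `CodeExtent` replaced by integer stand-ins (hypothetical simplifications; the real miri types carry more information):

```rust
use std::collections::HashMap;

// Stand-ins for miri's DynamicLifetime and CodeExtent.
type Lifetime = u32;
type Region = u32;

#[derive(Default)]
struct Suspensions {
    // For each suspended write-lock lifetime, the regions that must all
    // end before that write lock becomes active again.
    map: HashMap<Lifetime, Vec<Region>>,
}

impl Suspensions {
    fn suspend(&mut self, lft: Lifetime, region: Region) {
        self.map.entry(lft).or_insert_with(Vec::new).push(region);
    }

    /// Remove one suspension; returns true once the last suspension for
    /// `lft` is gone, i.e. the write lock can become active again.
    fn recover(&mut self, lft: Lifetime, region: Region) -> bool {
        let empty = {
            let suspensions = self.map.get_mut(&lft).expect("lock not suspended");
            let idx = suspensions.iter().position(|r| *r == region).expect("no such region");
            suspensions.remove(idx);
            suspensions.is_empty()
        };
        if empty {
            self.map.remove(&lft);
        }
        empty
    }
}

fn main() {
    let mut s = Suspensions::default();
    // Two shared reborrows with the same lifetime suspend the lock twice.
    s.suspend(1, 10);
    s.suspend(1, 11);
    assert!(!s.recover(1, 10)); // one suspension left
    assert!(s.recover(1, 11)); // all gone: the lock is active again
}
```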
......@@ -57,7 +52,7 @@ fn default() -> Self {
impl LockInfo {
fn new(lock: Lock) -> LockInfo {
LockInfo { suspended: Vec::new(), active: lock }
LockInfo { suspended: HashMap::new(), active: lock }
}
fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
......@@ -513,9 +508,10 @@ pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Opti
}
/// Release or suspend a write lock of the given lifetime prematurely.
/// When releasing, if there is no write lock or someone else's write lock, that's an error.
/// When releasing, if there is a read lock or someone else's write lock, that's an error.
/// We *do* accept releasing a NoLock, as this can happen when a local is first acquired and later force_allocate'd.
/// When suspending, the same cases are fine; we just register an additional suspension.
pub(crate) fn release_write_lock(&mut self, ptr: MemoryPointer, len: u64,
pub(crate) fn suspend_write_lock(&mut self, ptr: MemoryPointer, len: u64,
lock_region: Option<CodeExtent>, suspend: Option<CodeExtent>) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
......@@ -523,7 +519,6 @@ pub(crate) fn release_write_lock(&mut self, ptr: MemoryPointer, len: u64,
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
'locks: for lock in alloc.locks.iter_mut(ptr.offset, len) {
trace!("Releasing {:?}", lock);
let is_our_lock = match lock.active {
WriteLock(lft) => {
lft == lock_lft
......@@ -533,30 +528,25 @@ pub(crate) fn release_write_lock(&mut self, ptr: MemoryPointer, len: u64,
}
};
if is_our_lock {
trace!("Releasing {:?} at {:?}", lock.active, lock_lft);
// Disable the lock
lock.active = NoLock;
} else {
trace!("Not touching {:?} at {:?} as its not our lock", lock.active, lock_lft);
}
match suspend {
Some(suspend_region) => {
if is_our_lock {
// We just released this lock, so add a new suspension
lock.suspended.push(SuspendedWriteLock { lft: lock_lft, suspensions: vec![suspend_region] });
} else {
// Find our lock in the suspended ones
for suspended_lock in lock.suspended.iter_mut().rev() {
if suspended_lock.lft == lock_lft {
// Found it!
suspended_lock.suspensions.push(suspend_region);
continue 'locks;
}
}
// We did not find it. Someone else had the lock and we have not suspended it, that's just wrong.
return err!(InvalidMemoryLockRelease { ptr, len, frame: cur_frame, lock: lock.active.clone() });
}
trace!("Adding suspension to {:?} at {:?}", lock.active, lock_lft);
// We just released this lock, so add a new suspension.
// FIXME: Really, if there is already a suspension when is_our_lock, or no suspension when !is_our_lock, something is amiss.
// But this model is not good enough yet to prevent that.
lock.suspended.entry(lock_lft)
.or_insert_with(|| Vec::new())
.push(suspend_region);
}
None => {
// If we do not suspend, make sure we actually released something
if !is_our_lock {
// Make sure we did not try to release someone else's lock.
if !is_our_lock && lock.active != NoLock {
return err!(InvalidMemoryLockRelease { ptr, len, frame: cur_frame, lock: lock.active.clone() });
}
}
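For intuition, here is a toy model of the plain-release path (`suspend == None`) shown above, with stand-in types rather than miri's real API: releasing succeeds when the active lock is our own write lock or `NoLock`, and errors on a read lock or someone else's write lock.

```rust
#[derive(Clone, Copy, Debug)]
enum Lock {
    NoLock,
    WriteLock(u32), // holder's lifetime, as an integer stand-in
    ReadLock(usize), // number of readers
}

fn release(active: &mut Lock, our_lft: u32) -> Result<(), &'static str> {
    match *active {
        Lock::WriteLock(lft) if lft == our_lft => {
            *active = Lock::NoLock; // disable our lock
            Ok(())
        }
        // Releasing a NoLock is accepted (the force_allocate case from the doc comment).
        Lock::NoLock => Ok(()),
        _ => Err("InvalidMemoryLockRelease"),
    }
}

fn main() {
    let mut l = Lock::WriteLock(1);
    assert!(release(&mut l, 1).is_ok());
    assert!(release(&mut l, 1).is_ok()); // now NoLock: still fine
    l = Lock::ReadLock(1);
    assert!(release(&mut l, 1).is_err());
}
```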
......@@ -577,34 +567,33 @@ pub(crate) fn recover_write_lock(&mut self, ptr: MemoryPointer, len: u64,
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
for lock in alloc.locks.iter_mut(ptr.offset, len) {
// If we have a suspension here, it will be the topmost one
let (got_the_lock, pop_suspension) = match lock.suspended.last_mut() {
None => (true, false),
Some(suspended_lock) => {
if suspended_lock.lft == lock_lft {
// That's us! Remove suspension (it should be in there). The same suspension can
// occur multiple times (when there are multiple shared borrows of this that have the same
// lifetime); only remove one of them.
let idx = match suspended_lock.suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) {
None => // TODO: Can the user trigger this?
bug!("We have this lock suspended, but not for the given region."),
Some((idx, _)) => idx
};
suspended_lock.suspensions.remove(idx);
let got_lock = suspended_lock.suspensions.is_empty();
(got_lock, got_lock)
} else {
// Someone else's suspension up top, we should be able to grab the lock
(true, false)
// Check if we have a suspension here
let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_lft) {
None => {
trace!("No suspension around, we can just acquire");
(true, false)
}
Some(suspensions) => {
trace!("Found suspension of {:?}, removing it", lock_lft);
// That's us! Remove suspension (it should be in there). The same suspension can
// occur multiple times (when there are multiple shared borrows of this that have the same
// lifetime); only remove one of them.
let idx = match suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) {
None => // TODO: Can the user trigger this?
bug!("We have this lock suspended, but not for the given region."),
Some((idx, _)) => idx
};
suspensions.remove(idx);
let got_lock = suspensions.is_empty();
if got_lock {
trace!("All suspensions are gone, we can have the lock again");
}
(got_lock, got_lock)
}
};
if pop_suspension { // with NLL; we could do that up in the match above...
lock.suspended.pop();
} else {
// Sanity check: Our lock should not be in the suspension list
let found = lock.suspended.iter().find(|suspended_lock| suspended_lock.lft == lock_lft);
assert!(found.is_none());
if remove_suspension { // with NLL, we could do that up in the match above...
assert!(got_the_lock);
lock.suspended.remove(&lock_lft);
}
if got_the_lock {
match lock.active {
......@@ -653,7 +642,7 @@ pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option<CodeExtent>)
lock.active = NoLock;
}
// Also clean up suspended write locks
lock.suspended.retain(|suspended_lock| !has_ended(&suspended_lock.lft));
lock.suspended.retain(|lft, _suspensions| !has_ended(lft));
}
// Clean up the map
alloc.locks.retain(|lock| {
......
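Note that `retain` on the suspension map now takes a two-argument closure, matching `HashMap::retain`; a minimal standalone illustration of the cleanup above:

```rust
use std::collections::HashMap;

fn main() {
    let mut suspended: HashMap<u32, Vec<u32>> = HashMap::new();
    suspended.insert(1, vec![10]);
    suspended.insert(2, vec![20]);
    // Drop all suspensions whose (stand-in) lifetime has ended, here: lifetime 2.
    let has_ended = |lft: &u32| *lft == 2;
    suspended.retain(|lft, _suspensions| !has_ended(lft));
    assert_eq!(suspended.len(), 1);
    assert!(suspended.contains_key(&1));
}
```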
//! Implements a map from disjoint non-empty integer ranges to data associated with those ranges
//! Implements a map from integer indices to data.
//! Rather than storing data for every index, this internally maps entire ranges to the data.
//! To this end, the APIs all work on ranges, not on individual integers. Ranges are split as
//! necessary (e.g. when [0,5) is first associated with X, and then [1,2) is mutated).
//! Users must not depend on whether a range is coalesced or not, even though this is observable
//! via the iteration APIs.
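As a quick usage sketch of the splitting behavior just described (using the `RangeMap` API defined in this file; it mirrors the tests added at the bottom):

```rust
fn main() {
    // Map all of [0, 5) to 42, then mutate only [1, 2); internally the
    // stored range [0, 5) is split into [0, 1), [1, 2) and [2, 5).
    let mut map = RangeMap::<i32>::new();
    for x in map.iter_mut(0, 5) { *x = 42; }
    for x in map.iter_mut(1, 1) { *x = 23; }
    assert_eq!(map.iter(0, 1).next(), Some(&42));
    assert_eq!(map.iter(1, 1).next(), Some(&23));
    assert_eq!(map.iter(2, 1).next(), Some(&42));
}
```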
use std::collections::{BTreeMap};
use std::ops;
......@@ -21,6 +26,7 @@ struct Range {
impl Range {
fn range(offset: u64, len: u64) -> ops::Range<Range> {
assert!(len > 0);
// We select all elements that are within
// the range given by the offset into the allocation and the length.
// This is sound if all ranges that intersect with the argument range, are in the
......@@ -36,6 +42,7 @@ fn range(offset: u64, len: u64) -> ops::Range<Range> {
left..right
}
/// Tests if any part of [offset, offset+len) overlaps with this range.
fn overlaps(&self, offset: u64, len: u64) -> bool {
assert!(len > 0);
offset < self.end && offset+len >= self.start
......@@ -48,6 +55,7 @@ pub fn new() -> RangeMap<T> {
}
fn iter_with_range<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=(&'a Range, &'a T)> + 'a {
assert!(len > 0);
self.map.range(Range::range(offset, len))
.filter_map(move |(range, data)| {
if range.overlaps(offset, len) {
......@@ -63,7 +71,7 @@ pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=&'a T> +
}
fn split_entry_at(&mut self, offset: u64) where T: Clone {
let range = match self.iter_with_range(offset, 0).next() {
let range = match self.iter_with_range(offset, 1).next() {
Some((&range, _)) => range,
None => return,
};
......@@ -88,6 +96,7 @@ pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item=&'a mut T> + 'a {
pub fn iter_mut_with_gaps<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=&'a mut T> + 'a
where T: Clone
{
assert!(len > 0);
// Preparation: Split first and last entry as needed.
self.split_entry_at(offset);
self.split_entry_at(offset+len);
......@@ -112,14 +121,15 @@ pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=&
{
// Do a first iteration to collect the gaps
let mut gaps = Vec::new();
let mut last_end = None;
let mut last_end = offset;
for (range, _) in self.iter_with_range(offset, len) {
if let Some(last_end) = last_end {
if last_end < range.start {
gaps.push(Range { start: last_end, end: range.start });
}
if last_end < range.start {
gaps.push(Range { start: last_end, end: range.start });
}
last_end = Some(range.end);
last_end = range.end;
}
if last_end < offset+len {
gaps.push(Range { start: last_end, end: offset+len });
}
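// Example: with entries covering [11,12) and [15,16), a query over [10,20)
// collects the gaps [10,11), [12,15) and [16,20) (see the `gaps` test below).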
// Add default for all gaps
......@@ -147,3 +157,43 @@ pub fn retain<F>(&mut self, mut f: F)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Query the map at every offset in the range and collect the results.
fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64) -> Vec<T> {
(offset..offset+len).into_iter().map(|i| *map.iter(i, 1).next().unwrap()).collect()
}
#[test]
fn basic_insert() {
let mut map = RangeMap::<i32>::new();
// Insert
for x in map.iter_mut(10, 1) {
*x = 42;
}
// Check
assert_eq!(to_vec(&map, 10, 1), vec![42]);
}
#[test]
fn gaps() {
let mut map = RangeMap::<i32>::new();
for x in map.iter_mut(11, 1) {
*x = 42;
}
for x in map.iter_mut(15, 1) {
*x = 42;
}
// Now request a range that needs three gaps filled
for x in map.iter_mut(10, 10) {
if *x != 42 { *x = 23; }
}
assert_eq!(to_vec(&map, 10, 10), vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23]);
assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 42, 23, 23]);
}
}
use rustc::hir::Mutability;
use rustc::hir::Mutability::*;
use rustc::mir::{self, ValidationOp, ValidationOperand};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::subst::Subst;
use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
use rustc::ty::subst::{Substs, Subst};
use rustc::traits;
use rustc::infer::InferCtxt;
use rustc::traits::Reveal;
use rustc::infer::TransNormalize;
use rustc::middle::region::CodeExtent;
use super::{
......@@ -18,7 +19,7 @@
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, Lvalue>;
#[derive(Copy, Clone, Debug)]
#[derive(Copy, Clone, Debug, PartialEq)]
enum ValidationMode {
Acquire,
/// Recover because the given region ended
......@@ -110,6 +111,109 @@ pub(crate) fn end_region(&mut self, ce: CodeExtent) -> EvalResult<'tcx> {
Ok(())
}
fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
return normalize_associated_type(self.tcx, &ty);
use syntax::codemap::{Span, DUMMY_SP};
// We copy a bunch of stuff from rustc/infer/mod.rs to be able to tweak its behavior
fn normalize_projections_in<'a, 'gcx, 'tcx, T>(
self_: &InferCtxt<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: &T)
-> T::Lifted
where T: TypeFoldable<'tcx> + ty::Lift<'gcx>
{
let mut selcx = traits::SelectionContext::new(self_);
let cause = traits::ObligationCause::dummy();
let traits::Normalized { value: result, obligations } =
traits::normalize(&mut selcx, param_env, cause, value);
let mut fulfill_cx = traits::FulfillmentContext::new();
for obligation in obligations {
fulfill_cx.register_predicate_obligation(self_, obligation);
}
drain_fulfillment_cx_or_panic(self_, DUMMY_SP, &mut fulfill_cx, &result)
}
fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>(
self_: &InferCtxt<'a, 'gcx, 'tcx>,
span: Span,
fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
result: &T)
-> T::Lifted
where T: TypeFoldable<'tcx> + ty::Lift<'gcx>
{
// In principle, we only need to do this so long as `result`
// contains unbound type parameters. It could be a slight
// optimization to stop iterating early.
match fulfill_cx.select_all_or_error(self_) {
Ok(()) => { }
Err(errors) => {
span_bug!(span, "Encountered errors `{:?}` resolving bounds after type-checking",
errors);
}
}
let result = self_.resolve_type_vars_if_possible(result);
let result = self_.tcx.fold_regions(&result, &mut false, |r, _| match *r { ty::ReVar(_) => self_.tcx.types.re_erased, _ => r });
match self_.tcx.lift_to_global(&result) {
Some(result) => result,
None => {
span_bug!(span, "Uninferred types/regions in `{:?}`", result);
}
}
}
trait MyTransNormalize<'gcx>: TypeFoldable<'gcx> {
fn my_trans_normalize<'a, 'tcx>(&self,
infcx: &InferCtxt<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>)
-> Self;
}
macro_rules! items { ($($item:item)+) => ($($item)+) }
macro_rules! impl_trans_normalize {
($lt_gcx:tt, $($ty:ty),+) => {
items!($(impl<$lt_gcx> MyTransNormalize<$lt_gcx> for $ty {
fn my_trans_normalize<'a, 'tcx>(&self,
infcx: &InferCtxt<'a, $lt_gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>)
-> Self {
normalize_projections_in(infcx, param_env, self)
}
})+);
}
}
impl_trans_normalize!('gcx,
Ty<'gcx>,
&'gcx Substs<'gcx>,
ty::FnSig<'gcx>,
ty::PolyFnSig<'gcx>,
ty::ClosureSubsts<'gcx>,
ty::PolyTraitRef<'gcx>,
ty::ExistentialTraitRef<'gcx>
);
fn normalize_associated_type<'a, 'tcx, T>(self_: TyCtxt<'a, 'tcx, 'tcx>, value: &T) -> T
where T: MyTransNormalize<'tcx>
{
let param_env = ty::ParamEnv::empty(Reveal::All);
if !value.has_projection_types() {
return value.clone();
}
self_.infer_ctxt().enter(|infcx| {
value.my_trans_normalize(&infcx, param_env)
})
}
}
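For intuition about what is being normalized: an associated-type projection such as `<T as Trait>::Out` must be resolved to a concrete type before validation can match on it. A tiny surface-level illustration (hypothetical trait, not part of miri):

```rust
trait Project { type Out; }
impl Project for Vec<i32> { type Out = i32; }

fn main() {
    // The projection `<Vec<i32> as Project>::Out` normalizes to `i32`.
    // `normalize_type_unerased` does the same for miri, but without erasing
    // the regions that the validation machinery still needs.
    let x: <Vec<i32> as Project>::Out = 5;
    println!("{}", x);
}
```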
fn validate_variant(
&mut self,
query: ValidationQuery<'tcx>,
......@@ -142,16 +246,15 @@ fn validate_ptr(&mut self, val: Value, pointee_ty: Ty<'tcx>, re: Option<CodeExte
fn validate(&mut self, query: ValidationQuery<'tcx>, mode: ValidationMode) -> EvalResult<'tcx>
{
match self.try_validate(query, mode) {
// Releasing an uninitialized variable is a NOP. This is needed because
// ReleaseUntil(None) of an uninitialized variable is a NOP. This is needed because
// we have to release the return value of a function; due to destination-passing-style
// the callee may directly write there.
// TODO: Ideally we would know whether the destination is already initialized, and only
// release if it is.
res @ Err(EvalError{ kind: EvalErrorKind::ReadUndefBytes, ..}) => {
if !mode.acquiring() {
return Ok(());
}
res
// release if it is. But of course that can't even always be statically determined.
Err(EvalError{ kind: EvalErrorKind::ReadUndefBytes, ..})
if mode == ValidationMode::ReleaseUntil(None)
=> {
return Ok(());
}
res => res,
}
......@@ -190,14 +293,7 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod
_ => {}
}
// This is essentially a copy of normalize_associated_type, but without erasure
if query.ty.has_projection_types() {
let param_env = ty::ParamEnv::empty(Reveal::All);
let old_ty = query.ty;
query.ty = self.tcx.infer_ctxt().enter(move |infcx| {
old_ty.trans_normalize(&infcx, param_env)
})
}
query.ty = self.normalize_type_unerased(&query.ty);
trace!("{:?} on {:?}", mode, query);
// Decide whether this type *owns* the memory it covers (like integers), or whether it
......@@ -212,38 +308,46 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod
TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => bug!("I got an incomplete/unnormalized type for validation"),
};
if is_owning {
match query.lval {
Lvalue::Ptr { ptr, extra } => {
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Lvalues?
let len = match self.type_size(query.ty)? {
Some(size) => {
assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type");
size
}
None => {
// The only unsized type we consider "owning" is TyStr.
assert_eq!(query.ty.sty, TyStr, "Found a surprising unsized owning type");
// The extra must be the length, in bytes.
match extra {
LvalueExtra::Length(len) => len,
_ => bug!("TyStr must have a length as extra"),
}
// We need to lock. So we need memory. So we have to force_allocate.
// Tracking the same state for locals not backed by memory would just duplicate too
// much machinery.
// FIXME: We ignore alignment.
let (ptr, extra) = self.force_allocation(query.lval)?.to_ptr_extra_aligned();
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Lvalues?
let len = match self.type_size(query.ty)? {
Some(size) => {
assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type");
size
}
None => {
// The only unsized type we consider "owning" is TyStr.
assert_eq!(query.ty.sty, TyStr, "Found a surprising unsized owning type");
// The extra must be the length, in bytes.
match extra {
LvalueExtra::Length(len) => len,
_ => bug!("TyStr must have a length as extra"),
}
}
};
// Handle locking
if len > 0 {
let ptr = ptr.to_ptr()?;
match query.mutbl {
MutImmutable =>
if mode.acquiring() {
self.memory.acquire_lock(ptr, len, query.re, AccessKind::Read)?;
}
};
// Handle locking
if len > 0 {
let ptr = ptr.to_ptr()?;
let access = match query.mutbl { MutMutable => AccessKind::Write, MutImmutable => AccessKind::Read };
// No releasing of read locks, ever.
MutMutable =>
match mode {
ValidationMode::Acquire => self.memory.acquire_lock(ptr, len, query.re, access)?,
ValidationMode::Recover(ending_ce) => self.memory.recover_write_lock(ptr, len, query.re, ending_ce)?,
ValidationMode::ReleaseUntil(suspended_ce) => self.memory.release_write_lock(ptr, len, query.re, suspended_ce)?,
ValidationMode::Acquire =>
self.memory.acquire_lock(ptr, len, query.re, AccessKind::Write)?,
ValidationMode::Recover(ending_ce) =>
self.memory.recover_write_lock(ptr, len, query.re, ending_ce)?,
ValidationMode::ReleaseUntil(suspended_ce) =>
self.memory.suspend_write_lock(ptr, len, query.re, suspended_ce)?,
}
}
}
Lvalue::Local { .. } => {
// Not backed by memory, so we have nothing to do.
}
}
}
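The locking decision above can be summarized in a small sketch (stand-in types, not miri's real signatures): immutable references only ever acquire read locks, while mutable references acquire, recover, or suspend write locks depending on the validation mode.

```rust
#[derive(Debug)]
enum AccessKind { Read, Write }
#[derive(Debug)]
enum ValidationMode { Acquire, Recover(u32), ReleaseUntil(Option<u32>) }

// Describe the lock operation that validation would perform.
fn lock_action(mutable: bool, mode: ValidationMode) -> String {
    match (mutable, mode) {
        (false, ValidationMode::Acquire) => format!("acquire_lock({:?})", AccessKind::Read),
        (false, mode) => format!("no-op in {:?}: read locks are never released", mode),
        (true, ValidationMode::Acquire) => format!("acquire_lock({:?})", AccessKind::Write),
        (true, ValidationMode::Recover(ce)) => format!("recover_write_lock(ending: {})", ce),
        (true, ValidationMode::ReleaseUntil(ce)) => format!("suspend_write_lock(until: {:?})", ce),
    }
}

fn main() {
    println!("{}", lock_action(false, ValidationMode::Acquire));
    println!("{}", lock_action(true, ValidationMode::ReleaseUntil(None)));
}
```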
......
// Validation forces more allocation; disable it.
// compile-flags: -Zmir-emit-validate=0
#![feature(box_syntax, custom_attribute, attr_literals)]
#![miri(memory_size=2048)]
......
#![allow(unused_variables)]
mod safe {
pub fn safe(x: &mut i32, y: &mut i32) {} //~ ERROR: in conflict with lock WriteLock
}
fn main() {
let x = &mut 0 as *mut _;
unsafe { safe::safe(&mut *x, &mut *x) };
}
#![allow(unused_variables)]
mod safe {
pub fn safe(x: &i32, y: &mut i32) {} //~ ERROR: in conflict with lock ReadLock
}
fn main() {
let x = &mut 0 as *mut _;
unsafe { safe::safe(&*x, &mut *x) };
}
#![allow(unused_variables)]
mod safe {
pub fn safe(x: &mut i32, y: &i32) {} //~ ERROR: in conflict with lock WriteLock
}
fn main() {
let x = &mut 0 as *mut _;
unsafe { safe::safe(&mut *x, &*x) };
}
#![allow(unused_variables)]
mod safe {
use std::cell::Cell;
// Make sure &mut UnsafeCell also has a lock to it
pub fn safe(x: &mut Cell<i32>, y: &i32) {} //~ ERROR: in conflict with lock WriteLock
}
fn main() {
let x = &mut 0 as *mut _;
unsafe { safe::safe(&mut *(x as *mut _), &*x) };
}
#![allow(unused_variables)]
mod safe {
use std::slice::from_raw_parts_mut;
pub fn split_at_mut<T>(self_: &mut [T], mid: usize) -> (&mut [T], &mut [T]) {
let len = self_.len();
let ptr = self_.as_mut_ptr();
unsafe {
assert!(mid <= len);
(from_raw_parts_mut(ptr, len - mid), // BUG: should be "mid" instead of "len - mid"
from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
}
}
}
fn main() {
let mut array = [1,2,3,4];
let _x = safe::split_at_mut(&mut array, 0); //~ ERROR: in conflict with lock WriteLock
}
#![allow(unused_variables)]
mod safe {
pub(crate) fn safe(x: &u32) {
let x : &mut u32 = unsafe { &mut *(x as *const _ as *mut _) };
*x = 42; //~ ERROR: in conflict with lock ReadLock
}
}
fn main() {
let target = &mut 42;
let target_ref = &target;
// do a reborrow, but we keep the lock
safe::safe(&*target);
}
#![allow(unused_variables)]
static mut PTR: *mut u8 = 0 as *mut _;
fn fun1(x: &mut u8) {
unsafe {
PTR = x;
}
}
fn fun2() {
// Now we use a pointer we are not allowed to use
let _x = unsafe { *PTR }; //~ ERROR: in conflict with lock WriteLock
}
fn main() {
let mut val = 0;
fun1(&mut val);
fun2();
}
#![allow(unused_variables)]
#[repr(u32)]
enum Bool { True }
mod safe {
pub(crate) fn safe(x: &mut super::Bool) {
let x = x as *mut _ as *mut u32;
unsafe { *x = 44; } // out-of-bounds enum discriminant
}
}
fn main() {
let mut x = Bool::True;
safe::safe(&mut x); //~ ERROR: invalid enum discriminant
}
#![allow(unused_variables)]
mod safe {
// This makes a ref that was passed to us via &mut alias with things it should not alias with
pub(crate) fn safe(x: &mut &u32, target: &mut u32) {
unsafe { *x = &mut *(target as *mut _); }
}
}
fn main() {
let target = &mut 42;
let mut target_alias = &42; // initial dummy value
safe::safe(&mut target_alias, target); //~ ERROR: in conflict with lock ReadLock
}
#![allow(unused_variables)]
mod safe {
pub(crate) fn safe(x: *mut u32) {
unsafe { *x = 42; } //~ ERROR: in conflict with lock WriteLock
}
}
fn main() {
let target = &mut 42u32;
let target2 = target as *mut _;
drop(&mut *target); // reborrow
// Now make sure we still got the lock
safe::safe(target2);
}
// FIXME: disable validation until we figure out how to handle <https://github.com/solson/miri/issues/296>.
// compile-flags: -Zmir-emit-validate=0
use std::collections::{self, HashMap};
use std::hash::BuildHasherDefault;
......
......@@ -8,8 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: remove the next line once https://github.com/rust-lang/rust/issues/43359 is fixed
// compile-flags: -Zmir-opt-level=0
// FIXME: remove -Zmir-opt-level once https://github.com/rust-lang/rust/issues/43359 is fixed
// FIXME: remove -Zmir-emit-validate=0 once https://github.com/rust-lang/rust/pull/43748 is merged
// compile-flags: -Zmir-opt-level=0 -Zmir-emit-validate=0
use std::i32;
......
// FIXME: We have to disable this because force_allocation fails.
// TODO: I think this can be triggered even without validation.
// compile-flags: -Zmir-emit-validate=0
#![allow(dead_code)]
#![feature(unsize, coerce_unsized)]
......