diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs
index e83b514c69190bc49827df4b6043c7fae021c961..e8c89cb3e0e7eb17bdbb6b207855fb86699f2a18 100644
--- a/src/librustc/ich/impls_ty.rs
+++ b/src/librustc/ich/impls_ty.rs
@@ -391,10 +391,39 @@ fn hash_stable<W: StableHasherResult>(&self,
     }
 }
 
-impl_stable_hash_for!(struct mir::interpret::Pointer {
-    alloc_id,
-    offset
-});
+impl<'a, Tag> HashStable<StableHashingContext<'a>>
+for ::mir::interpret::Pointer<Tag>
+where Tag: HashStable<StableHashingContext<'a>>
+{
+    fn hash_stable<W: StableHasherResult>(&self,
+                                          hcx: &mut StableHashingContext<'a>,
+                                          hasher: &mut StableHasher<W>) {
+        let ::mir::interpret::Pointer { alloc_id, offset, tag } = self;
+        alloc_id.hash_stable(hcx, hasher);
+        offset.hash_stable(hcx, hasher);
+        tag.hash_stable(hcx, hasher);
+    }
+}
+
+impl<'a, Tag> HashStable<StableHashingContext<'a>>
+for ::mir::interpret::Scalar<Tag>
+where Tag: HashStable<StableHashingContext<'a>>
+{
+    fn hash_stable<W: StableHasherResult>(&self,
+                                          hcx: &mut StableHashingContext<'a>,
+                                          hasher: &mut StableHasher<W>) {
+        use mir::interpret::Scalar::*;
+
+        mem::discriminant(self).hash_stable(hcx, hasher);
+        match self {
+            Bits { bits, size } => {
+                bits.hash_stable(hcx, hasher);
+                size.hash_stable(hcx, hasher);
+            },
+            Ptr(ptr) => ptr.hash_stable(hcx, hasher),
+        }
+    }
+}
 
 impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
     fn hash_stable<W: StableHasherResult>(
@@ -449,25 +478,6 @@ fn hash_stable<W: StableHasherResult>(
     Mutable
 });
 
-
-impl<'a> HashStable<StableHashingContext<'a>>
-for ::mir::interpret::Scalar {
-    fn hash_stable<W: StableHasherResult>(&self,
-                                          hcx: &mut StableHashingContext<'a>,
-                                          hasher: &mut StableHasher<W>) {
-        use mir::interpret::Scalar::*;
-
-        mem::discriminant(self).hash_stable(hcx, hasher);
-        match *self {
-            Bits { bits, size } => {
-                bits.hash_stable(hcx, hasher);
-                size.hash_stable(hcx, hasher);
-            },
-            Ptr(ptr) => ptr.hash_stable(hcx, hasher),
-        }
-    }
-}
-
 impl_stable_hash_for!(struct ty::Const<'tcx> {
     ty,
     val
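
Before the `Pointer` definition itself (next file), here is the tagging pattern in a minimal standalone sketch; `AllocId` and `Size` are simplified stand-ins for the real rustc types, and only the tag-related methods are mirrored:

    // Minimal sketch of the tagging pattern this patch introduces; `AllocId`
    // and `Size` are simplified stand-ins, not the real rustc types.
    #[derive(Copy, Clone, Debug, PartialEq)]
    pub struct AllocId(pub u64);
    #[derive(Copy, Clone, Debug, PartialEq)]
    pub struct Size(pub u64);

    #[derive(Copy, Clone, Debug, PartialEq)]
    pub struct Pointer<Tag = ()> {
        pub alloc_id: AllocId,
        pub offset: Size,
        pub tag: Tag,
    }

    impl Pointer<()> {
        pub fn new(alloc_id: AllocId, offset: Size) -> Self {
            Pointer { alloc_id, offset, tag: () }
        }

        // An untagged pointer enters a machine: attach that machine's default tag.
        pub fn with_default_tag<Tag: Default>(self) -> Pointer<Tag> {
            Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: Tag::default() }
        }
    }

    impl<Tag> Pointer<Tag> {
        // Drop the provenance information, e.g. for error reporting.
        pub fn erase_tag(self) -> Pointer<()> {
            Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () }
        }
    }

    fn main() {
        // CTFE keeps using the unit tag; a provenance-tracking machine substitutes its own.
        let p: Pointer<u32> = Pointer::new(AllocId(0), Size(8)).with_default_tag();
        assert_eq!(p.erase_tag(), Pointer::new(AllocId(0), Size(8)));
    }
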
diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs
index e9d0b041339556162c663a4e6c7bfce5b73c88a5..5eee0fba5fb463bfb5a53fec00f34f418c629406 100644
--- a/src/librustc/mir/interpret/mod.rs
+++ b/src/librustc/mir/interpret/mod.rs
@@ -138,54 +138,82 @@ impl<T: HasDataLayout> PointerArithmetic for T {}
 /// each context.
 ///
 /// Defaults to the index based and loosely coupled AllocId.
+///
+/// Pointer is also generic over the `Tag` associated with each pointer,
+/// which is used to do provenance tracking during execution.
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
-pub struct Pointer<Id=AllocId> {
+pub struct Pointer<Tag=(),Id=AllocId> {
     pub alloc_id: Id,
     pub offset: Size,
+    pub tag: Tag,
 }
 
 /// Produces a `Pointer` which points to the beginning of the Allocation
 impl From<AllocId> for Pointer {
+    #[inline(always)]
     fn from(alloc_id: AllocId) -> Self {
         Pointer::new(alloc_id, Size::ZERO)
     }
 }
 
-impl<'tcx> Pointer {
+impl<'tcx> Pointer<()> {
+    #[inline(always)]
     pub fn new(alloc_id: AllocId, offset: Size) -> Self {
-        Pointer { alloc_id, offset }
+        Pointer { alloc_id, offset, tag: () }
+    }
+
+    #[inline(always)]
+    pub fn with_default_tag<Tag>(self) -> Pointer<Tag>
+        where Tag: Default
+    {
+        Pointer::new_with_tag(self.alloc_id, self.offset, Default::default())
+    }
+}
+
+impl<'tcx, Tag> Pointer<Tag> {
+    #[inline(always)]
+    pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self {
+        Pointer { alloc_id, offset, tag }
     }
 
     pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
-        Pointer::new(
+        Pointer::new_with_tag(
             self.alloc_id,
             Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
+            self.tag,
         )
     }
 
     pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
         let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
-        (Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
+        (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
     }
 
     pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
-        Ok(Pointer::new(
+        Ok(Pointer::new_with_tag(
             self.alloc_id,
             Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
+            self.tag,
         ))
     }
 
     pub fn overflowing_offset<C: HasDataLayout>(self, i: Size, cx: C) -> (Self, bool) {
         let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
-        (Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
+        (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
     }
 
     pub fn offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
-        Ok(Pointer::new(
+        Ok(Pointer::new_with_tag(
             self.alloc_id,
             Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
+            self.tag
         ))
     }
+
+    #[inline]
+    pub fn erase_tag(self) -> Pointer {
+        Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () }
+    }
 }
 
@@ -496,15 +524,15 @@ pub fn set_id_same_memory(&mut self, id: AllocId, mem: M) {
 }
 
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
-pub struct Allocation {
+pub struct Allocation<Tag=()> {
     /// The actual bytes of the allocation.
     /// Note that the bytes of a pointer represent the offset of the pointer
     pub bytes: Vec<u8>,
-    /// Maps from byte addresses to allocations.
+    /// Maps from byte addresses to extra data for each pointer.
     /// Only the first byte of a pointer is inserted into the map; i.e.,
     /// every entry in this map applies to `pointer_size` consecutive bytes starting
     /// at the given offset.
-    pub relocations: Relocations,
+    pub relocations: Relocations<Tag>,
    /// Denotes undefined memory. Reading from undefined memory is forbidden in miri
    pub undef_mask: UndefMask,
    /// The alignment of the allocation to detect unaligned reads.
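
The relocation side of the same change, reduced to a standalone sketch: each relocation entry now carries the stored pointer's tag next to its target allocation. `BTreeMap` stands in for rustc's `SortedMap`, and the types are simplified:

    // Sketch of the new relocation payload: offset -> (tag, target alloc_id).
    use std::collections::BTreeMap;

    type Size = u64;
    type AllocId = u64;

    pub struct Relocations<Tag = ()>(BTreeMap<Size, (Tag, AllocId)>);

    impl<Tag> Relocations<Tag> {
        pub fn new() -> Self {
            Relocations(BTreeMap::new())
        }

        // Record that the pointer stored at `offset` points into `target`,
        // carrying `tag` as its provenance.
        pub fn insert(&mut self, offset: Size, tag: Tag, target: AllocId) {
            self.0.insert(offset, (tag, target));
        }
    }

    fn main() {
        let mut r: Relocations<u8> = Relocations::new();
        r.insert(16, 7, 3); // pointer at offset 16 -> allocation 3, tag 7
        assert_eq!(r.0.get(&16), Some(&(7, 3)));
    }
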
@@ -515,7 +543,7 @@ pub struct Allocation { pub mutability: Mutability, } -impl Allocation { +impl Allocation { /// Creates a read-only allocation initialized by the given bytes pub fn from_bytes(slice: &[u8], align: Align) -> Self { let mut undef_mask = UndefMask::new(Size::ZERO); @@ -548,29 +576,29 @@ pub fn undef(size: Size, align: Align) -> Self { impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {} #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] -pub struct Relocations(SortedMap); +pub struct Relocations(SortedMap); -impl Relocations { +impl Relocations { pub fn new() -> Self { Relocations(SortedMap::new()) } // The caller must guarantee that the given relocations are already sorted // by address and contain no duplicates. - pub fn from_presorted(r: Vec<(Size, Id)>) -> Self { + pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self { Relocations(SortedMap::from_presorted_elements(r)) } } -impl Deref for Relocations { - type Target = SortedMap; +impl Deref for Relocations { + type Target = SortedMap; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for Relocations { +impl DerefMut for Relocations { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index c81d55e69b61a124b380142c3c7cb7b99ef5f847..9e54b146fd02a61edcf79d88ccd9de0ca1d9804d 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -79,7 +79,47 @@ pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { } } -impl<'tcx> Scalar { +/// A `Scalar` represents an immediate, primitive value existing outside of a +/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in +/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes +/// of a simple value or a pointer into another `Allocation` +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub enum Scalar { + /// The raw bytes of a simple value. + Bits { + /// The first `size` bytes are the value. + /// Do not try to read less or more bytes that that. The remaining bytes must be 0. + size: u8, + bits: u128, + }, + + /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of + /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the + /// relocation and its associated offset together as a `Pointer` here. + Ptr(Pointer), +} + +impl<'tcx> Scalar<()> { + #[inline] + pub fn with_default_tag(self) -> Scalar + where Tag: Default + { + match self { + Scalar::Ptr(ptr) => Scalar::Ptr(ptr.with_default_tag()), + Scalar::Bits { bits, size } => Scalar::Bits { bits, size }, + } + } +} + +impl<'tcx, Tag> Scalar { + #[inline] + pub fn erase_tag(self) -> Scalar { + match self { + Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_tag()), + Scalar::Bits { bits, size } => Scalar::Bits { bits, size }, + } + } + #[inline] pub fn ptr_null(cx: impl HasDataLayout) -> Self { Scalar::Bits { @@ -208,7 +248,7 @@ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { } #[inline] - pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { match self { Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage), Scalar::Bits { .. 
} => err!(ReadBytesAsPointer), @@ -317,29 +357,9 @@ pub fn to_f64(self) -> EvalResult<'static, f64> { } } -impl From for Scalar { +impl From> for Scalar { #[inline(always)] - fn from(ptr: Pointer) -> Self { + fn from(ptr: Pointer) -> Self { Scalar::Ptr(ptr) } } - -/// A `Scalar` represents an immediate, primitive value existing outside of a -/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in -/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes -/// of a simple value or a pointer into another `Allocation` -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum Scalar { - /// The raw bytes of a simple value. - Bits { - /// The first `size` bytes are the value. - /// Do not try to read less or more bytes that that. The remaining bytes must be 0. - size: u8, - bits: u128, - }, - - /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of - /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the - /// relocation and its associated offset together as a `Pointer` here. - Ptr(Pointer), -} diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index ce18f31da6907f440a25a5772bf0fcdf8e000b50..9f0f744389089c56981a35d1a08a55be3dc4ec32 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -92,7 +92,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll let pointer_size = layout.pointer_size.bytes() as usize; let mut next_offset = 0; - for &(offset, alloc_id) in alloc.relocations.iter() { + for &(offset, ((), alloc_id)) in alloc.relocations.iter() { let offset = offset.bytes(); assert_eq!(offset as usize as u64, offset); let offset = offset as usize; @@ -105,7 +105,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; llvals.push(scalar_to_llvm( cx, - Pointer { alloc_id, offset: Size::from_bytes(ptr_offset) }.into(), + Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(), &layout::Scalar { value: layout::Primitive::Pointer, valid_range: 0..=!0 diff --git a/src/librustc_mir/const_eval.rs b/src/librustc_mir/const_eval.rs index 0c669b9ec31a798012e8ac2790ebf7d03686ac47..fd18d9feeea91e12b123b30dd2ad23fef05843f2 100644 --- a/src/librustc_mir/const_eval.rs +++ b/src/librustc_mir/const_eval.rs @@ -12,6 +12,9 @@ use std::fmt; use std::error::Error; +use std::borrow::{Borrow, Cow}; +use std::hash::Hash; +use std::collections::hash_map::Entry; use rustc::hir::{self, def_id::DefId}; use rustc::mir::interpret::ConstEvalErr; @@ -20,13 +23,14 @@ use rustc::ty::layout::{self, LayoutOf, TyLayout}; use rustc::ty::subst::Subst; use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::fx::FxHashMap; use syntax::ast::Mutability; use syntax::source_map::{Span, DUMMY_SP}; use rustc::mir::interpret::{ EvalResult, EvalError, EvalErrorKind, GlobalId, - Scalar, Allocation, ConstValue, + Scalar, Allocation, AllocId, ConstValue, }; use interpret::{self, Place, PlaceTy, MemPlace, OpTy, Operand, Value, @@ -118,9 +122,9 @@ pub fn op_to_const<'tcx>( } }; let val = match normalized_op { - Err(MemPlace { ptr, align, extra }) => { + Err(MemPlace { ptr, align, meta }) => { // extract alloc-offset pair - assert!(extra.is_none()); + assert!(meta.is_none()); let ptr = 
ptr.to_ptr()?; let alloc = ecx.memory.get(ptr.alloc_id)?; assert!(alloc.align.abi() >= align.abi()); @@ -264,6 +268,67 @@ fn new() -> Self { } } +impl interpret::AllocMap for FxHashMap { + #[inline(always)] + fn contains_key(&mut self, k: &Q) -> bool + where K: Borrow + { + FxHashMap::contains_key(self, k) + } + + #[inline(always)] + fn insert(&mut self, k: K, v: V) -> Option + { + FxHashMap::insert(self, k, v) + } + + #[inline(always)] + fn remove(&mut self, k: &Q) -> Option + where K: Borrow + { + FxHashMap::remove(self, k) + } + + #[inline(always)] + fn filter_map_collect(&self, mut f: impl FnMut(&K, &V) -> Option) -> Vec { + self.iter() + .filter_map(move |(k, v)| f(k, &*v)) + .collect() + } + + #[inline(always)] + fn get_or( + &self, + k: K, + vacant: impl FnOnce() -> Result + ) -> Result<&V, E> + { + match self.get(&k) { + Some(v) => Ok(v), + None => { + vacant()?; + bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading") + } + } + } + + #[inline(always)] + fn get_mut_or( + &mut self, + k: K, + vacant: impl FnOnce() -> Result + ) -> Result<&mut V, E> + { + match self.entry(k) { + Entry::Occupied(e) => Ok(e.into_mut()), + Entry::Vacant(e) => { + let v = vacant()?; + Ok(e.insert(v)) + } + } + } +} + type CompileTimeEvalContext<'a, 'mir, 'tcx> = EvalContext<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>; @@ -272,8 +337,11 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> { type MemoryData = (); type MemoryKinds = !; + type PointerTag = (); + + type MemoryMap = FxHashMap, Allocation<()>)>; - const MUT_STATIC_KIND: Option = None; // no mutating of statics allowed + const STATIC_KIND: Option = None; // no copying of statics allowed const ENFORCE_VALIDITY: bool = false; // for now, we don't fn find_fn( @@ -339,10 +407,18 @@ fn ptr_op( fn find_foreign_static( _tcx: TyCtxtAt<'a, 'tcx, 'tcx>, _def_id: DefId, - ) -> EvalResult<'tcx, &'tcx Allocation> { + ) -> EvalResult<'tcx, Cow<'tcx, Allocation>> { err!(ReadForeignStatic) } + #[inline(always)] + fn static_with_default_tag( + alloc: &'_ Allocation + ) -> Cow<'_, Allocation> { + // We do not use a tag so we can just cheaply forward the reference + Cow::Borrowed(alloc) + } + fn box_alloc( _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, _dest: PlaceTy<'tcx>, diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index c6b527f42294c1134535272da53f0774ca17338b..bfc7e6801fc463fe39fd778fef7e4bdeede63d63 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -33,9 +33,9 @@ fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool { pub fn cast( &mut self, - src: OpTy<'tcx>, + src: OpTy<'tcx, M::PointerTag>, kind: CastKind, - dest: PlaceTy<'tcx>, + dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { let src_layout = src.layout; let dst_layout = dest.layout; @@ -143,10 +143,10 @@ pub fn cast( pub(super) fn cast_scalar( &self, - val: Scalar, + val: Scalar, src_layout: TyLayout<'tcx>, dest_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, Scalar> { + ) -> EvalResult<'tcx, Scalar> { use rustc::ty::TyKind::*; trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty); @@ -182,7 +182,7 @@ fn cast_from_int( v: u128, src_layout: TyLayout<'tcx>, dest_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, Scalar> { + ) -> EvalResult<'tcx, Scalar> { let signed = src_layout.abi.is_signed(); let v = if signed { self.sign_extend(v, src_layout) @@ -239,7 +239,7 @@ fn cast_from_float( bits: u128, fty: FloatTy, dest_ty: Ty<'tcx> - ) -> EvalResult<'tcx, 
Scalar> { + ) -> EvalResult<'tcx, Scalar> { use rustc::ty::TyKind::*; use rustc_apfloat::FloatConvert; match dest_ty.sty { @@ -283,7 +283,11 @@ fn cast_from_float( } } - fn cast_from_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> { + fn cast_from_ptr( + &self, + ptr: Pointer, + ty: Ty<'tcx> + ) -> EvalResult<'tcx, Scalar> { use rustc::ty::TyKind::*; match ty.sty { // Casting to a reference or fn pointer is not permitted by rustc, @@ -298,8 +302,8 @@ fn cast_from_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> fn unsize_into_ptr( &mut self, - src: OpTy<'tcx>, - dest: PlaceTy<'tcx>, + src: OpTy<'tcx, M::PointerTag>, + dest: PlaceTy<'tcx, M::PointerTag>, // The pointee types sty: Ty<'tcx>, dty: Ty<'tcx>, @@ -339,8 +343,8 @@ fn unsize_into_ptr( fn unsize_into( &mut self, - src: OpTy<'tcx>, - dest: PlaceTy<'tcx>, + src: OpTy<'tcx, M::PointerTag>, + dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { match (&src.layout.ty.sty, &dest.layout.ty.sty) { (&ty::Ref(_, s, _), &ty::Ref(_, d, _)) | diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index e15a721731e8744b3f14001abefacc1b64b89e9e..f6944b2a9ae8555b73b0cbf6573697663137f044 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -49,12 +49,12 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { pub memory: Memory<'a, 'mir, 'tcx, M>, /// The virtual call stack. - pub(crate) stack: Vec>, + pub(crate) stack: Vec>, } /// A stack frame. #[derive(Clone)] -pub struct Frame<'mir, 'tcx: 'mir> { +pub struct Frame<'mir, 'tcx: 'mir, Tag=()> { //////////////////////////////////////////////////////////////////////////////// // Function and callsite information //////////////////////////////////////////////////////////////////////////////// @@ -74,14 +74,14 @@ pub struct Frame<'mir, 'tcx: 'mir> { pub return_to_block: StackPopCleanup, /// The location where the result of the current stack frame should be written to. - pub return_place: Place, + pub return_place: Place, /// The list of locals for this stack frame, stored in order as /// `[return_ptr, arguments..., variables..., temporaries...]`. /// The locals are stored as `Option`s. /// `None` represents a local that is currently dead, while a live local /// can either directly contain `Scalar` or refer to some part of an `Allocation`. - pub locals: IndexVec>, + pub locals: IndexVec>, //////////////////////////////////////////////////////////////////////////////// // Current position within the function @@ -108,24 +108,24 @@ pub enum StackPopCleanup { // State of a local variable #[derive(Copy, Clone, PartialEq, Eq, Hash)] -pub enum LocalValue { +pub enum LocalValue { Dead, // Mostly for convenience, we re-use the `Operand` type here. // This is an optimization over just always having a pointer here; // we can thus avoid doing an allocation when the local just stores // immediate values *and* never has its address taken. 
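
The `Tag=()` default on `Frame` and `LocalValue` is what keeps existing, tag-oblivious code compiling unchanged. A toy version of that pattern, with hypothetical caller names:

    // With a defaulted type parameter, old code can keep naming the type
    // without the extra argument while new code spells it out.
    struct Frame<Tag = ()> {
        tag: Tag,
    }

    // Old, tag-oblivious code: `Frame` still means `Frame<()>`.
    fn old_caller(frame: &Frame) -> &() {
        &frame.tag
    }

    // New, tag-generic code names the parameter explicitly.
    fn new_caller<Tag>(frame: &Frame<Tag>) -> &Tag {
        &frame.tag
    }

    fn main() {
        old_caller(&Frame { tag: () });
        new_caller(&Frame { tag: 42u32 });
    }
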
- Live(Operand), + Live(Operand), } -impl<'tcx> LocalValue { - pub fn access(&self) -> EvalResult<'tcx, &Operand> { +impl<'tcx, Tag> LocalValue { + pub fn access(&self) -> EvalResult<'tcx, &Operand> { match self { LocalValue::Dead => err!(DeadLocal), LocalValue::Live(ref val) => Ok(val), } } - pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand> { + pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand> { match self { LocalValue::Dead => err!(DeadLocal), LocalValue::Live(ref mut val) => Ok(val), @@ -218,7 +218,7 @@ pub fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { &mut self.memory } - pub fn stack(&self) -> &[Frame<'mir, 'tcx>] { + pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag>] { &self.stack } @@ -230,7 +230,10 @@ pub fn cur_frame(&self) -> usize { /// Mark a storage as live, killing the previous content and returning it. /// Remember to deallocate that! - pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> { + pub fn storage_live( + &mut self, + local: mir::Local + ) -> EvalResult<'tcx, LocalValue> { assert!(local != mir::RETURN_PLACE, "Cannot make return place live"); trace!("{:?} is now live", local); @@ -242,14 +245,14 @@ pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue /// Returns the old value of the local. /// Remember to deallocate that! - pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue { + pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue { assert!(local != mir::RETURN_PLACE, "Cannot make return place dead"); trace!("{:?} is now dead", local); mem::replace(&mut self.frame_mut().locals[local], LocalValue::Dead) } - pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { + pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { let ptr = self.memory.allocate_static_bytes(s.as_bytes()); Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx)) } @@ -327,10 +330,10 @@ pub fn layout_of_local( } /// Return the actual dynamic size and alignment of the place at the given type. - /// Only the "extra" (metadata) part of the place matters. + /// Only the `meta` part of the place matters. pub(super) fn size_and_align_of( &self, - metadata: Option, + metadata: Option>, layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, (Size, Align)> { let metadata = match metadata { @@ -411,9 +414,9 @@ pub(super) fn size_and_align_of( #[inline] pub fn size_and_align_of_mplace( &self, - mplace: MPlaceTy<'tcx> + mplace: MPlaceTy<'tcx, M::PointerTag> ) -> EvalResult<'tcx, (Size, Align)> { - self.size_and_align_of(mplace.extra, mplace.layout) + self.size_and_align_of(mplace.meta, mplace.layout) } pub fn push_stack_frame( @@ -421,7 +424,7 @@ pub fn push_stack_frame( instance: ty::Instance<'tcx>, span: source_map::Span, mir: &'mir mir::Mir<'tcx>, - return_place: Place, + return_place: Place, return_to_block: StackPopCleanup, ) -> EvalResult<'tcx> { ::log_settings::settings().indentation += 1; @@ -519,7 +522,10 @@ pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { Ok(()) } - pub(super) fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> { + pub(super) fn deallocate_local( + &mut self, + local: LocalValue, + ) -> EvalResult<'tcx> { // FIXME: should we tell the user that there was a local which was never written to? if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. 
})) = local { trace!("deallocating local"); @@ -541,12 +547,12 @@ pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Cons } #[inline(always)] - pub fn frame(&self) -> &Frame<'mir, 'tcx> { + pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag> { self.stack.last().expect("no call frames exist") } #[inline(always)] - pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> { + pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag> { self.stack.last_mut().expect("no call frames exist") } @@ -562,7 +568,7 @@ pub fn substs(&self) -> &'tcx Substs<'tcx> { } } - pub fn dump_place(&self, place: Place) { + pub fn dump_place(&self, place: Place) { // Debug output if !log_enabled!(::log::Level::Trace) { return; diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs index 5fee49ba2fcf2ddb5bca8745a35b1bfc72510055..a669b2aafc2b8868a0a743e8372208bd270c4cbb 100644 --- a/src/librustc_mir/interpret/intrinsics.rs +++ b/src/librustc_mir/interpret/intrinsics.rs @@ -25,11 +25,11 @@ }; -fn numeric_intrinsic<'tcx>( +fn numeric_intrinsic<'tcx, Tag>( name: &str, bits: u128, kind: Primitive, -) -> EvalResult<'tcx, Scalar> { +) -> EvalResult<'tcx, Scalar> { let size = match kind { Primitive::Int(integer, _) => integer.size(), _ => bug!("invalid `{}` argument: {:?}", name, bits), @@ -51,8 +51,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> pub fn emulate_intrinsic( &mut self, instance: ty::Instance<'tcx>, - args: &[OpTy<'tcx>], - dest: PlaceTy<'tcx>, + args: &[OpTy<'tcx, M::PointerTag>], + dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx, bool> { let substs = instance.substs; @@ -169,8 +169,8 @@ pub fn emulate_intrinsic( pub fn hook_fn( &mut self, instance: ty::Instance<'tcx>, - args: &[OpTy<'tcx>], - dest: Option>, + args: &[OpTy<'tcx, M::PointerTag>], + dest: Option>, ) -> EvalResult<'tcx, bool> { let def_id = instance.def_id(); // Some fn calls are actually BinOp intrinsics diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index f90a7efce47b373fb4250397d673fdbaf5dd4ac1..a444f0bafd23c1f3d7d7b24de03ead89bb79923f 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -12,17 +12,55 @@ //! This separation exists to ensure that no fancy miri features like //! interpreting common C functions leak into CTFE. +use std::borrow::{Borrow, Cow}; +use std::hash::Hash; + use rustc::hir::def_id::DefId; -use rustc::mir::interpret::{Allocation, EvalResult, Scalar}; +use rustc::mir::interpret::{Allocation, AllocId, EvalResult, Scalar}; use rustc::mir; use rustc::ty::{self, layout::TyLayout, query::TyCtxtAt}; -use super::{EvalContext, PlaceTy, OpTy}; +use super::{EvalContext, PlaceTy, OpTy, MemoryKind}; + +/// The functionality needed by memory to manage its allocations +pub trait AllocMap { + /// Test if the map contains the given key. + /// Deliberately takes `&mut` because that is sufficient, and some implementations + /// can be more efficient then (using `RefCell::get_mut`). + fn contains_key(&mut self, k: &Q) -> bool + where K: Borrow; + + /// Insert new entry into the map. + fn insert(&mut self, k: K, v: V) -> Option; + + /// Remove entry from the map. + fn remove(&mut self, k: &Q) -> Option + where K: Borrow; + + /// Return data based the keys and values in the map. + fn filter_map_collect(&self, f: impl FnMut(&K, &V) -> Option) -> Vec; + + /// Return a reference to entry `k`. 
If no such entry exists, call + /// `vacant` and either forward its error, or add its result to the map + /// and return a reference to *that*. + fn get_or( + &self, + k: K, + vacant: impl FnOnce() -> Result + ) -> Result<&V, E>; + + /// Return a mutable reference to entry `k`. If no such entry exists, call + /// `vacant` and either forward its error, or add its result to the map + /// and return a reference to *that*. + fn get_mut_or( + &mut self, + k: K, + vacant: impl FnOnce() -> Result + ) -> Result<&mut V, E>; +} /// Methods of this trait signifies a point where CTFE evaluation would fail /// and some use case dependent behaviour can instead be applied. -/// FIXME: We should be able to get rid of the 'a here if we can get rid of the 'a in -/// `snapshot::EvalSnapshot`. pub trait Machine<'a, 'mir, 'tcx>: Sized { /// Additional data that can be accessed via the Memory type MemoryData; @@ -30,8 +68,22 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { /// Additional memory kinds a machine wishes to distinguish from the builtin ones type MemoryKinds: ::std::fmt::Debug + Copy + Eq; - /// The memory kind to use for mutated statics -- or None if those are not supported. - const MUT_STATIC_KIND: Option; + /// Memory's allocation map + type MemoryMap: + AllocMap, Allocation)> + + Default + + Clone; + + /// Tag tracked alongside every pointer. This is inert for now, in preparation for + /// a future implementation of "Stacked Borrows" + /// . + type PointerTag: ::std::fmt::Debug + Default + Copy + Eq + Hash + 'static; + + /// The memory kind to use for copied statics -- or None if those are not supported. + /// Statics are copied under two circumstances: When they are mutated, and when + /// `static_with_default_tag` or `find_foreign_static` (see below) returns an owned allocation + /// that is added to the memory so that the work is not done twice. + const STATIC_KIND: Option; /// Whether to enforce the validity invariant const ENFORCE_VALIDITY: bool; @@ -53,8 +105,8 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { fn find_fn( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - args: &[OpTy<'tcx>], - dest: Option>, + args: &[OpTy<'tcx, Self::PointerTag>], + dest: Option>, ret: Option, ) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>>; @@ -63,18 +115,30 @@ fn find_fn( fn call_intrinsic( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - args: &[OpTy<'tcx>], - dest: PlaceTy<'tcx>, + args: &[OpTy<'tcx, Self::PointerTag>], + dest: PlaceTy<'tcx, Self::PointerTag>, ) -> EvalResult<'tcx>; /// Called for read access to a foreign static item. - /// This can be called multiple times for the same static item and should return consistent - /// results. Once the item is *written* the first time, as usual for statics a copy is - /// made and this function is not called again. + /// + /// This will only be called once per static and machine; the result is cached in + /// the machine memory. (This relies on `AllocMap::get_or` being able to add the + /// owned allocation to the map even when the map is shared.) fn find_foreign_static( tcx: TyCtxtAt<'a, 'tcx, 'tcx>, def_id: DefId, - ) -> EvalResult<'tcx, &'tcx Allocation>; + ) -> EvalResult<'tcx, Cow<'tcx, Allocation>>; + + /// Called to turn an allocation obtained from the `tcx` into one that has + /// the appropriate tags on each pointer. + /// + /// This should avoid copying if no work has to be done! 
If this returns an owned + /// allocation (because a copy had to be done to add the tags), machine memory will + /// cache the result. (This relies on `AllocMap::get_or` being able to add the + /// owned allocation to the map even when the map is shared.) + fn static_with_default_tag( + alloc: &'_ Allocation + ) -> Cow<'_, Allocation>; /// Called for all binary operations on integer(-like) types when one operand is a pointer /// value, and for the `Offset` operation that is inherently about pointers. @@ -83,18 +147,18 @@ fn find_foreign_static( fn ptr_op( ecx: &EvalContext<'a, 'mir, 'tcx, Self>, bin_op: mir::BinOp, - left: Scalar, + left: Scalar, left_layout: TyLayout<'tcx>, - right: Scalar, + right: Scalar, right_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, (Scalar, bool)>; + ) -> EvalResult<'tcx, (Scalar, bool)>; /// Heap allocations via the `box` keyword /// /// Returns a pointer to the allocated memory fn box_alloc( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - dest: PlaceTy<'tcx>, + dest: PlaceTy<'tcx, Self::PointerTag>, ) -> EvalResult<'tcx>; /// Execute a validation operation diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 5437c8ababc27642f753c42adb32a71250867f7d..7d3ae19e1a30c69cf519d60f633a86c17c779616 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -18,23 +18,28 @@ use std::collections::VecDeque; use std::ptr; +use std::borrow::Cow; use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt}; use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout}; -use rustc::mir::interpret::{Pointer, AllocId, Allocation, ConstValue, GlobalId, - EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic, - truncate}; +use rustc::mir::interpret::{ + Pointer, AllocId, Allocation, ConstValue, GlobalId, + EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic, + truncate +}; pub use rustc::mir::interpret::{write_target_uint, read_target_uint}; use rustc_data_structures::fx::{FxHashSet, FxHashMap}; use syntax::ast::Mutability; -use super::{Machine, ScalarMaybeUndef}; +use super::{Machine, AllocMap, ScalarMaybeUndef}; #[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)] pub enum MemoryKind { /// Error if deallocated except during a stack pop Stack, + /// Error if ever deallocated + Vtable, /// Additional memory kinds a machine wishes to distinguish from the builtin ones Machine(T), } @@ -48,9 +53,13 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { /// Allocations local to this instance of the miri engine. The kind /// helps ensure that the same mechanism is used for allocation and /// deallocation. When an allocation is not found here, it is a - /// static and looked up in the `tcx` for read access. Writing to - /// a static creates a copy here, in the machine. - alloc_map: FxHashMap, Allocation)>, + /// static and looked up in the `tcx` for read access. Some machines may + /// have to mutate this map even on a read-only access to a static (because + /// they do pointer provenance tracking and the allocations in `tcx` have + /// the wrong type), so we let the machine override this type. + /// Either way, if the machine allows writing to a static, doing so will + /// create a copy of the static allocation here. 
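
The get-or-insert behaviour `AllocMap` asks for is essentially the standard `Entry` pattern. A sketch of `get_mut_or` over a plain `HashMap`, mirroring what const_eval.rs implements for `FxHashMap` (bounds reduced for brevity):

    // On a vacant entry, run the fallback and cache its result in the map.
    use std::collections::hash_map::{Entry, HashMap};
    use std::hash::Hash;

    fn get_mut_or<'a, K: Hash + Eq, V, E>(
        map: &'a mut HashMap<K, V>,
        k: K,
        vacant: impl FnOnce() -> Result<V, E>,
    ) -> Result<&'a mut V, E> {
        match map.entry(k) {
            Entry::Occupied(e) => Ok(e.into_mut()),
            Entry::Vacant(e) => Ok(e.insert(vacant()?)),
        }
    }

    fn main() {
        let mut m: HashMap<u64, String> = HashMap::new();
        // First access inserts the fallback; later accesses see the cached value.
        let v = get_mut_or(&mut m, 0, || Ok::<_, ()>("static".to_string())).unwrap();
        v.push_str(" (copied)");
        assert_eq!(m[&0], "static (copied)");
    }
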
+ alloc_map: M::MemoryMap, /// To be able to compare pointers with NULL, and to check alignment for accesses /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations @@ -98,23 +107,23 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>, data: M::MemoryData) -> Self { Memory { data, - alloc_map: FxHashMap::default(), + alloc_map: Default::default(), dead_alloc_map: FxHashMap::default(), tcx, } } - pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer { - self.tcx.alloc_map.lock().create_fn_alloc(instance).into() + pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer { + Pointer::from(self.tcx.alloc_map.lock().create_fn_alloc(instance)).with_default_tag() } - pub fn allocate_static_bytes(&mut self, bytes: &[u8]) -> Pointer { - self.tcx.allocate_bytes(bytes).into() + pub fn allocate_static_bytes(&mut self, bytes: &[u8]) -> Pointer { + Pointer::from(self.tcx.allocate_bytes(bytes)).with_default_tag() } pub fn allocate_with( &mut self, - alloc: Allocation, + alloc: Allocation, kind: MemoryKind, ) -> EvalResult<'tcx, AllocId> { let id = self.tcx.alloc_map.lock().reserve(); @@ -127,19 +136,20 @@ pub fn allocate( size: Size, align: Align, kind: MemoryKind, - ) -> EvalResult<'tcx, Pointer> { - self.allocate_with(Allocation::undef(size, align), kind).map(Pointer::from) + ) -> EvalResult<'tcx, Pointer> { + let ptr = Pointer::from(self.allocate_with(Allocation::undef(size, align), kind)?); + Ok(ptr.with_default_tag()) } pub fn reallocate( &mut self, - ptr: Pointer, + ptr: Pointer, old_size: Size, old_align: Align, new_size: Size, new_align: Align, kind: MemoryKind, - ) -> EvalResult<'tcx, Pointer> { + ) -> EvalResult<'tcx, Pointer> { if ptr.offset.bytes() != 0 { return err!(ReallocateNonBasePtr); } @@ -160,7 +170,7 @@ pub fn reallocate( } /// Deallocate a local, or do nothing if that local has been made into a static - pub fn deallocate_local(&mut self, ptr: Pointer) -> EvalResult<'tcx> { + pub fn deallocate_local(&mut self, ptr: Pointer) -> EvalResult<'tcx> { // The allocation might be already removed by static interning. // This can only really happen in the CTFE instance, not in miri. if self.alloc_map.contains_key(&ptr.alloc_id) { @@ -172,7 +182,7 @@ pub fn deallocate_local(&mut self, ptr: Pointer) -> EvalResult<'tcx> { pub fn deallocate( &mut self, - ptr: Pointer, + ptr: Pointer, size_and_align: Option<(Size, Align)>, kind: MemoryKind, ) -> EvalResult<'tcx> { @@ -231,7 +241,11 @@ pub fn deallocate( /// Check that the pointer is aligned AND non-NULL. This supports ZSTs in two ways: /// You can pass a scalar, and a `Pointer` does not have to actually still be allocated. - pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> { + pub fn check_align( + &self, + ptr: Scalar, + required_align: Align + ) -> EvalResult<'tcx> { // Check non-NULL/Undef, extract offset let (offset, alloc_align) = match ptr { Scalar::Ptr(ptr) => { @@ -240,7 +254,7 @@ pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx // of some (potentially dead) allocation. 
if ptr.offset > size { return err!(PointerOutOfBounds { - ptr, + ptr: ptr.erase_tag(), access: true, allocation_size: size, }); @@ -284,12 +298,12 @@ pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx /// If you want to check bounds before doing a memory access, be sure to /// check the pointer one past the end of your access, then everything will /// work out exactly. - pub fn check_bounds_ptr(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> { + pub fn check_bounds_ptr(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> { let alloc = self.get(ptr.alloc_id)?; let allocation_size = alloc.bytes.len() as u64; if ptr.offset.bytes() > allocation_size { return err!(PointerOutOfBounds { - ptr, + ptr: ptr.erase_tag(), access, allocation_size: Size::from_bytes(allocation_size), }); @@ -299,7 +313,12 @@ pub fn check_bounds_ptr(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> { /// Check if the memory range beginning at `ptr` and of size `Size` is "in-bounds". #[inline(always)] - pub fn check_bounds(&self, ptr: Pointer, size: Size, access: bool) -> EvalResult<'tcx> { + pub fn check_bounds( + &self, + ptr: Pointer, + size: Size, + access: bool + ) -> EvalResult<'tcx> { // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) self.check_bounds_ptr(ptr.offset(size, &*self)?, access) } @@ -307,15 +326,21 @@ pub fn check_bounds(&self, ptr: Pointer, size: Size, access: bool) -> EvalResult /// Allocation accessors impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { - /// Helper function to obtain the global (tcx) allocation for a static + /// Helper function to obtain the global (tcx) allocation for a static. + /// This attempts to return a reference to an existing allocation if + /// one can be found in `tcx`. That, however, is only possible if `tcx` and + /// this machine use the same pointer tag, so it is indirected through + /// `M::static_with_default_tag`. fn get_static_alloc( tcx: TyCtxtAt<'a, 'tcx, 'tcx>, id: AllocId, - ) -> EvalResult<'tcx, &'tcx Allocation> { + ) -> EvalResult<'tcx, Cow<'tcx, Allocation>> { let alloc = tcx.alloc_map.lock().get(id); let def_id = match alloc { Some(AllocType::Memory(mem)) => { - return Ok(mem) + // We got tcx memory. Let the machine figure out whether and how to + // turn that into memory with the right pointer tag. + return Ok(M::static_with_default_tag(mem)) } Some(AllocType::Function(..)) => { return err!(DerefFunctionPointer) @@ -342,20 +367,73 @@ fn get_static_alloc( EvalErrorKind::ReferencedConstant(err).into() }).map(|const_val| { if let ConstValue::ByRef(_, allocation, _) = const_val.val { - allocation + // We got tcx memory. Let the machine figure out whether and how to + // turn that into memory with the right pointer tag. + M::static_with_default_tag(allocation) } else { bug!("Matching on non-ByRef static") } }) } - pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> { - match self.alloc_map.get(&id) { - // Normal alloc? - Some(alloc) => Ok(&alloc.1), - // Static. No need to make any copies, just provide read access to the global static - // memory in tcx. - None => Self::get_static_alloc(self.tcx, id), + pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> { + // The error type of the inner closure here is somewhat funny. We have two + // ways of "erroring": An actual error, or because we got a reference from + // `get_static_alloc` that we can actually use directly without inserting anything anywhere. 
+ // So the error type is `EvalResult<'tcx, &Allocation>`. + let a = self.alloc_map.get_or(id, || { + let alloc = Self::get_static_alloc(self.tcx, id).map_err(Err)?; + match alloc { + Cow::Borrowed(alloc) => { + // We got a ref, cheaply return that as an "error" so that the + // map does not get mutated. + Err(Ok(alloc)) + } + Cow::Owned(alloc) => { + // Need to put it into the map and return a ref to that + let kind = M::STATIC_KIND.expect( + "I got an owned allocation that I have to copy but the machine does \ + not expect that to happen" + ); + Ok((MemoryKind::Machine(kind), alloc)) + } + } + }); + // Now unpack that funny error type + match a { + Ok(a) => Ok(&a.1), + Err(a) => a + } + } + + pub fn get_mut( + &mut self, + id: AllocId, + ) -> EvalResult<'tcx, &mut Allocation> { + let tcx = self.tcx; + let a = self.alloc_map.get_mut_or(id, || { + // Need to make a copy, even if `get_static_alloc` is able + // to give us a cheap reference. + let alloc = Self::get_static_alloc(tcx, id)?; + if alloc.mutability == Mutability::Immutable { + return err!(ModifiedConstantMemory); + } + let kind = M::STATIC_KIND.expect( + "An allocation is being mutated but the machine does not expect that to happen" + ); + Ok((MemoryKind::Machine(kind), alloc.into_owned())) + }); + // Unpack the error type manually because type inference doesn't + // work otherwise (and we cannot help it because `impl Trait`) + match a { + Err(e) => Err(e), + Ok(a) => { + let a = &mut a.1; + if a.mutability == Mutability::Immutable { + return err!(ModifiedConstantMemory); + } + Ok(a) + } } } @@ -367,7 +445,7 @@ pub fn get_size_and_align(&self, id: AllocId) -> (Size, Align) { match self.tcx.alloc_map.lock().get(id) { Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1, 1).unwrap()), Some(AllocType::Static(did)) => { - // The only way `get` couldnÄt have worked here is if this is an extern static + // The only way `get` couldn't have worked here is if this is an extern static assert!(self.tcx.is_foreign_item(did)); // Use size and align of the type let ty = self.tcx.type_of(did); @@ -383,31 +461,7 @@ pub fn get_size_and_align(&self, id: AllocId) -> (Size, Align) { } } - pub fn get_mut( - &mut self, - id: AllocId, - ) -> EvalResult<'tcx, &mut Allocation> { - // Static? - if !self.alloc_map.contains_key(&id) { - // Ask the machine for what to do - if let Some(kind) = M::MUT_STATIC_KIND { - // The machine supports mutating statics. Make a copy, use that. - self.deep_copy_static(id, MemoryKind::Machine(kind))?; - } else { - return err!(ModifiedConstantMemory) - } - } - // If we come here, we know the allocation is in our map - let alloc = &mut self.alloc_map.get_mut(&id).unwrap().1; - // See if we are allowed to mutate this - if alloc.mutability == Mutability::Immutable { - err!(ModifiedConstantMemory) - } else { - Ok(alloc) - } - } - - pub fn get_fn(&self, ptr: Pointer) -> EvalResult<'tcx, Instance<'tcx>> { + pub fn get_fn(&self, ptr: Pointer) -> EvalResult<'tcx, Instance<'tcx>> { if ptr.offset.bytes() != 0 { return err!(InvalidFunctionPointer); } @@ -418,108 +472,132 @@ pub fn get_fn(&self, ptr: Pointer) -> EvalResult<'tcx, Instance<'tcx>> { } } + pub fn mark_immutable(&mut self, id: AllocId) -> EvalResult<'tcx> { + self.get_mut(id)?.mutability = Mutability::Immutable; + Ok(()) + } + /// For debugging, print an allocation and all allocations it points to, recursively. 
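
The control flow of the new `get` is easier to see in isolation. A reduced sketch with stand-in types, where the closure's error channel smuggles out a borrowed allocation that must not be inserted into the map:

    // Err(Ok(alloc)): not an error; use this reference without mutating the map.
    // Err(Err(msg)):  a genuine lookup failure.
    use std::collections::HashMap;

    type AllocId = u64;
    #[derive(Clone, Debug, PartialEq)]
    struct Allocation(&'static str);

    struct Memory {
        alloc_map: HashMap<AllocId, Allocation>,
        tcx_static: Allocation, // stand-in for the global tcx allocation
    }

    impl Memory {
        fn get(&self, id: AllocId) -> Result<&Allocation, String> {
            let r: Result<&Allocation, Result<&Allocation, String>> =
                match self.alloc_map.get(&id) {
                    Some(a) => Ok(a),
                    // "id == 0" stands in for "found in tcx": a cheap borrow.
                    None if id == 0 => Err(Ok(&self.tcx_static)),
                    None => Err(Err(format!("dangling alloc id {}", id))),
                };
            // Unpack the funny error type.
            match r {
                Ok(a) => Ok(a),
                Err(a) => a,
            }
        }
    }

    fn main() {
        let m = Memory { alloc_map: HashMap::new(), tcx_static: Allocation("static") };
        assert_eq!(m.get(0).unwrap(), &Allocation("static"));
        assert!(m.get(1).is_err());
    }
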
pub fn dump_alloc(&self, id: AllocId) { - if !log_enabled!(::log::Level::Trace) { - return; - } self.dump_allocs(vec![id]); } + fn dump_alloc_helper( + &self, + allocs_seen: &mut FxHashSet, + allocs_to_print: &mut VecDeque, + mut msg: String, + alloc: &Allocation, + extra: String, + ) { + use std::fmt::Write; + + let prefix_len = msg.len(); + let mut relocations = vec![]; + + for i in 0..(alloc.bytes.len() as u64) { + let i = Size::from_bytes(i); + if let Some(&(_, target_id)) = alloc.relocations.get(&i) { + if allocs_seen.insert(target_id) { + allocs_to_print.push_back(target_id); + } + relocations.push((i, target_id)); + } + if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)).is_ok() { + // this `as usize` is fine, since `i` came from a `usize` + write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap(); + } else { + msg.push_str("__ "); + } + } + + trace!( + "{}({} bytes, alignment {}){}", + msg, + alloc.bytes.len(), + alloc.align.abi(), + extra + ); + + if !relocations.is_empty() { + msg.clear(); + write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces. + let mut pos = Size::ZERO; + let relocation_width = (self.pointer_size().bytes() - 1) * 3; + for (i, target_id) in relocations { + // this `as usize` is fine, since we can't print more chars than `usize::MAX` + write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap(); + let target = format!("({})", target_id); + // this `as usize` is fine, since we can't print more chars than `usize::MAX` + write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap(); + pos = i + self.pointer_size(); + } + trace!("{}", msg); + } + } + /// For debugging, print a list of allocations and all allocations they point to, recursively. pub fn dump_allocs(&self, mut allocs: Vec) { if !log_enabled!(::log::Level::Trace) { return; } - use std::fmt::Write; allocs.sort(); allocs.dedup(); let mut allocs_to_print = VecDeque::from(allocs); let mut allocs_seen = FxHashSet::default(); while let Some(id) = allocs_to_print.pop_front() { - let mut msg = format!("Alloc {:<5} ", format!("{}:", id)); - let prefix_len = msg.len(); - let mut relocations = vec![]; - - let (alloc, immutable) = - // normal alloc? - match self.alloc_map.get(&id) { - Some((kind, alloc)) => (alloc, match kind { + let msg = format!("Alloc {:<5} ", format!("{}:", id)); + + // normal alloc? + match self.alloc_map.get_or(id, || Err(())) { + Ok((kind, alloc)) => { + let extra = match kind { MemoryKind::Stack => " (stack)".to_owned(), + MemoryKind::Vtable => " (vtable)".to_owned(), MemoryKind::Machine(m) => format!(" ({:?})", m), - }), - None => { - // static alloc? - match self.tcx.alloc_map.lock().get(id) { - Some(AllocType::Memory(a)) => (a, " (immutable)".to_owned()), - Some(AllocType::Function(func)) => { - trace!("{} {}", msg, func); - continue; - } - Some(AllocType::Static(did)) => { - trace!("{} {:?}", msg, did); - continue; - } - None => { - trace!("{} (deallocated)", msg); - continue; - } + }; + self.dump_alloc_helper( + &mut allocs_seen, &mut allocs_to_print, + msg, alloc, extra + ); + }, + Err(()) => { + // static alloc? 
+ match self.tcx.alloc_map.lock().get(id) { + Some(AllocType::Memory(alloc)) => { + self.dump_alloc_helper( + &mut allocs_seen, &mut allocs_to_print, + msg, alloc, " (immutable)".to_owned() + ); + } + Some(AllocType::Function(func)) => { + trace!("{} {}", msg, func); + } + Some(AllocType::Static(did)) => { + trace!("{} {:?}", msg, did); + } + None => { + trace!("{} (deallocated)", msg); } - }, - }; - - for i in 0..(alloc.bytes.len() as u64) { - let i = Size::from_bytes(i); - if let Some(&target_id) = alloc.relocations.get(&i) { - if allocs_seen.insert(target_id) { - allocs_to_print.push_back(target_id); } - relocations.push((i, target_id)); - } - if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)).is_ok() { - // this `as usize` is fine, since `i` came from a `usize` - write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap(); - } else { - msg.push_str("__ "); - } - } + }, + }; - trace!( - "{}({} bytes, alignment {}){}", - msg, - alloc.bytes.len(), - alloc.align.abi(), - immutable - ); - - if !relocations.is_empty() { - msg.clear(); - write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces. - let mut pos = Size::ZERO; - let relocation_width = (self.pointer_size().bytes() - 1) * 3; - for (i, target_id) in relocations { - // this `as usize` is fine, since we can't print more chars than `usize::MAX` - write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap(); - let target = format!("({})", target_id); - // this `as usize` is fine, since we can't print more chars than `usize::MAX` - write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap(); - pos = i + self.pointer_size(); - } - trace!("{}", msg); - } } } pub fn leak_report(&self) -> usize { trace!("### LEAK REPORT ###"); - let mut_static_kind = M::MUT_STATIC_KIND.map(|k| MemoryKind::Machine(k)); - let leaks: Vec<_> = self.alloc_map - .iter() - .filter_map(|(&id, &(kind, _))| - // exclude mutable statics - if Some(kind) == mut_static_kind { None } else { Some(id) } ) - .collect(); + let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| { + // exclude statics and vtables + let exclude = match kind { + MemoryKind::Stack => false, + MemoryKind::Vtable => true, + MemoryKind::Machine(k) => Some(k) == M::STATIC_KIND, + }; + if exclude { None } else { Some(id) } + }); let n = leaks.len(); self.dump_allocs(leaks); n @@ -531,9 +609,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { /// The last argument controls whether we error out when there are undefined /// or pointer bytes. You should never call this, call `get_bytes` or /// `get_bytes_with_undef_and_ptr` instead, + /// + /// This function also guarantees that the resulting pointer will remain stable + /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies + /// on that. 
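
The stability guarantee that comment relies on can be demonstrated directly: rehashing moves the `Vec` header stored in the map, but not the heap buffer the `Vec` owns, so raw pointers into an allocation's bytes stay valid:

    // The bytes of an allocation live in a Vec's heap buffer; growing the map
    // moves the Vec header around, not the buffer it points to.
    use std::collections::HashMap;

    fn main() {
        let mut map: HashMap<u64, Vec<u8>> = HashMap::new();
        map.insert(0, vec![1, 2, 3]);
        let before = map[&0].as_ptr();

        // Insert enough entries to force the table to grow and rehash.
        for i in 1..1024 {
            map.insert(i, Vec::new());
        }

        // Entry 0's byte buffer has not moved.
        assert_eq!(before, map[&0].as_ptr());
    }
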
fn get_bytes_internal( &self, - ptr: Pointer, + ptr: Pointer, size: Size, align: Align, check_defined_and_ptr: bool, @@ -558,7 +640,12 @@ fn get_bytes_internal( } #[inline] - fn get_bytes(&self, ptr: Pointer, size: Size, align: Align) -> EvalResult<'tcx, &[u8]> { + fn get_bytes( + &self, + ptr: Pointer, + size: Size, + align: Align + ) -> EvalResult<'tcx, &[u8]> { self.get_bytes_internal(ptr, size, align, true) } @@ -567,7 +654,7 @@ fn get_bytes(&self, ptr: Pointer, size: Size, align: Align) -> EvalResult<'tcx, #[inline] fn get_bytes_with_undef_and_ptr( &self, - ptr: Pointer, + ptr: Pointer, size: Size, align: Align ) -> EvalResult<'tcx, &[u8]> { @@ -578,7 +665,7 @@ fn get_bytes_with_undef_and_ptr( /// so be sure to actually put data there! fn get_bytes_mut( &mut self, - ptr: Pointer, + ptr: Pointer, size: Size, align: Align, ) -> EvalResult<'tcx, &mut [u8]> { @@ -597,8 +684,12 @@ fn get_bytes_mut( } } -/// Reading and writing -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { +/// Interning (for CTFE) +impl<'a, 'mir, 'tcx, M> Memory<'a, 'mir, 'tcx, M> +where + M: Machine<'a, 'mir, 'tcx, PointerTag=()>, + M::MemoryMap: AllocMap, Allocation<()>)>, +{ /// mark an allocation as static and initialized, either mutable or not pub fn intern_static( &mut self, @@ -614,14 +705,14 @@ pub fn intern_static( let (kind, mut alloc) = self.alloc_map.remove(&alloc_id).unwrap(); match kind { MemoryKind::Machine(_) => bug!("Static cannot refer to machine memory"), - MemoryKind::Stack => {}, + MemoryKind::Stack | MemoryKind::Vtable => {}, } // ensure llvm knows not to put this into immutable memory alloc.mutability = mutability; let alloc = self.tcx.intern_const_alloc(alloc); self.tcx.alloc_map.lock().set_id_memory(alloc_id, alloc); // recurse into inner allocations - for &alloc in alloc.relocations.values() { + for &(_, alloc) in alloc.relocations.values() { // FIXME: Reusing the mutability here is likely incorrect. It is originally // determined via `is_freeze`, and data is considered frozen if there is no // `UnsafeCell` *immediately* in that data -- however, this search stops @@ -635,28 +726,15 @@ pub fn intern_static( } Ok(()) } +} - /// The alloc_id must refer to a (mutable) static; a deep copy of that - /// static is made into this memory. 
- fn deep_copy_static( - &mut self, - id: AllocId, - kind: MemoryKind, - ) -> EvalResult<'tcx> { - let alloc = Self::get_static_alloc(self.tcx, id)?; - if alloc.mutability == Mutability::Immutable { - return err!(ModifiedConstantMemory); - } - let old = self.alloc_map.insert(id, (kind, alloc.clone())); - assert!(old.is_none(), "deep_copy_static: must not overwrite existing memory"); - Ok(()) - } - +/// Reading and writing +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn copy( &mut self, - src: Scalar, + src: Scalar, src_align: Align, - dest: Scalar, + dest: Scalar, dest_align: Align, size: Size, nonoverlapping: bool, @@ -666,9 +744,9 @@ pub fn copy( pub fn copy_repeatedly( &mut self, - src: Scalar, + src: Scalar, src_align: Align, - dest: Scalar, + dest: Scalar, dest_align: Align, size: Size, length: u64, @@ -695,9 +773,9 @@ pub fn copy_repeatedly( new_relocations.extend( relocations .iter() - .map(|&(offset, alloc_id)| { + .map(|&(offset, reloc)| { (offset + dest.offset - src.offset + (i * size * relocations.len() as u64), - alloc_id) + reloc) }) ); } @@ -712,6 +790,8 @@ pub fn copy_repeatedly( // SAFE: The above indexing would have panicked if there weren't at least `size` bytes // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and // `dest` could possibly overlap. + // The pointers above remain valid even if the `HashMap` table is moved around because they + // point into the `Vec` storing the bytes. unsafe { assert_eq!(size.bytes() as usize as u64, size.bytes()); if src.alloc_id == dest.alloc_id { @@ -747,7 +827,7 @@ pub fn copy_repeatedly( Ok(()) } - pub fn read_c_str(&self, ptr: Pointer) -> EvalResult<'tcx, &[u8]> { + pub fn read_c_str(&self, ptr: Pointer) -> EvalResult<'tcx, &[u8]> { let alloc = self.get(ptr.alloc_id)?; assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); let offset = ptr.offset.bytes() as usize; @@ -758,11 +838,11 @@ pub fn read_c_str(&self, ptr: Pointer) -> EvalResult<'tcx, &[u8]> { self.check_defined(ptr, p1)?; Ok(&alloc.bytes[offset..offset + size]) } - None => err!(UnterminatedCString(ptr)), + None => err!(UnterminatedCString(ptr.erase_tag())), } } - pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> { + pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL let align = Align::from_bytes(1, 1).unwrap(); if size.bytes() == 0 { @@ -772,7 +852,7 @@ pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> { self.get_bytes(ptr.to_ptr()?, size, align) } - pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> { + pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL let align = Align::from_bytes(1, 1).unwrap(); if src.is_empty() { @@ -784,7 +864,12 @@ pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> { Ok(()) } - pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<'tcx> { + pub fn write_repeat( + &mut self, + ptr: Scalar, + val: u8, + count: Size + ) -> EvalResult<'tcx> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL let align = Align::from_bytes(1, 1).unwrap(); if count.bytes() == 0 { @@ -801,10 +886,10 @@ pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult< /// Read a *non-ZST* scalar 
pub fn read_scalar( &self, - ptr: Pointer, + ptr: Pointer, ptr_align: Align, size: Size - ) -> EvalResult<'tcx, ScalarMaybeUndef> { + ) -> EvalResult<'tcx, ScalarMaybeUndef> { // get_bytes_unchecked tests alignment and relocation edges let bytes = self.get_bytes_with_undef_and_ptr( ptr, size, ptr_align.min(self.int_align(size)) @@ -825,8 +910,8 @@ pub fn read_scalar( } else { let alloc = self.get(ptr.alloc_id)?; match alloc.relocations.get(&ptr.offset) { - Some(&alloc_id) => { - let ptr = Pointer::new(alloc_id, Size::from_bytes(bits as u64)); + Some(&(tag, alloc_id)) => { + let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag); return Ok(ScalarMaybeUndef::Scalar(ptr.into())) } None => {}, @@ -836,17 +921,20 @@ pub fn read_scalar( Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size))) } - pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) - -> EvalResult<'tcx, ScalarMaybeUndef> { + pub fn read_ptr_sized( + &self, + ptr: Pointer, + ptr_align: Align + ) -> EvalResult<'tcx, ScalarMaybeUndef> { self.read_scalar(ptr, ptr_align, self.pointer_size()) } /// Write a *non-ZST* scalar pub fn write_scalar( &mut self, - ptr: Pointer, + ptr: Pointer, ptr_align: Align, - val: ScalarMaybeUndef, + val: ScalarMaybeUndef, type_size: Size, ) -> EvalResult<'tcx> { let val = match val { @@ -880,7 +968,7 @@ pub fn write_scalar( Scalar::Ptr(val) => { self.get_mut(ptr.alloc_id)?.relocations.insert( ptr.offset, - val.alloc_id, + (val.tag, val.alloc_id), ); } _ => {} @@ -889,8 +977,12 @@ pub fn write_scalar( Ok(()) } - pub fn write_ptr_sized(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) - -> EvalResult<'tcx> { + pub fn write_ptr_sized( + &mut self, + ptr: Pointer, + ptr_align: Align, + val: ScalarMaybeUndef + ) -> EvalResult<'tcx> { let ptr_size = self.pointer_size(); self.write_scalar(ptr.into(), ptr_align, val, ptr_size) } @@ -915,9 +1007,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { /// Return all relocations overlapping with the given ptr-offset pair. fn relocations( &self, - ptr: Pointer, + ptr: Pointer, size: Size, - ) -> EvalResult<'tcx, &[(Size, AllocId)]> { + ) -> EvalResult<'tcx, &[(Size, (M::PointerTag, AllocId))]> { // We have to go back `pointer_size - 1` bytes, as that one would still overlap with // the beginning of this range. let start = ptr.offset.bytes().saturating_sub(self.pointer_size().bytes() - 1); @@ -927,7 +1019,7 @@ fn relocations( /// Check that there ar eno relocations overlapping with the given range. #[inline(always)] - fn check_relocations(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { + fn check_relocations(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { if self.relocations(ptr, size)?.len() != 0 { err!(ReadPointerAsBytes) } else { @@ -941,7 +1033,7 @@ fn check_relocations(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { /// uninitialized. This is a somewhat odd "spooky action at a distance", /// but it allows strictly more code to run than if we would just error /// immediately in that case. - fn clear_relocations(&mut self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { + fn clear_relocations(&mut self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { // Find the start and end of the given range and its outermost relocations. let (first, last) = { // Find all relocations overlapping the given range. 
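
The range arithmetic in `relocations()` above, as a standalone sketch: entries are keyed by the first byte of a stored pointer, so a query must look back `pointer_size - 1` bytes for a pointer that straddles the start of the range (`BTreeMap` stands in for `SortedMap`):

    // Values are (tag, target alloc_id); only the keys matter for the range math.
    use std::collections::BTreeMap;

    fn overlapping_relocations(
        relocations: &BTreeMap<u64, ((), u64)>,
        offset: u64,
        size: u64,
        pointer_size: u64,
    ) -> Vec<u64> {
        // A pointer starting up to `pointer_size - 1` bytes before `offset`
        // still overlaps the queried range.
        let start = offset.saturating_sub(pointer_size - 1);
        relocations.range(start..offset + size).map(|(&k, _)| k).collect()
    }

    fn main() {
        let mut rel = BTreeMap::new();
        rel.insert(4u64, ((), 0u64)); // an 8-byte pointer occupying bytes 4..12
        // A one-byte read at offset 10 overlaps it...
        assert_eq!(overlapping_relocations(&rel, 10, 1, 8), vec![4]);
        // ...but a read starting at offset 12 does not.
        assert!(overlapping_relocations(&rel, 12, 1, 8).is_empty());
    }
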
@@ -976,7 +1068,7 @@ fn clear_relocations(&mut self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { /// Error if there are relocations overlapping with the edges of the /// given memory range. #[inline] - fn check_relocation_edges(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { + fn check_relocation_edges(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { self.check_relocations(ptr, Size::ZERO)?; self.check_relocations(ptr.offset(size, self)?, Size::ZERO)?; Ok(()) } @@ -988,8 +1080,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // FIXME: Add a fast version for the common, nonoverlapping case fn copy_undef_mask( &mut self, - src: Pointer, - dest: Pointer, + src: Pointer, + dest: Pointer, size: Size, repeat: u64, ) -> EvalResult<'tcx> { @@ -1016,7 +1108,7 @@ fn copy_undef_mask( /// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes` /// error which will report the first byte which is undefined. #[inline] - fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { + fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { let alloc = self.get(ptr.alloc_id)?; alloc.undef_mask.is_range_defined( ptr.offset, @@ -1026,7 +1118,7 @@ fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { pub fn mark_definedness( &mut self, - ptr: Pointer, + ptr: Pointer, size: Size, new_state: bool, ) -> EvalResult<'tcx> { diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs index 9e0efaa9c78ef6208c0eb1daf16b1394e6ff01be..39628598ef31cd1d1172d9fa35e16e0ce22bb964 100644 --- a/src/librustc_mir/interpret/mod.rs +++ b/src/librustc_mir/interpret/mod.rs @@ -32,7 +32,7 @@ pub use self::memory::{Memory, MemoryKind}; -pub use self::machine::Machine; +pub use self::machine::{Machine, AllocMap}; pub use self::operand::{ScalarMaybeUndef, Value, ValTy, Operand, OpTy}; diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index b53bcfa993d53d9ecb389c9347e442afe73eb363..039a92cee2ca2fcfba65f06617ece23d6de56141 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -25,21 +25,42 @@ use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind}; #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum ScalarMaybeUndef { - Scalar(Scalar), +pub enum ScalarMaybeUndef { + Scalar(Scalar), Undef, } -impl From for ScalarMaybeUndef { +impl From> for ScalarMaybeUndef { #[inline(always)] - fn from(s: Scalar) -> Self { + fn from(s: Scalar) -> Self { ScalarMaybeUndef::Scalar(s) } } -impl<'tcx> ScalarMaybeUndef { +impl<'tcx> ScalarMaybeUndef<()> { #[inline] - pub fn not_undef(self) -> EvalResult<'static, Scalar> { + pub fn with_default_tag(self) -> ScalarMaybeUndef + where Tag: Default + { + match self { + ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.with_default_tag()), + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, + } + } +} + +impl<'tcx, Tag> ScalarMaybeUndef { + #[inline] + pub fn erase_tag(self) -> ScalarMaybeUndef + { + match self { + ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_tag()), + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, + } + } + + #[inline] + pub fn not_undef(self) -> EvalResult<'static, Scalar> { match self { ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))), @@ -47,7 +68,7 @@ pub fn not_undef(self) -> EvalResult<'static, Scalar> { }
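// Aside: the tag round-trip in miniature (our sketch, not the real types).
// Untagged data (`Tag = ()`) coming from constants or statics receives a
// machine-chosen default tag when it enters execution, and the tag is
// erased again at the boundary back out -- mirroring the
// `with_default_tag`/`erase_tag` pair introduced above.

#[derive(Copy, Clone, Debug, PartialEq)]
struct Ptr<Tag = ()> {
    alloc_id: u64,
    offset: u64,
    tag: Tag,
}

impl Ptr<()> {
    fn with_default_tag<Tag: Default>(self) -> Ptr<Tag> {
        Ptr { alloc_id: self.alloc_id, offset: self.offset, tag: Tag::default() }
    }
}

impl<Tag> Ptr<Tag> {
    fn erase_tag(self) -> Ptr<()> {
        Ptr { alloc_id: self.alloc_id, offset: self.offset, tag: () }
    }
}

fn main() {
    let untagged = Ptr { alloc_id: 1, offset: 8, tag: () };
    let tagged: Ptr<u32> = untagged.with_default_tag(); // tag == 0
    assert_eq!(tagged.erase_tag(), untagged); // erasing restores the untagged form
}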
#[inline(always)] - pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { self.not_undef()?.to_ptr() } @@ -126,26 +147,49 @@ pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, i64> { /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely /// defined on `Value`, and do not have to work with a `Place`. #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -pub enum Value { - Scalar(ScalarMaybeUndef), - ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef), +pub enum Value { + Scalar(ScalarMaybeUndef), + ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef), } -impl<'tcx> Value { +impl Value { + #[inline] + pub fn with_default_tag(self) -> Value + where Tag: Default + { + match self { + Value::Scalar(x) => Value::Scalar(x.with_default_tag()), + Value::ScalarPair(x, y) => + Value::ScalarPair(x.with_default_tag(), y.with_default_tag()), + } + } +} + +impl<'tcx, Tag> Value { + #[inline] + pub fn erase_tag(self) -> Value + { + match self { + Value::Scalar(x) => Value::Scalar(x.erase_tag()), + Value::ScalarPair(x, y) => + Value::ScalarPair(x.erase_tag(), y.erase_tag()), + } + } + pub fn new_slice( - val: Scalar, + val: Scalar, len: u64, cx: impl HasDataLayout ) -> Self { Value::ScalarPair(val.into(), Scalar::from_uint(len, cx.data_layout().pointer_size).into()) } - pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { + pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { Value::ScalarPair(val.into(), Scalar::Ptr(vtable).into()) } #[inline] - pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef { + pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef { match self { Value::Scalar(val) => val, Value::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"), @@ -153,12 +197,12 @@ pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef { } #[inline] - pub fn to_scalar(self) -> EvalResult<'tcx, Scalar> { + pub fn to_scalar(self) -> EvalResult<'tcx, Scalar> { self.to_scalar_or_undef().not_undef() } #[inline] - pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar, Scalar)> { + pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar, Scalar)> { match self { Value::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"), Value::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?)) @@ -168,7 +212,7 @@ pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar, Scalar)> { /// Convert the value into a pointer (or a pointer-sized integer). /// Throws away the second half of a ScalarPair! #[inline] - pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> { + pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> { match self { Value::Scalar(ptr) | Value::ScalarPair(ptr, _) => ptr.not_undef(), @@ -179,15 +223,15 @@ pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> { // ScalarPair needs a type to interpret, so we often have a value and a type together // as input for binary and cast operations. #[derive(Copy, Clone, Debug)] -pub struct ValTy<'tcx> { - value: Value, +pub struct ValTy<'tcx, Tag=()> { + value: Value, pub layout: TyLayout<'tcx>, } -impl<'tcx> ::std::ops::Deref for ValTy<'tcx> { - type Target = Value; +impl<'tcx, Tag> ::std::ops::Deref for ValTy<'tcx, Tag> { + type Target = Value; #[inline(always)] - fn deref(&self) -> &Value { + fn deref(&self) -> &Value { &self.value } } @@ -196,14 +240,37 @@ fn deref(&self) -> &Value { /// or still in memory. 
The latter is an optimization, to delay reading that chunk of /// memory and to avoid having to store arbitrary-sized data here. #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -pub enum Operand { - Immediate(Value), - Indirect(MemPlace), +pub enum Operand { + Immediate(Value), + Indirect(MemPlace), } impl Operand { #[inline] - pub fn to_mem_place(self) -> MemPlace { + pub fn with_default_tag(self) -> Operand + where Tag: Default + { + match self { + Operand::Immediate(x) => Operand::Immediate(x.with_default_tag()), + Operand::Indirect(x) => Operand::Indirect(x.with_default_tag()), + } + } +} + +impl Operand { + #[inline] + pub fn erase_tag(self) -> Operand + { + match self { + Operand::Immediate(x) => Operand::Immediate(x.erase_tag()), + Operand::Indirect(x) => Operand::Indirect(x.erase_tag()), + } + } + + #[inline] + pub fn to_mem_place(self) -> MemPlace + where Tag: ::std::fmt::Debug + { match self { Operand::Indirect(mplace) => mplace, _ => bug!("to_mem_place: expected Operand::Indirect, got {:?}", self), @@ -212,7 +279,9 @@ pub fn to_mem_place(self) -> MemPlace { } #[inline] - pub fn to_immediate(self) -> Value { + pub fn to_immediate(self) -> Value + where Tag: ::std::fmt::Debug + { match self { Operand::Immediate(val) => val, _ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self), @@ -222,22 +291,22 @@ pub fn to_immediate(self) -> Value { } #[derive(Copy, Clone, Debug)] -pub struct OpTy<'tcx> { - crate op: Operand, // ideally we'd make this private, but const_prop needs this +pub struct OpTy<'tcx, Tag=()> { + crate op: Operand, // ideally we'd make this private, but const_prop needs this pub layout: TyLayout<'tcx>, } -impl<'tcx> ::std::ops::Deref for OpTy<'tcx> { - type Target = Operand; +impl<'tcx, Tag> ::std::ops::Deref for OpTy<'tcx, Tag> { + type Target = Operand; #[inline(always)] - fn deref(&self) -> &Operand { + fn deref(&self) -> &Operand { &self.op } } -impl<'tcx> From> for OpTy<'tcx> { +impl<'tcx, Tag: Copy> From> for OpTy<'tcx, Tag> { #[inline(always)] - fn from(mplace: MPlaceTy<'tcx>) -> Self { + fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self { OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout @@ -245,9 +314,9 @@ fn from(mplace: MPlaceTy<'tcx>) -> Self { } } -impl<'tcx> From> for OpTy<'tcx> { +impl<'tcx, Tag> From> for OpTy<'tcx, Tag> { #[inline(always)] - fn from(val: ValTy<'tcx>) -> Self { + fn from(val: ValTy<'tcx, Tag>) -> Self { OpTy { op: Operand::Immediate(val.value), layout: val.layout @@ -256,18 +325,36 @@ fn from(val: ValTy<'tcx>) -> Self { } // Validation needs to hash OpTy, but we cannot hash Layout -- so we just hash the type -impl<'tcx> Hash for OpTy<'tcx> { +impl<'tcx, Tag> Hash for OpTy<'tcx, Tag> + where Tag: Hash +{ fn hash(&self, state: &mut H) { self.op.hash(state); self.layout.ty.hash(state); } } -impl<'tcx> PartialEq for OpTy<'tcx> { +impl<'tcx, Tag> PartialEq for OpTy<'tcx, Tag> + where Tag: PartialEq +{ fn eq(&self, other: &Self) -> bool { self.op == other.op && self.layout.ty == other.layout.ty } } -impl<'tcx> Eq for OpTy<'tcx> {} +impl<'tcx, Tag> Eq for OpTy<'tcx, Tag> + where Tag: Eq +{} + +impl<'tcx, Tag> OpTy<'tcx, Tag> +{ + #[inline] + pub fn erase_tag(self) -> OpTy<'tcx> + { + OpTy { + op: self.op.erase_tag(), + layout: self.layout, + } + } +} // Use the existing layout if given (but sanity check in debug mode), // or compute the layout. @@ -295,8 +382,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> /// Return None if the layout does not permit loading this as a value. 
pub(super) fn try_read_value_from_mplace( &self, - mplace: MPlaceTy<'tcx>, - ) -> EvalResult<'tcx, Option> { + mplace: MPlaceTy<'tcx, M::PointerTag>, + ) -> EvalResult<'tcx, Option>> { if mplace.layout.is_unsized() { // Don't touch unsized return Ok(None); } @@ -339,8 +426,8 @@ pub(super) fn try_read_value_from_mplace( /// in a `Value`, not on which data is stored there currently. pub(crate) fn try_read_value( &self, - src: OpTy<'tcx>, - ) -> EvalResult<'tcx, Result> { + src: OpTy<'tcx, M::PointerTag>, + ) -> EvalResult<'tcx, Result, MemPlace>> { Ok(match src.try_as_mplace() { Ok(mplace) => { if let Some(val) = self.try_read_value_from_mplace(mplace)? { @@ -355,7 +442,10 @@ pub(crate) fn try_read_value( /// Read a value from a place, asserting that that is possible with the given layout. #[inline(always)] - pub fn read_value(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> { + pub fn read_value( + &self, + op: OpTy<'tcx, M::PointerTag> + ) -> EvalResult<'tcx, ValTy<'tcx, M::PointerTag>> { if let Ok(value) = self.try_read_value(op)? { Ok(ValTy { value, layout: op.layout }) } else { @@ -364,7 +454,10 @@ pub fn read_value(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> { } /// Read a scalar from a place - pub fn read_scalar(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ScalarMaybeUndef> { + pub fn read_scalar( + &self, + op: OpTy<'tcx, M::PointerTag> + ) -> EvalResult<'tcx, ScalarMaybeUndef> { match *self.read_value(op)? { Value::ScalarPair(..) => bug!("got ScalarPair for type: {:?}", op.layout.ty), Value::Scalar(val) => Ok(val), @@ -374,7 +467,7 @@ pub fn read_scalar(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ScalarMaybeUndef> // Turn the MPlace into a string (must already be dereferenced!) pub fn read_str( &self, - mplace: MPlaceTy<'tcx>, + mplace: MPlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx, &str> { let len = mplace.len(self)?; let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?; @@ -383,7 +476,10 @@ pub fn read_str( Ok(str) } - pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Operand> { + pub fn uninit_operand( + &mut self, + layout: TyLayout<'tcx> + ) -> EvalResult<'tcx, Operand> { // This decides which types we will use the Immediate optimization for, and hence should // match what `try_read_value` and `eval_place_to_op` support. if layout.is_zst() { @@ -410,9 +506,9 @@ pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Ope /// Projection functions pub fn operand_field( &self, - op: OpTy<'tcx>, + op: OpTy<'tcx, M::PointerTag>, field: u64, - ) -> EvalResult<'tcx, OpTy<'tcx>> { + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { let base = match op.try_as_mplace() { Ok(mplace) => { // The easy case @@ -445,9 +541,9 @@ pub fn operand_field( pub fn operand_downcast( &self, - op: OpTy<'tcx>, + op: OpTy<'tcx, M::PointerTag>, variant: usize, - ) -> EvalResult<'tcx, OpTy<'tcx>> { + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { // Downcasts only change the layout Ok(match op.try_as_mplace() { Ok(mplace) => { @@ -464,8 +560,8 @@ pub fn operand_downcast( // will always be a MemPlace. pub(super) fn deref_operand( &self, - src: OpTy<'tcx>, - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + src: OpTy<'tcx, M::PointerTag>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let val = self.read_value(src)?; trace!("deref to {} on {:?}", val.layout.ty, *val); Ok(self.ref_to_mplace(val)?)
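// Aside: what `ref_to_mplace` (called by `deref_operand` above) does, as a
// free-standing sketch with simplified types (ours): a thin reference is
// one word, while a fat reference carries its metadata (slice length or
// vtable pointer) in a second word that becomes the place's `meta` field.

#[derive(Copy, Clone, Debug, PartialEq)]
enum Val {
    Scalar(u64),          // thin reference: just an address
    ScalarPair(u64, u64), // fat reference: address + metadata
}

#[derive(Debug, PartialEq)]
struct Place {
    ptr: u64,
    meta: Option<u64>,
}

fn ref_to_place(val: Val) -> Place {
    match val {
        Val::Scalar(ptr) => Place { ptr, meta: None },
        Val::ScalarPair(ptr, meta) => Place { ptr, meta: Some(meta) },
    }
}

fn main() {
    assert_eq!(ref_to_place(Val::Scalar(0x1000)), Place { ptr: 0x1000, meta: None });
    assert_eq!(
        ref_to_place(Val::ScalarPair(0x1000, 3)), // e.g. a &[u8] of length 3
        Place { ptr: 0x1000, meta: Some(3) },
    );
}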
@@ -473,9 +569,9 @@ pub(super) fn deref_operand( pub fn operand_projection( &self, - base: OpTy<'tcx>, + base: OpTy<'tcx, M::PointerTag>, proj_elem: &mir::PlaceElem<'tcx>, - ) -> EvalResult<'tcx, OpTy<'tcx>> { + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { Field(field, _) => self.operand_field(base, field.index() as u64)?, @@ -503,7 +599,7 @@ fn eval_place_to_op( &self, mir_place: &mir::Place<'tcx>, layout: Option>, - ) -> EvalResult<'tcx, OpTy<'tcx>> { + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { use rustc::mir::Place::*; let op = match *mir_place { Local(mir::RETURN_PLACE) => return err!(ReadFromReturnPointer), @@ -533,7 +629,7 @@ pub fn eval_operand( &self, mir_op: &mir::Operand<'tcx>, layout: Option>, - ) -> EvalResult<'tcx, OpTy<'tcx>> { + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { use rustc::mir::Operand::*; let op = match *mir_op { // FIXME: do some more logic on `move` to invalidate the old location @@ -558,7 +654,7 @@ pub fn eval_operand( pub(super) fn eval_operands( &self, ops: &[mir::Operand<'tcx>], - ) -> EvalResult<'tcx, Vec>> { + ) -> EvalResult<'tcx, Vec>> { ops.into_iter() .map(|op| self.eval_operand(op, None)) .collect() @@ -568,7 +664,7 @@ pub(super) fn eval_operands( pub(super) fn const_value_to_op( &self, val: ConstValue<'tcx>, - ) -> EvalResult<'tcx, Operand> { + ) -> EvalResult<'tcx, Operand> { trace!("const_value_to_op: {:?}", val); match val { ConstValue::Unevaluated(def_id, substs) => { @@ -581,23 +677,28 @@ pub(super) fn const_value_to_op( ConstValue::ByRef(id, alloc, offset) => { // We rely on mutability being set correctly in that allocation to prevent writes // where none should happen -- and for `static mut`, we copy on demand anyway. - Ok(Operand::Indirect(MemPlace::from_ptr(Pointer::new(id, offset), alloc.align))) + Ok(Operand::Indirect( + MemPlace::from_ptr(Pointer::new(id, offset), alloc.align) + ).with_default_tag()) }, ConstValue::ScalarPair(a, b) => - Ok(Operand::Immediate(Value::ScalarPair(a.into(), b.into()))), + Ok(Operand::Immediate(Value::ScalarPair(a.into(), b.into())).with_default_tag()), ConstValue::Scalar(x) => - Ok(Operand::Immediate(Value::Scalar(x.into()))), + Ok(Operand::Immediate(Value::Scalar(x.into())).with_default_tag()), } } pub fn const_to_op( &self, cnst: &ty::Const<'tcx>, - ) -> EvalResult<'tcx, OpTy<'tcx>> { + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { let op = self.const_value_to_op(cnst.val)?; Ok(OpTy { op, layout: self.layout_of(cnst.ty)? }) } - pub(super) fn global_to_op(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Operand> { + pub(super) fn global_to_op( + &self, + gid: GlobalId<'tcx> + ) -> EvalResult<'tcx, Operand> { let cv = self.const_eval(gid)?; self.const_value_to_op(cv.val) } @@ -605,7 +706,7 @@ pub(super) fn global_to_op(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Opera /// Read discriminant, return the runtime value as well as the variant index. 
pub fn read_discriminant( &self, - rval: OpTy<'tcx>, + rval: OpTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx, (u128, usize)> { trace!("read_discriminant_value {:#?}", rval.layout); if rval.layout.abi.is_uninhabited() { diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index dd6ee374c0facc9f4b4370f5577fb62f8c7b6636..5f4bafc39f3deec8aa57efb12157d67c36fd7a14 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -24,9 +24,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> pub fn binop_with_overflow( &mut self, op: mir::BinOp, - left: ValTy<'tcx>, - right: ValTy<'tcx>, - dest: PlaceTy<'tcx>, + left: ValTy<'tcx, M::PointerTag>, + right: ValTy<'tcx, M::PointerTag>, + dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { let (val, overflowed) = self.binary_op_val(op, left, right)?; let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into()); @@ -38,9 +38,9 @@ pub fn binop_with_overflow( pub fn binop_ignore_overflow( &mut self, op: mir::BinOp, - left: ValTy<'tcx>, - right: ValTy<'tcx>, - dest: PlaceTy<'tcx>, + left: ValTy<'tcx, M::PointerTag>, + right: ValTy<'tcx, M::PointerTag>, + dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { let (val, _overflowed) = self.binary_op_val(op, left, right)?; self.write_scalar(val, dest) @@ -53,7 +53,7 @@ fn binary_char_op( bin_op: mir::BinOp, l: char, r: char, - ) -> EvalResult<'tcx, (Scalar, bool)> { + ) -> EvalResult<'tcx, (Scalar, bool)> { use rustc::mir::BinOp::*; let res = match bin_op { @@ -73,7 +73,7 @@ fn binary_bool_op( bin_op: mir::BinOp, l: bool, r: bool, - ) -> EvalResult<'tcx, (Scalar, bool)> { + ) -> EvalResult<'tcx, (Scalar, bool)> { use rustc::mir::BinOp::*; let res = match bin_op { @@ -98,7 +98,7 @@ fn binary_float_op( // passing in raw bits l: u128, r: u128, - ) -> EvalResult<'tcx, (Scalar, bool)> { + ) -> EvalResult<'tcx, (Scalar, bool)> { use rustc::mir::BinOp::*; macro_rules! float_math { @@ -138,7 +138,7 @@ fn binary_int_op( left_layout: TyLayout<'tcx>, r: u128, right_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, (Scalar, bool)> { + ) -> EvalResult<'tcx, (Scalar, bool)> { use rustc::mir::BinOp::*; // Shift ops can have an RHS with a different numeric type. 
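// Aside: a sketch (ours, not the code from this hunk) of why shifts need no
// type match between LHS and RHS: the shift amount is reduced modulo the
// LHS bit width before shifting, and the returned `bool` reports whether
// that reduction kicked in (the overflow flag of `Shl`/`Shr` with overflow
// checks). The helper name and the `u128`-as-raw-bits convention are ours.

fn shl_with_overflow(l: u128, lhs_bits: u32, r: u128) -> (u128, bool) {
    let overflowed = r >= lhs_bits as u128;  // shift amount exceeds the width?
    let amount = (r % lhs_bits as u128) as u32;
    let mask = if lhs_bits == 128 { u128::MAX } else { (1u128 << lhs_bits) - 1 };
    (l.wrapping_shl(amount) & mask, overflowed)
}

fn main() {
    assert_eq!(shl_with_overflow(1, 8, 3), (8, false)); // 1u8 << 3
    assert_eq!(shl_with_overflow(1, 8, 9), (2, true));  // amount 9 wraps to 1
}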
@@ -288,9 +288,9 @@ fn binary_int_op( pub fn binary_op_val( &self, bin_op: mir::BinOp, - left: ValTy<'tcx>, - right: ValTy<'tcx>, - ) -> EvalResult<'tcx, (Scalar, bool)> { + left: ValTy<'tcx, M::PointerTag>, + right: ValTy<'tcx, M::PointerTag>, + ) -> EvalResult<'tcx, (Scalar, bool)> { self.binary_op( bin_op, left.to_scalar()?, left.layout, @@ -302,11 +302,11 @@ pub fn binary_op_val( pub fn binary_op( &self, bin_op: mir::BinOp, - left: Scalar, + left: Scalar, left_layout: TyLayout<'tcx>, - right: Scalar, + right: Scalar, right_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, (Scalar, bool)> { + ) -> EvalResult<'tcx, (Scalar, bool)> { trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_layout.ty, right, right_layout.ty); @@ -352,9 +352,9 @@ pub fn binary_op( pub fn unary_op( &self, un_op: mir::UnOp, - val: Scalar, + val: Scalar, layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, Scalar> { + ) -> EvalResult<'tcx, Scalar> { use rustc::mir::UnOp::*; use rustc_apfloat::ieee::{Single, Double}; use rustc_apfloat::Float; diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index b75ceb61febb7c41337f29adb1ded33e3664745f..8b9c6a5a270537fdaec0cff1e9e4c4c711ccf876 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -13,33 +13,37 @@ //! All high-level functions to write to memory work on places as destinations. use std::convert::TryFrom; +use std::hash::Hash; use rustc::mir; use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout}; use rustc::mir::interpret::{ - GlobalId, AllocId, Scalar, EvalResult, Pointer, PointerArithmetic + GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic +}; +use super::{ + EvalContext, Machine, AllocMap, + Value, ValTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind }; -use super::{EvalContext, Machine, Value, ValTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind}; #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -pub struct MemPlace { +pub struct MemPlace { /// A place may have an integral pointer for ZSTs, since it might /// be turned back into a reference before ever being dereferenced. /// However, it may never be undef. - pub ptr: Scalar, + pub ptr: Scalar, pub align: Align, /// Metadata for unsized places. Interpretation is up to the type. /// Must not be present for sized types, but can be missing for unsized types /// (e.g. `extern type`). - pub extra: Option>, + pub meta: Option>, } #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -pub enum Place { +pub enum Place { /// A place referring to a value allocated in the `Memory` system. - Ptr(MemPlace), + Ptr(MemPlace), /// To support alloc-free locals, we are able to write directly to a local. /// (Without that optimization, we'd just always be a `MemPlace`.) @@ -50,37 +54,37 @@ pub enum Place { } #[derive(Copy, Clone, Debug)] -pub struct PlaceTy<'tcx> { - place: Place, +pub struct PlaceTy<'tcx, Tag=()> { + place: Place, pub layout: TyLayout<'tcx>, } -impl<'tcx> ::std::ops::Deref for PlaceTy<'tcx> { - type Target = Place; +impl<'tcx, Tag> ::std::ops::Deref for PlaceTy<'tcx, Tag> { + type Target = Place; #[inline(always)] - fn deref(&self) -> &Place { + fn deref(&self) -> &Place { &self.place } } /// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Debug)] -pub struct MPlaceTy<'tcx> { - mplace: MemPlace, +pub struct MPlaceTy<'tcx, Tag=()> { + mplace: MemPlace, pub layout: TyLayout<'tcx>, } -impl<'tcx> ::std::ops::Deref for MPlaceTy<'tcx> { - type Target = MemPlace; +impl<'tcx, Tag> ::std::ops::Deref for MPlaceTy<'tcx, Tag> { + type Target = MemPlace; #[inline(always)] - fn deref(&self) -> &MemPlace { + fn deref(&self) -> &MemPlace { &self.mplace } } -impl<'tcx> From> for PlaceTy<'tcx> { +impl<'tcx, Tag> From> for PlaceTy<'tcx, Tag> { #[inline(always)] - fn from(mplace: MPlaceTy<'tcx>) -> Self { + fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self { PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout @@ -89,29 +93,52 @@ fn from(mplace: MPlaceTy<'tcx>) -> Self { } impl MemPlace { + #[inline] + pub fn with_default_tag(self) -> MemPlace + where Tag: Default + { + MemPlace { + ptr: self.ptr.with_default_tag(), + align: self.align, + meta: self.meta.map(Scalar::with_default_tag), + } + } +} + +impl MemPlace { + #[inline] + pub fn erase_tag(self) -> MemPlace + { + MemPlace { + ptr: self.ptr.erase_tag(), + align: self.align, + meta: self.meta.map(Scalar::erase_tag), + } + } + #[inline(always)] - pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { + pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { MemPlace { ptr, align, - extra: None, + meta: None, } } #[inline(always)] - pub fn from_ptr(ptr: Pointer, align: Align) -> Self { + pub fn from_ptr(ptr: Pointer, align: Align) -> Self { Self::from_scalar_ptr(ptr.into(), align) } #[inline(always)] - pub fn to_scalar_ptr_align(self) -> (Scalar, Align) { - assert_eq!(self.extra, None); + pub fn to_scalar_ptr_align(self) -> (Scalar, Align) { + assert!(self.meta.is_none()); (self.ptr, self.align) } /// Extract the ptr part of the mplace #[inline(always)] - pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { // At this point, we forget about the alignment information -- // the place has been turned into a reference, and no matter where it came from, // it now must be aligned. @@ -120,17 +147,17 @@ pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { /// Turn an mplace into a (thin or fat) pointer, as a reference, pointing to the same space. /// This is the inverse of `ref_to_mplace`. - pub fn to_ref(self) -> Value { + pub fn to_ref(self) -> Value { // We ignore the alignment of the place here -- special handling for packed structs ends // at the `&` operator. - match self.extra { + match self.meta { None => Value::Scalar(self.ptr.into()), - Some(extra) => Value::ScalarPair(self.ptr.into(), extra.into()), + Some(meta) => Value::ScalarPair(self.ptr.into(), meta.into()), } } } -impl<'tcx> MPlaceTy<'tcx> { +impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { /// Produces a MemPlace that works for ZST but nothing else #[inline] pub fn dangling(layout: TyLayout<'tcx>, cx: impl HasDataLayout) -> Self { @@ -144,17 +171,17 @@ pub fn dangling(layout: TyLayout<'tcx>, cx: impl HasDataLayout) -> Self { } #[inline] - fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self { + fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self { MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout } } #[inline] pub(super) fn len(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> { if self.layout.is_unsized() { // We need to consult the `meta` metadata match self.layout.ty.sty { ty::Slice(..)
| ty::Str => - return self.extra.unwrap().to_usize(cx), + return self.mplace.meta.unwrap().to_usize(cx), _ => bug!("len not supported on unsized type {:?}", self.layout.ty), } } else { @@ -168,30 +195,30 @@ pub(super) fn len(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> { } #[inline] - pub(super) fn vtable(self) -> EvalResult<'tcx, Pointer> { + pub(super) fn vtable(self) -> EvalResult<'tcx, Pointer> { match self.layout.ty.sty { - ty::Dynamic(..) => self.extra.unwrap().to_ptr(), + ty::Dynamic(..) => self.mplace.meta.unwrap().to_ptr(), _ => bug!("vtable not supported on type {:?}", self.layout.ty), } } } -impl<'tcx> OpTy<'tcx> { +impl<'tcx, Tag: ::std::fmt::Debug> OpTy<'tcx, Tag> { #[inline(always)] - pub fn try_as_mplace(self) -> Result, Value> { - match *self { + pub fn try_as_mplace(self) -> Result, Value> { + match self.op { Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }), Operand::Immediate(value) => Err(value), } } #[inline(always)] - pub fn to_mem_place(self) -> MPlaceTy<'tcx> { + pub fn to_mem_place(self) -> MPlaceTy<'tcx, Tag> { self.try_as_mplace().unwrap() } } -impl<'tcx> Place { +impl<'tcx, Tag: ::std::fmt::Debug> Place { /// Produces a Place that will error if attempted to be read from or written to #[inline] pub fn null(cx: impl HasDataLayout) -> Self { @@ -199,17 +226,17 @@ pub fn null(cx: impl HasDataLayout) -> Self { } #[inline] - pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { + pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { Place::Ptr(MemPlace::from_scalar_ptr(ptr, align)) } #[inline] - pub fn from_ptr(ptr: Pointer, align: Align) -> Self { + pub fn from_ptr(ptr: Pointer, align: Align) -> Self { Place::Ptr(MemPlace::from_ptr(ptr, align)) } #[inline] - pub fn to_mem_place(self) -> MemPlace { + pub fn to_mem_place(self) -> MemPlace { match self { Place::Ptr(mplace) => mplace, _ => bug!("to_mem_place: expected Place::Ptr, got {:?}", self), @@ -218,17 +245,17 @@ pub fn to_mem_place(self) -> MemPlace { } #[inline] - pub fn to_scalar_ptr_align(self) -> (Scalar, Align) { + pub fn to_scalar_ptr_align(self) -> (Scalar, Align) { self.to_mem_place().to_scalar_ptr_align() } #[inline] - pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { self.to_mem_place().to_ptr() } } -impl<'tcx> PlaceTy<'tcx> { +impl<'tcx, Tag: ::std::fmt::Debug> PlaceTy<'tcx, Tag> { /// Produces a Place that will error if attempted to be read from or written to #[inline] pub fn null(cx: impl HasDataLayout, layout: TyLayout<'tcx>) -> Self { @@ -236,25 +263,31 @@ pub fn null(cx: impl HasDataLayout, layout: TyLayout<'tcx>) -> Self { } #[inline] - pub fn to_mem_place(self) -> MPlaceTy<'tcx> { + pub fn to_mem_place(self) -> MPlaceTy<'tcx, Tag> { MPlaceTy { mplace: self.place.to_mem_place(), layout: self.layout } } } -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { +// separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385 +impl<'a, 'mir, 'tcx, Tag, M> EvalContext<'a, 'mir, 'tcx, M> +where + Tag: ::std::fmt::Debug+Default+Copy+Eq+Hash+'static, + M: Machine<'a, 'mir, 'tcx, PointerTag=Tag>, + M::MemoryMap: AllocMap, Allocation)>, +{ /// Take a value, which represents a (thin or fat) reference, and make it a place. /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref`. 
pub fn ref_to_mplace( - &self, val: ValTy<'tcx> - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + &self, val: ValTy<'tcx, M::PointerTag> + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty; let layout = self.layout_of(pointee_type)?; let align = layout.align; let mplace = match *val { Value::Scalar(ptr) => - MemPlace { ptr: ptr.not_undef()?, align, extra: None }, - Value::ScalarPair(ptr, extra) => - MemPlace { ptr: ptr.not_undef()?, align, extra: Some(extra.not_undef()?) }, + MemPlace { ptr: ptr.not_undef()?, align, meta: None }, + Value::ScalarPair(ptr, meta) => + MemPlace { ptr: ptr.not_undef()?, align, meta: Some(meta.not_undef()?) }, }; Ok(MPlaceTy { mplace, layout }) } @@ -265,9 +298,9 @@ pub fn ref_to_mplace( #[inline(always)] pub fn mplace_field( &self, - base: MPlaceTy<'tcx>, + base: MPlaceTy<'tcx, M::PointerTag>, field: u64, - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { // Not using the layout method because we want to compute on u64 let offset = match base.layout.fields { layout::FieldPlacement::Arbitrary { ref offsets, .. } => @@ -290,13 +323,13 @@ pub fn mplace_field( let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?; // Offset may need adjustment for unsized fields - let (extra, offset) = if field_layout.is_unsized() { + let (meta, offset) = if field_layout.is_unsized() { // re-use parent metadata to determine dynamic field layout - let (_, align) = self.size_and_align_of(base.extra, field_layout)?; - (base.extra, offset.abi_align(align)) + let (_, align) = self.size_and_align_of(base.meta, field_layout)?; + (base.meta, offset.abi_align(align)) } else { - // base.extra could be present; we might be accessing a sized field of an unsized + // base.meta could be present; we might be accessing a sized field of an unsized // struct. (None, offset) }; @@ -307,15 +340,17 @@ pub fn mplace_field( // codegen -- mostly to see if we can get away with that .restrict_for_offset(offset); // must be last thing that happens - Ok(MPlaceTy { mplace: MemPlace { ptr, align, extra }, layout: field_layout }) + Ok(MPlaceTy { mplace: MemPlace { ptr, align, meta }, layout: field_layout }) } // Iterates over all fields of an array. Much more efficient than doing the // same by repeatedly calling `mplace_array`. pub fn mplace_array_fields( &self, - base: MPlaceTy<'tcx>, - ) -> EvalResult<'tcx, impl Iterator>> + 'a> { + base: MPlaceTy<'tcx, Tag>, + ) -> + EvalResult<'tcx, impl Iterator>> + 'a> + { let len = base.len(self)?; // also asserts that we have a type where this makes sense let stride = match base.layout.fields { layout::FieldPlacement::Array { stride, .. 
} => stride, @@ -326,7 +361,7 @@ pub fn mplace_array_fields( Ok((0..len).map(move |i| { let ptr = base.ptr.ptr_offset(i * stride, dl)?; Ok(MPlaceTy { - mplace: MemPlace { ptr, align: base.align, extra: None }, + mplace: MemPlace { ptr, align: base.align, meta: None }, layout }) })) @@ -334,10 +369,10 @@ pub fn mplace_array_fields( pub fn mplace_subslice( &self, - base: MPlaceTy<'tcx>, + base: MPlaceTy<'tcx, M::PointerTag>, from: u64, to: u64, - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let len = base.len(self)?; // also asserts that we have a type where this makes sense assert!(from <= len - to); @@ -350,9 +385,9 @@ pub fn mplace_subslice( }; let ptr = base.ptr.ptr_offset(from_offset, self)?; - // Compute extra and new layout + // Compute meta and new layout let inner_len = len - to - from; - let (extra, ty) = match base.layout.ty.sty { + let (meta, ty) = match base.layout.ty.sty { // It is not nice to match on the type, but that seems to be the only way to // implement this. ty::Array(inner, _) => @@ -367,27 +402,27 @@ pub fn mplace_subslice( let layout = self.layout_of(ty)?; Ok(MPlaceTy { - mplace: MemPlace { ptr, align: base.align, extra }, + mplace: MemPlace { ptr, align: base.align, meta }, layout }) } pub fn mplace_downcast( &self, - base: MPlaceTy<'tcx>, + base: MPlaceTy<'tcx, M::PointerTag>, variant: usize, - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { // Downcasts only change the layout - assert_eq!(base.extra, None); + assert!(base.meta.is_none()); Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base }) } /// Project into an mplace pub fn mplace_projection( &self, - base: MPlaceTy<'tcx>, + base: MPlaceTy<'tcx, M::PointerTag>, proj_elem: &mir::PlaceElem<'tcx>, - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { Field(field, _) => self.mplace_field(base, field.index() as u64)?, @@ -428,9 +463,9 @@ pub fn mplace_projection( /// Just a convenience function, but used quite a bit. pub fn place_field( &mut self, - base: PlaceTy<'tcx>, + base: PlaceTy<'tcx, M::PointerTag>, field: u64, - ) -> EvalResult<'tcx, PlaceTy<'tcx>> { + ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { // FIXME: We could try to be smarter and avoid allocation for fields that span the // entire place. 
let mplace = self.force_allocation(base)?; @@ -439,9 +474,9 @@ pub fn place_field( pub fn place_downcast( &mut self, - base: PlaceTy<'tcx>, + base: PlaceTy<'tcx, M::PointerTag>, variant: usize, - ) -> EvalResult<'tcx, PlaceTy<'tcx>> { + ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { // Downcast just changes the layout Ok(match base.place { Place::Ptr(mplace) => @@ -456,9 +491,9 @@ pub fn place_downcast( /// Project into a place pub fn place_projection( &mut self, - base: PlaceTy<'tcx>, + base: PlaceTy<'tcx, M::PointerTag>, proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>, - ) -> EvalResult<'tcx, PlaceTy<'tcx>> { + ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { Field(field, _) => self.place_field(base, field.index() as u64)?, @@ -478,7 +513,7 @@ pub fn place_projection( pub(super) fn eval_place_to_mplace( &self, mir_place: &mir::Place<'tcx> - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { use rustc::mir::Place::*; Ok(match *mir_place { Promoted(ref promoted) => { @@ -515,7 +550,7 @@ pub(super) fn eval_place_to_mplace( // and miri: They use the same query to eventually obtain a `ty::Const` // and use that for further computation. let alloc = self.tcx.alloc_map.lock().intern_static(cid.instance.def_id()); - MPlaceTy::from_aligned_ptr(alloc.into(), layout) + MPlaceTy::from_aligned_ptr(Pointer::from(alloc).with_default_tag(), layout) } _ => bug!("eval_place_to_mplace called on {:?}", mir_place), @@ -524,7 +559,10 @@ pub(super) fn eval_place_to_mplace( /// Compute a place. You should only use this if you intend to write into this /// place; for reading, a more efficient alternative is `eval_place_for_read`. - pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, PlaceTy<'tcx>> { + pub fn eval_place( + &mut self, + mir_place: &mir::Place<'tcx> + ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { use rustc::mir::Place::*; let place = match *mir_place { Local(mir::RETURN_PLACE) => PlaceTy { @@ -554,8 +592,8 @@ pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, P /// Write a scalar to a place pub fn write_scalar( &mut self, - val: impl Into, - dest: PlaceTy<'tcx>, + val: impl Into>, + dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { self.write_value(Value::Scalar(val.into()), dest) } @@ -563,8 +601,8 @@ pub fn write_scalar( /// Write a value to a place pub fn write_value( &mut self, - src_val: Value, - dest: PlaceTy<'tcx>, + src_val: Value, + dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { trace!("write_value: {:?} <- {:?}", *dest, src_val); // Check that the value actually is okay for that type @@ -599,8 +637,8 @@ pub fn write_value( /// done that before calling this! fn write_value_to_mplace( &mut self, - value: Value, - dest: MPlaceTy<'tcx>, + value: Value, + dest: MPlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { let (ptr, ptr_align) = dest.to_scalar_ptr_align(); // Note that it is really important that the type here is the right one, and matches the @@ -641,8 +679,8 @@ fn write_value_to_mplace( /// Copy the data from an operand to a place pub fn copy_op( &mut self, - src: OpTy<'tcx>, - dest: PlaceTy<'tcx>, + src: OpTy<'tcx, M::PointerTag>, + dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(), "Cannot copy unsized data"); @@ -678,8 +716,8 @@ pub fn copy_op( /// This is essentially `force_to_memplace`. 
pub fn force_allocation( &mut self, - place: PlaceTy<'tcx>, - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + place: PlaceTy<'tcx, M::PointerTag>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let mplace = match place.place { Place::Local { frame, local } => { match *self.stack[frame].locals[local].access()? { @@ -715,7 +753,7 @@ pub fn allocate( &mut self, layout: TyLayout<'tcx>, kind: MemoryKind, - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); let ptr = self.memory.allocate(layout.size, layout.align, kind)?; Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) @@ -724,7 +762,7 @@ pub fn allocate( pub fn write_discriminant_index( &mut self, variant_index: usize, - dest: PlaceTy<'tcx>, + dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { match dest.layout.variants { layout::Variants::Single { index } => { @@ -772,7 +810,10 @@ pub fn write_discriminant_index( /// Every place can be read from, so we can turn them into an operand #[inline(always)] - pub fn place_to_op(&self, place: PlaceTy<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>> { + pub fn place_to_op( + &self, + place: PlaceTy<'tcx, M::PointerTag> + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { let op = match place.place { Place::Ptr(mplace) => { Operand::Indirect(mplace) @@ -785,8 +826,8 @@ pub fn place_to_op(&self, place: PlaceTy<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>> /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type. /// Also return some more information so drop doesn't have to run the same code twice. - pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx>) - -> EvalResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx>)> { + pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) + -> EvalResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> { let vtable = mplace.vtable()?; // also sanity checks the type let (instance, ty) = self.read_drop_type_from_vtable(vtable)?; let layout = self.layout_of(ty)?; @@ -799,7 +840,7 @@ pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx>) } let mplace = MPlaceTy { - mplace: MemPlace { extra: None, ..*mplace }, + mplace: MemPlace { meta: None, ..*mplace }, layout }; Ok((instance, mplace)) diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index 9b507eca3633b1cf16f771dd4ae16f0c05ce86e5..06aee8605c6e10cc05fadee13a6503f3e0f3959e 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -99,6 +99,8 @@ trait Snapshot<'a, Ctx: SnapshotContext<'a>> { ($field:ident, $ctx:expr, $delegate:expr) => ($delegate); } +// This assumes the type has two type parameters, first for the tag (set to `()`), +// then for the id macro_rules! impl_snapshot_for { // FIXME(mark-i-m): Some of these should be `?` rather than `*`.
(enum $enum_name:ident { @@ -108,7 +110,7 @@ trait Snapshot<'a, Ctx: SnapshotContext<'a>> { impl<'a, Ctx> self::Snapshot<'a, Ctx> for $enum_name where Ctx: self::SnapshotContext<'a>, { - type Item = $enum_name>; + type Item = $enum_name<(), AllocIdSnapshot<'a>>; #[inline] fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item { @@ -129,7 +131,7 @@ fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item { impl<'a, Ctx> self::Snapshot<'a, Ctx> for $struct_name where Ctx: self::SnapshotContext<'a>, { - type Item = $struct_name>; + type Item = $struct_name<(), AllocIdSnapshot<'a>>; #[inline] fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item { @@ -175,12 +177,13 @@ fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { impl_snapshot_for!(struct Pointer { alloc_id, offset -> *offset, // just copy offset verbatim + tag -> *tag, // just copy tag }); impl<'a, Ctx> Snapshot<'a, Ctx> for Scalar where Ctx: SnapshotContext<'a>, { - type Item = Scalar>; + type Item = Scalar<(), AllocIdSnapshot<'a>>; fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { match self { @@ -206,11 +209,11 @@ fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { impl_stable_hash_for!(struct ::interpret::MemPlace { ptr, align, - extra, + meta, }); impl_snapshot_for!(struct MemPlace { ptr, - extra, + meta, align -> *align, // just copy alignment verbatim }); @@ -234,7 +237,7 @@ fn hash_stable( impl<'a, Ctx> Snapshot<'a, Ctx> for Place where Ctx: SnapshotContext<'a>, { - type Item = Place>; + type Item = Place<(), AllocIdSnapshot<'a>>; fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { match self { @@ -278,11 +281,11 @@ fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { impl<'a, Ctx> Snapshot<'a, Ctx> for Relocations where Ctx: SnapshotContext<'a>, { - type Item = Relocations>; + type Item = Relocations<(), AllocIdSnapshot<'a>>; fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { Relocations::from_presorted(self.iter() - .map(|(size, id)| (*size, id.snapshot(ctx))) + .map(|(size, ((), id))| (*size, ((), id.snapshot(ctx)))) .collect()) } } @@ -290,7 +293,7 @@ fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { #[derive(Eq, PartialEq)] struct AllocationSnapshot<'a> { bytes: &'a [u8], - relocations: Relocations>, + relocations: Relocations<(), AllocIdSnapshot<'a>>, undef_mask: &'a UndefMask, align: &'a Align, mutability: &'a Mutability, @@ -334,8 +337,8 @@ struct FrameSnapshot<'a, 'tcx: 'a> { instance: &'a ty::Instance<'tcx>, span: &'a Span, return_to_block: &'a StackPopCleanup, - return_place: Place>, - locals: IndexVec>>, + return_place: Place<(), AllocIdSnapshot<'a>>, + locals: IndexVec>>, block: &'a mir::BasicBlock, stmt: usize, } diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs index 862f61df227be2b103f18d15a621c544cb54137a..e599608b2dac99d875218f4f98c7e9ce6240d57d 100644 --- a/src/librustc_mir/interpret/terminator.rs +++ b/src/librustc_mir/interpret/terminator.rs @@ -205,8 +205,8 @@ fn check_argument_compat( fn pass_argument( &mut self, skip_zst: bool, - caller_arg: &mut impl Iterator>, - callee_arg: PlaceTy<'tcx>, + caller_arg: &mut impl Iterator>, + callee_arg: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { if skip_zst && callee_arg.layout.is_zst() { // Nothing to do. @@ -231,8 +231,8 @@ fn eval_fn_call( instance: ty::Instance<'tcx>, span: Span, caller_abi: Abi, - args: &[OpTy<'tcx>], - dest: Option>, + args: &[OpTy<'tcx, M::PointerTag>], + dest: Option>, ret: Option, ) -> EvalResult<'tcx> { trace!("eval_fn_call: {:#?}", instance); @@ -330,7 +330,7 @@ fn eval_fn_call( // last incoming argument. 
These two iterators do not have the same type, // so to keep the code paths uniform we accept an allocation // (for RustCall ABI only). - let caller_args : Cow<[OpTy<'tcx>]> = + let caller_args : Cow<[OpTy<'tcx, M::PointerTag>]> = if caller_abi == Abi::RustCall && !args.is_empty() { // Untuple let (&untuple_arg, args) = args.split_last().unwrap(); @@ -339,7 +339,7 @@ fn eval_fn_call( .chain((0..untuple_arg.layout.fields.count()).into_iter() .map(|i| self.operand_field(untuple_arg, i as u64)) ) - .collect::>>>()?) + .collect::>>>()?) } else { // Plain arg passing Cow::from(args) @@ -426,7 +426,7 @@ fn eval_fn_call( fn drop_in_place( &mut self, - place: PlaceTy<'tcx>, + place: PlaceTy<'tcx, M::PointerTag>, instance: ty::Instance<'tcx>, span: Span, target: mir::BasicBlock, diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index 5ea588b957a4ffed98f4a25d3424e301b2d1f2a4..227c85772d228aa22f5179c0726fb0f22e5164c0 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -12,8 +12,6 @@ use rustc::ty::layout::{Size, Align, LayoutOf}; use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic}; -use syntax::ast::Mutability; - use super::{EvalContext, Machine, MemoryKind}; impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { @@ -27,9 +25,11 @@ pub fn get_vtable( &mut self, ty: Ty<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>, - ) -> EvalResult<'tcx, Pointer> { + ) -> EvalResult<'tcx, Pointer> { debug!("get_vtable(trait_ref={:?})", trait_ref); + // FIXME: Cache this! + let layout = self.layout_of(trait_ref.self_ty())?; assert!(!layout.is_unsized(), "can't create a vtable for an unsized type"); let size = layout.size.bytes(); @@ -41,7 +41,7 @@ pub fn get_vtable( let vtable = self.memory.allocate( ptr_size * (3 + methods.len() as u64), ptr_align, - MemoryKind::Stack, + MemoryKind::Vtable, )?; let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); @@ -63,10 +63,7 @@ pub fn get_vtable( } } - self.memory.intern_static( - vtable.alloc_id, - Mutability::Immutable, - )?; + self.memory.mark_immutable(vtable.alloc_id)?; Ok(vtable) } @@ -74,7 +71,7 @@ pub fn get_vtable( /// Return the drop fn instance as well as the actual dynamic type pub fn read_drop_type_from_vtable( &self, - vtable: Pointer, + vtable: Pointer, ) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> { // we don't care about the pointee type, we just want a pointer let pointer_align = self.tcx.data_layout.pointer_align; @@ -90,7 +87,7 @@ pub fn read_drop_type_from_vtable( pub fn read_size_and_align_from_vtable( &self, - vtable: Pointer, + vtable: Pointer, ) -> EvalResult<'tcx, (Size, Align)> { let pointer_size = self.pointer_size(); let pointer_align = self.tcx.data_layout.pointer_align; diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index f481238bd5ba7406a9aac1ebef313a0186b02e51..9dc035a3e20b81d115f4680e87ac6a59ae15bb09 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -9,6 +9,7 @@ // except according to those terms. 
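// Aside before moving on to validity: the vtable built by `get_vtable`
// above has a fixed shape, which this little helper (ours, not rustc API)
// encodes: three pointer-sized header slots -- the drop-in-place fn, the
// concrete type's size, and its alignment -- followed by one slot per
// trait method in declaration order (hence the `3 + methods.len()`
// allocation size in the hunk above).

enum VtableSlot {
    DropInPlace,
    Size,
    Align,
    Method(u64),
}

fn vtable_slot_offset(ptr_size: u64, slot: VtableSlot) -> u64 {
    let index = match slot {
        VtableSlot::DropInPlace => 0,
        VtableSlot::Size => 1,
        VtableSlot::Align => 2,
        VtableSlot::Method(i) => 3 + i,
    };
    index * ptr_size
}

fn main() {
    // On a 64-bit target the second method lives at byte offset 32.
    assert_eq!(vtable_slot_offset(8, VtableSlot::Method(1)), 32);
}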
use std::fmt::Write; +use std::hash::Hash; use syntax_pos::symbol::Symbol; use rustc::ty::layout::{self, Size, Align, TyLayout}; @@ -80,13 +81,13 @@ pub enum PathElem { } /// State for tracking recursive validation of references -pub struct RefTracking<'tcx> { - pub seen: FxHashSet<(OpTy<'tcx>)>, - pub todo: Vec<(OpTy<'tcx>, Vec)>, +pub struct RefTracking<'tcx, Tag> { + pub seen: FxHashSet<(OpTy<'tcx, Tag>)>, + pub todo: Vec<(OpTy<'tcx, Tag>, Vec)>, } -impl<'tcx> RefTracking<'tcx> { - pub fn new(op: OpTy<'tcx>) -> Self { +impl<'tcx, Tag: Copy+Eq+Hash> RefTracking<'tcx, Tag> { + pub fn new(op: OpTy<'tcx, Tag>) -> Self { let mut ref_tracking = RefTracking { seen: FxHashSet(), todo: vec![(op, Vec::new())], @@ -128,7 +129,7 @@ fn path_format(path: &Vec) -> String { out } -fn scalar_format(value: ScalarMaybeUndef) -> String { +fn scalar_format(value: ScalarMaybeUndef) -> String { match value { ScalarMaybeUndef::Undef => "uninitialized bytes".to_owned(), @@ -143,9 +144,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> /// Make sure that `value` is valid for `ty`, *assuming* `ty` is a primitive type. fn validate_primitive_type( &self, - value: ValTy<'tcx>, + value: ValTy<'tcx, M::PointerTag>, path: &Vec, - ref_tracking: Option<&mut RefTracking<'tcx>>, + ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>, const_mode: bool, ) -> EvalResult<'tcx> { // Go over all the primitive types @@ -185,7 +186,7 @@ fn validate_primitive_type( let tail = self.tcx.struct_tail(place.layout.ty); match tail.sty { ty::Dynamic(..) => { - let vtable = try_validation!(place.extra.unwrap().to_ptr(), + let vtable = try_validation!(place.meta.unwrap().to_ptr(), "non-pointer vtable in fat pointer", path); try_validation!(self.read_drop_type_from_vtable(vtable), "invalid drop fn in vtable", path); @@ -194,7 +195,7 @@ fn validate_primitive_type( // FIXME: More checks for the vtable. } ty::Slice(..) | ty::Str => { - try_validation!(place.extra.unwrap().to_usize(self), + try_validation!(place.meta.unwrap().to_usize(self), "non-integer slice length in fat pointer", path); } ty::Foreign(..) => { @@ -207,7 +208,7 @@ fn validate_primitive_type( // for safe ptrs, also check the ptr values itself if !ty.is_unsafe_ptr() { // Make sure this is non-NULL and aligned - let (size, align) = self.size_and_align_of(place.extra, place.layout)?; + let (size, align) = self.size_and_align_of(place.meta, place.layout)?; match self.memory.check_align(place.ptr, align) { Ok(_) => {}, Err(err) => match err.kind { @@ -272,7 +273,7 @@ fn validate_primitive_type( /// Make sure that `value` matches the fn validate_scalar_layout( &self, - value: ScalarMaybeUndef, + value: ScalarMaybeUndef, size: Size, path: &Vec, layout: &layout::Scalar, @@ -363,9 +364,9 @@ fn validate_scalar_layout( /// validation (e.g., pointer values are fine in integers at runtime). 
pub fn validate_operand( &self, - dest: OpTy<'tcx>, + dest: OpTy<'tcx, M::PointerTag>, path: &mut Vec, - mut ref_tracking: Option<&mut RefTracking<'tcx>>, + mut ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>, const_mode: bool, ) -> EvalResult<'tcx> { trace!("validate_operand: {:?}, {:?}", *dest, dest.layout.ty); diff --git a/src/librustc_mir/monomorphize/collector.rs b/src/librustc_mir/monomorphize/collector.rs index b2fa83493849cf37f2dad731c8e11cadd67a59ba..29f167629441b964f2a2090649eb64a15eadc561 100644 --- a/src/librustc_mir/monomorphize/collector.rs +++ b/src/librustc_mir/monomorphize/collector.rs @@ -1163,7 +1163,7 @@ fn collect_miri<'a, 'tcx>( } Some(AllocType::Memory(alloc)) => { trace!("collecting {:?} with {:#?}", alloc_id, alloc); - for &inner in alloc.relocations.values() { + for &((), inner) in alloc.relocations.values() { collect_miri(tcx, inner, output); } }, @@ -1272,7 +1272,7 @@ fn collect_const<'a, 'tcx>( ConstValue::Scalar(Scalar::Ptr(ptr)) => collect_miri(tcx, ptr.alloc_id, output), ConstValue::ByRef(_id, alloc, _offset) => { - for &id in alloc.relocations.values() { + for &((), id) in alloc.relocations.values() { collect_miri(tcx, id, output); } }
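// Closing aside: the `RefTracking` structure from validity.rs, in one
// self-contained sketch (ours): a seen-set plus a todo-stack, so that
// reference graphs with cycles are walked without looping forever --
// each reference is scheduled exactly once, the first time it is seen.

use std::collections::HashSet;
use std::hash::Hash;

fn visit_refs<T, F>(root: T, successors: F) -> Vec<T>
where
    T: Copy + Eq + Hash,
    F: Fn(T) -> Vec<T>,
{
    let mut seen: HashSet<T> = HashSet::new();
    let mut todo = vec![root];
    seen.insert(root);
    let mut visited = Vec::new();
    while let Some(item) = todo.pop() {
        visited.push(item);
        for next in successors(item) {
            if seen.insert(next) {
                todo.push(next); // schedule each reference exactly once
            }
        }
    }
    visited
}

fn main() {
    // A two-node cycle terminates: 0 -> 1 -> 0.
    let order = visit_refs(0u32, |n| vec![(n + 1) % 2]);
    assert_eq!(order, vec![0, 1]);
}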