Commit 2243fabd authored by bors

Auto merge of #54461 - RalfJung:pointer-provenance, r=oli-obk

miri engine: basic support for pointer provenance tracking

This enriches pointers with a new member, `tag`, that can be used to do provenance tracking. This is a new type parameter that propagates up through everything. It defaults to `()` (no tag), which is also the value used by CTFE -- but miri will use another type.

The only actually interesting piece here, I think, is what I had to do in the memory's `get`. The problem is that `tcx` (storing the allocations for statics) uses `()` for provenance information. But the machine might need another tag. The machine has a function to do the conversion, but if a conversion actually happened, we need to store the result of this *somewhere* -- we cannot return a pointer into `tcx` as we usually would.
So I introduced `MonoHashMap` which uses `RefCell` to be able to insert new entries even when we just have a shared ref. However, it is important that we can also return shared refs into the map without holding the `RefCell` open. This is achieved by boxing the values stored in the map, so their addresses remain stable even when the map's table gets reallocated. This is all implemented in `mono_hash_map.rs`.
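
For illustration, a minimal sketch of that idea (not the actual `mono_hash_map.rs`; `HashMap` stands in for the real table, and soundness relies on entries never being removed while borrowed):

```rust
use std::cell::RefCell;
use std::collections::HashMap;
use std::hash::Hash;

/// Sketch: values are boxed so their heap addresses stay stable even when the
/// table reallocates, letting us hand out `&V` without keeping the `RefCell`
/// borrow alive.
struct MonoHashMap<K, V>(RefCell<HashMap<K, Box<V>>>);

impl<K: Hash + Eq, V> MonoHashMap<K, V> {
    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
        if let Some(v) = self.0.borrow().get(&k) {
            // The box never moves and entries are never removed, so extending
            // the lifetime to `&self` is sound here (hence the `unsafe`).
            let ptr: *const V = &**v;
            return Ok(unsafe { &*ptr });
        }
        let v = Box::new(vacant()?);
        let ptr: *const V = &*v;
        self.0.borrow_mut().insert(k, v);
        Ok(unsafe { &*ptr })
    }
}
```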

NOTE: This PR also contains the commits from https://github.com/rust-lang/rust/pull/54380#issuecomment-423130753. Only the [last two commits](https://github.com/rust-lang/rust/pull/54461/files/8e74ee0998a5b11f28d61600dbb881c7168a4a40..HEAD) are new.
......@@ -391,10 +391,39 @@ fn hash_stable<W: StableHasherResult>(&self,
}
}
impl_stable_hash_for!(struct mir::interpret::Pointer {
alloc_id,
offset
});
impl<'a, Tag> HashStable<StableHashingContext<'a>>
for ::mir::interpret::Pointer<Tag>
where Tag: HashStable<StableHashingContext<'a>>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
let ::mir::interpret::Pointer { alloc_id, offset, tag } = self;
alloc_id.hash_stable(hcx, hasher);
offset.hash_stable(hcx, hasher);
tag.hash_stable(hcx, hasher);
}
}
impl<'a, Tag> HashStable<StableHashingContext<'a>>
for ::mir::interpret::Scalar<Tag>
where Tag: HashStable<StableHashingContext<'a>>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
use mir::interpret::Scalar::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match self {
Bits { bits, size } => {
bits.hash_stable(hcx, hasher);
size.hash_stable(hcx, hasher);
},
Ptr(ptr) => ptr.hash_stable(hcx, hasher),
}
}
}
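
These generic impls just push the requirement down to the tag: a machine-side tag type (hypothetical `MyTag` below) opts in with a trivial impl along these lines:

```rust
// Sketch, assuming a unit-like machine tag; a tag with data would hash it here.
impl<'a> HashStable<StableHashingContext<'a>> for MyTag {
    fn hash_stable<W: StableHasherResult>(&self,
                                          _hcx: &mut StableHashingContext<'a>,
                                          _hasher: &mut StableHasher<W>) {
        // Nothing to hash for a zero-sized tag.
    }
}
```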
impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
fn hash_stable<W: StableHasherResult>(
......@@ -449,25 +478,6 @@ fn hash_stable<W: StableHasherResult>(
Mutable
});
impl<'a> HashStable<StableHashingContext<'a>>
for ::mir::interpret::Scalar {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
use mir::interpret::Scalar::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
Bits { bits, size } => {
bits.hash_stable(hcx, hasher);
size.hash_stable(hcx, hasher);
},
Ptr(ptr) => ptr.hash_stable(hcx, hasher),
}
}
}
impl_stable_hash_for!(struct ty::Const<'tcx> {
ty,
val
......
......@@ -138,54 +138,82 @@ impl<T: layout::HasDataLayout> PointerArithmetic for T {}
/// each context.
///
/// Defaults to the index based and loosely coupled AllocId.
///
/// Pointer is also generic over the `Tag` associated with each pointer,
/// which is used to do provenance tracking during execution.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub struct Pointer<Id=AllocId> {
pub struct Pointer<Tag=(), Id=AllocId> {
pub alloc_id: Id,
pub offset: Size,
pub tag: Tag,
}
/// Produces a `Pointer` which points to the beginning of the Allocation
impl From<AllocId> for Pointer {
#[inline(always)]
fn from(alloc_id: AllocId) -> Self {
Pointer::new(alloc_id, Size::ZERO)
}
}
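
A quick usage sketch of that conversion (assuming the `AllocId` and `Size` types from this file):

```rust
fn start_of(alloc_id: AllocId) -> Pointer {
    let ptr: Pointer = alloc_id.into(); // offset 0, unit tag
    assert_eq!(ptr.offset, Size::ZERO);
    ptr
}
```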
impl<'tcx> Pointer {
impl<'tcx> Pointer<()> {
#[inline(always)]
pub fn new(alloc_id: AllocId, offset: Size) -> Self {
Pointer { alloc_id, offset }
Pointer { alloc_id, offset, tag: () }
}
#[inline(always)]
pub fn with_default_tag<Tag>(self) -> Pointer<Tag>
where Tag: Default
{
Pointer::new_with_tag(self.alloc_id, self.offset, Default::default())
}
}
impl<'tcx, Tag> Pointer<Tag> {
#[inline(always)]
pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self {
Pointer { alloc_id, offset, tag }
}
pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
Pointer::new(
Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
self.tag,
)
}
pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
(Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
}
pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
Ok(Pointer::new(
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
self.tag,
))
}
pub fn overflowing_offset<C: HasDataLayout>(self, i: Size, cx: C) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
(Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
}
pub fn offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
Ok(Pointer::new(
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
self.tag,
))
}
#[inline]
pub fn erase_tag(self) -> Pointer {
Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () }
}
}
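
Putting the tagging constructors together, a hedged sketch with a hypothetical machine tag:

```rust
// Hypothetical tag; a real machine would carry provenance data here.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
struct MyTag;

fn roundtrip(alloc_id: AllocId) {
    let untagged: Pointer = Pointer::new(alloc_id, Size::from_bytes(8));
    let tagged: Pointer<MyTag> = untagged.with_default_tag();
    assert_eq!(tagged.erase_tag(), untagged); // tag added, then stripped again
}
```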
......@@ -496,15 +524,15 @@ pub fn set_id_same_memory(&mut self, id: AllocId, mem: M) {
}
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct Allocation {
pub struct Allocation<Tag=()> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer
pub bytes: Vec<u8>,
/// Maps from byte addresses to allocations.
/// Maps from byte addresses to extra data for each pointer.
/// Only the first byte of a pointer is inserted into the map; i.e.,
/// every entry in this map applies to `pointer_size` consecutive bytes starting
/// at the given offset.
pub relocations: Relocations,
pub relocations: Relocations<Tag>,
/// Denotes undefined memory. Reading from undefined memory is forbidden in miri
pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads.
......@@ -515,7 +543,7 @@ pub struct Allocation {
pub mutability: Mutability,
}
impl Allocation {
impl<Tag> Allocation<Tag> {
/// Creates a read-only allocation initialized by the given bytes
pub fn from_bytes(slice: &[u8], align: Align) -> Self {
let mut undef_mask = UndefMask::new(Size::ZERO);
......@@ -548,29 +576,29 @@ pub fn undef(size: Size, align: Align) -> Self {
impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Relocations<Id=AllocId>(SortedMap<Size, Id>);
pub struct Relocations<Tag=(), Id=AllocId>(SortedMap<Size, (Tag, Id)>);
impl<Id> Relocations<Id> {
impl<Tag, Id> Relocations<Tag, Id> {
pub fn new() -> Self {
Relocations(SortedMap::new())
}
// The caller must guarantee that the given relocations are already sorted
// by address and contain no duplicates.
pub fn from_presorted(r: Vec<(Size, Id)>) -> Self {
pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
Relocations(SortedMap::from_presorted_elements(r))
}
}
impl Deref for Relocations {
type Target = SortedMap<Size, AllocId>;
impl<Tag> Deref for Relocations<Tag> {
type Target = SortedMap<Size, (Tag, AllocId)>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Relocations {
impl<Tag> DerefMut for Relocations<Tag> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
......
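
Schematically, each pointer written into an allocation's bytes is mirrored by one entry in this map (sketch; `target` is an arbitrary `AllocId`):

```rust
fn record_ptr(target: AllocId) -> Relocations {
    let mut relocs: Relocations = Relocations::new();
    // A pointer stored at offset 16, pointing into `target`, with the unit tag;
    // `insert` reaches the underlying `SortedMap` through `DerefMut`.
    relocs.insert(Size::from_bytes(16), ((), target));
    relocs
}
```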
......@@ -79,7 +79,47 @@ pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
}
}
impl<'tcx> Scalar {
/// A `Scalar` represents an immediate, primitive value existing outside of a
/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 8 bytes in
/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes
/// of a simple value or a pointer into another `Allocation`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub enum Scalar<Tag=(), Id=AllocId> {
/// The raw bytes of a simple value.
Bits {
/// The first `size` bytes are the value.
/// Do not try to read fewer or more bytes than that. The remaining bytes must be 0.
size: u8,
bits: u128,
},
/// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
/// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
/// relocation and its associated offset together as a `Pointer` here.
Ptr(Pointer<Tag, Id>),
}
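
For example, the `Bits` invariant for a 2-byte value looks like this (sketch):

```rust
fn u16_scalar() -> Scalar {
    // A `u16` with value 0x1234: exactly `size` bytes are meaningful, and all
    // higher bits must already be zero.
    Scalar::Bits { size: 2, bits: 0x1234 }
}
```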
impl<'tcx> Scalar<()> {
#[inline]
pub fn with_default_tag<Tag>(self) -> Scalar<Tag>
where Tag: Default
{
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.with_default_tag()),
Scalar::Bits { bits, size } => Scalar::Bits { bits, size },
}
}
}
impl<'tcx, Tag> Scalar<Tag> {
#[inline]
pub fn erase_tag(self) -> Scalar {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_tag()),
Scalar::Bits { bits, size } => Scalar::Bits { bits, size },
}
}
#[inline]
pub fn ptr_null(cx: impl HasDataLayout) -> Self {
Scalar::Bits {
......@@ -208,7 +248,7 @@ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
}
#[inline]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
match self {
Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage),
Scalar::Bits { .. } => err!(ReadBytesAsPointer),
......@@ -317,29 +357,9 @@ pub fn to_f64(self) -> EvalResult<'static, f64> {
}
}
impl From<Pointer> for Scalar {
impl<Tag> From<Pointer<Tag>> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: Pointer) -> Self {
fn from(ptr: Pointer<Tag>) -> Self {
Scalar::Ptr(ptr)
}
}
/// A `Scalar` represents an immediate, primitive value existing outside of a
/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in
/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes
/// of a simple value or a pointer into another `Allocation`
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub enum Scalar<Id=AllocId> {
/// The raw bytes of a simple value.
Bits {
/// The first `size` bytes are the value.
/// Do not try to read less or more bytes that that. The remaining bytes must be 0.
size: u8,
bits: u128,
},
/// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
/// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
/// relocation and its associated offset together as a `Pointer` here.
Ptr(Pointer<Id>),
}
......@@ -92,7 +92,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
let pointer_size = layout.pointer_size.bytes() as usize;
let mut next_offset = 0;
for &(offset, alloc_id) in alloc.relocations.iter() {
for &(offset, ((), alloc_id)) in alloc.relocations.iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
......@@ -105,7 +105,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
llvals.push(scalar_to_llvm(
cx,
Pointer { alloc_id, offset: Size::from_bytes(ptr_offset) }.into(),
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
&layout::Scalar {
value: layout::Primitive::Pointer,
valid_range: 0..=!0
......
......@@ -12,6 +12,9 @@
use std::fmt;
use std::error::Error;
use std::borrow::{Borrow, Cow};
use std::hash::Hash;
use std::collections::hash_map::Entry;
use rustc::hir::{self, def_id::DefId};
use rustc::mir::interpret::ConstEvalErr;
......@@ -20,13 +23,14 @@
use rustc::ty::layout::{self, LayoutOf, TyLayout};
use rustc::ty::subst::Subst;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::fx::FxHashMap;
use syntax::ast::Mutability;
use syntax::source_map::{Span, DUMMY_SP};
use rustc::mir::interpret::{
EvalResult, EvalError, EvalErrorKind, GlobalId,
Scalar, Allocation, ConstValue,
Scalar, Allocation, AllocId, ConstValue,
};
use interpret::{self,
Place, PlaceTy, MemPlace, OpTy, Operand, Value,
......@@ -118,9 +122,9 @@ pub fn op_to_const<'tcx>(
}
};
let val = match normalized_op {
Err(MemPlace { ptr, align, extra }) => {
Err(MemPlace { ptr, align, meta }) => {
// extract alloc-offset pair
assert!(extra.is_none());
assert!(meta.is_none());
let ptr = ptr.to_ptr()?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi() >= align.abi());
......@@ -264,6 +268,67 @@ fn new() -> Self {
}
}
impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
#[inline(always)]
fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
where K: Borrow<Q>
{
FxHashMap::contains_key(self, k)
}
#[inline(always)]
fn insert(&mut self, k: K, v: V) -> Option<V>
{
FxHashMap::insert(self, k, v)
}
#[inline(always)]
fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
where K: Borrow<Q>
{
FxHashMap::remove(self, k)
}
#[inline(always)]
fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
self.iter()
.filter_map(move |(k, v)| f(k, &*v))
.collect()
}
#[inline(always)]
fn get_or<E>(
&self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&V, E>
{
match self.get(&k) {
Some(v) => Ok(v),
None => {
vacant()?;
bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
}
}
}
#[inline(always)]
fn get_mut_or<E>(
&mut self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&mut V, E>
{
match self.entry(k) {
Entry::Occupied(e) => Ok(e.into_mut()),
Entry::Vacant(e) => {
let v = vacant()?;
Ok(e.insert(v))
}
}
}
}
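
The asymmetry is deliberate: CTFE serves every read of a static straight from `tcx` (and `static_with_default_tag` below just borrows), so a read miss is a logic error, while a write legitimately copies the static into the map first. A toy illustration (simplified key/value types):

```rust
fn demo(mut map: FxHashMap<u32, String>) {
    use interpret::AllocMap; // trait methods in scope; toy types for illustration
    // Write path: a vacant entry is created from the closure's result.
    let v = map.get_mut_or(1, || Ok::<_, ()>(String::from("static"))).unwrap();
    v.push_str(" (now a machine-local copy)");
    // Read path: the entry must already exist; a miss would hit the `bug!` above.
    assert!(map.get_or(1, || Err(())).is_ok());
}
```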
type CompileTimeEvalContext<'a, 'mir, 'tcx> =
EvalContext<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>;
......@@ -272,8 +337,11 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx>
{
type MemoryData = ();
type MemoryKinds = !;
type PointerTag = ();
type MemoryMap = FxHashMap<AllocId, (MemoryKind<!>, Allocation<()>)>;
const MUT_STATIC_KIND: Option<!> = None; // no mutating of statics allowed
const STATIC_KIND: Option<!> = None; // no copying of statics allowed
const ENFORCE_VALIDITY: bool = false; // for now, we don't
fn find_fn(
......@@ -339,10 +407,18 @@ fn ptr_op(
fn find_foreign_static(
_tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
_def_id: DefId,
) -> EvalResult<'tcx, &'tcx Allocation> {
) -> EvalResult<'tcx, Cow<'tcx, Allocation<Self::PointerTag>>> {
err!(ReadForeignStatic)
}
#[inline(always)]
fn static_with_default_tag(
alloc: &'_ Allocation
) -> Cow<'_, Allocation<Self::PointerTag>> {
// We do not use a tag so we can just cheaply forward the reference
Cow::Borrowed(alloc)
}
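
For contrast, a hedged sketch of what a machine with a real tag might do instead (`MyTag` and the `retag` helper are hypothetical):

```rust
#[inline(always)]
fn static_with_default_tag(
    alloc: &'_ Allocation
) -> Cow<'_, Allocation<MyTag>> {
    // Copying is unavoidable once relocations change type from `((), AllocId)`
    // to `(MyTag, AllocId)`; `AllocMap::get_or` then caches the owned result.
    Cow::Owned(retag(alloc)) // `retag`: clone the bytes, retag each relocation
}
```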
fn box_alloc(
_ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
_dest: PlaceTy<'tcx>,
......
......@@ -33,9 +33,9 @@ fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
pub fn cast(
&mut self,
src: OpTy<'tcx>,
src: OpTy<'tcx, M::PointerTag>,
kind: CastKind,
dest: PlaceTy<'tcx>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
let src_layout = src.layout;
let dst_layout = dest.layout;
......@@ -143,10 +143,10 @@ pub fn cast(
pub(super) fn cast_scalar(
&self,
val: Scalar,
val: Scalar<M::PointerTag>,
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty);
......@@ -182,7 +182,7 @@ fn cast_from_int(
v: u128,
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
let signed = src_layout.abi.is_signed();
let v = if signed {
self.sign_extend(v, src_layout)
......@@ -239,7 +239,7 @@ fn cast_from_float(
bits: u128,
fty: FloatTy,
dest_ty: Ty<'tcx>
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
use rustc_apfloat::FloatConvert;
match dest_ty.sty {
......@@ -283,7 +283,11 @@ fn cast_from_float(
}
}
fn cast_from_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> {
fn cast_from_ptr(
&self,
ptr: Pointer<M::PointerTag>,
ty: Ty<'tcx>
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
match ty.sty {
// Casting to a reference or fn pointer is not permitted by rustc,
......@@ -298,8 +302,8 @@ fn cast_from_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar>
fn unsize_into_ptr(
&mut self,
src: OpTy<'tcx>,
dest: PlaceTy<'tcx>,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
// The pointee types
sty: Ty<'tcx>,
dty: Ty<'tcx>,
......@@ -339,8 +343,8 @@ fn unsize_into_ptr(
fn unsize_into(
&mut self,
src: OpTy<'tcx>,
dest: PlaceTy<'tcx>,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
match (&src.layout.ty.sty, &dest.layout.ty.sty) {
(&ty::Ref(_, s, _), &ty::Ref(_, d, _)) |
......
......@@ -49,12 +49,12 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> {
pub memory: Memory<'a, 'mir, 'tcx, M>,
/// The virtual call stack.
pub(crate) stack: Vec<Frame<'mir, 'tcx>>,
pub(crate) stack: Vec<Frame<'mir, 'tcx, M::PointerTag>>,
}
/// A stack frame.
#[derive(Clone)]
pub struct Frame<'mir, 'tcx: 'mir> {
pub struct Frame<'mir, 'tcx: 'mir, Tag=()> {
////////////////////////////////////////////////////////////////////////////////
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
......@@ -74,14 +74,14 @@ pub struct Frame<'mir, 'tcx: 'mir> {
pub return_to_block: StackPopCleanup,
/// The location where the result of the current stack frame should be written to.
pub return_place: Place,
pub return_place: Place<Tag>,
/// The list of locals for this stack frame, stored in order as
/// `[return_ptr, arguments..., variables..., temporaries...]`.
/// The locals are stored as `Option<Value>`s.
/// `None` represents a local that is currently dead, while a live local
/// can either directly contain `Scalar` or refer to some part of an `Allocation`.
pub locals: IndexVec<mir::Local, LocalValue<AllocId>>,
pub locals: IndexVec<mir::Local, LocalValue<Tag>>,
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
......@@ -108,24 +108,24 @@ pub enum StackPopCleanup {
// State of a local variable
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum LocalValue<Id=AllocId> {
pub enum LocalValue<Tag=(), Id=AllocId> {
Dead,
// Mostly for convenience, we re-use the `Operand` type here.
// This is an optimization over just always having a pointer here;
// we can thus avoid doing an allocation when the local just stores
// immediate values *and* never has its address taken.
Live(Operand<Id>),
Live(Operand<Tag, Id>),
}
impl<'tcx> LocalValue {
pub fn access(&self) -> EvalResult<'tcx, &Operand> {
impl<'tcx, Tag> LocalValue<Tag> {
pub fn access(&self) -> EvalResult<'tcx, &Operand<Tag>> {
match self {
LocalValue::Dead => err!(DeadLocal),
LocalValue::Live(ref val) => Ok(val),
}
}
pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand> {
pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand<Tag>> {
match self {
LocalValue::Dead => err!(DeadLocal),
LocalValue::Live(ref mut val) => Ok(val),
......@@ -218,7 +218,7 @@ pub fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
&mut self.memory
}
pub fn stack(&self) -> &[Frame<'mir, 'tcx>] {
pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag>] {
&self.stack
}
......@@ -230,7 +230,10 @@ pub fn cur_frame(&self) -> usize {
/// Mark a storage as live, killing the previous content and returning it.
/// Remember to deallocate that!
pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> {
pub fn storage_live(
&mut self,
local: mir::Local
) -> EvalResult<'tcx, LocalValue<M::PointerTag>> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
trace!("{:?} is now live", local);
......@@ -242,14 +245,14 @@ pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue
/// Returns the old value of the local.
/// Remember to deallocate that!
pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue {
pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
trace!("{:?} is now dead", local);
mem::replace(&mut self.frame_mut().locals[local], LocalValue::Dead)
}
pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value<M::PointerTag>> {
let ptr = self.memory.allocate_static_bytes(s.as_bytes());
Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx))
}
......@@ -327,10 +330,10 @@ pub fn layout_of_local(
}
/// Return the actual dynamic size and alignment of the place at the given type.
/// Only the "extra" (metadata) part of the place matters.
/// Only the `meta` part of the place matters.
pub(super) fn size_and_align_of(
&self,
metadata: Option<Scalar>,
metadata: Option<Scalar<M::PointerTag>>,
layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Size, Align)> {
let metadata = match metadata {
......@@ -411,9 +414,9 @@ pub(super) fn size_and_align_of(
#[inline]
pub fn size_and_align_of_mplace(
&self,
mplace: MPlaceTy<'tcx>
mplace: MPlaceTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, (Size, Align)> {
self.size_and_align_of(mplace.extra, mplace.layout)
self.size_and_align_of(mplace.meta, mplace.layout)
}
pub fn push_stack_frame(
......@@ -421,7 +424,7 @@ pub fn push_stack_frame(
instance: ty::Instance<'tcx>,
span: source_map::Span,
mir: &'mir mir::Mir<'tcx>,
return_place: Place,
return_place: Place<M::PointerTag>,
return_to_block: StackPopCleanup,
) -> EvalResult<'tcx> {
::log_settings::settings().indentation += 1;
......@@ -519,7 +522,10 @@ pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
Ok(())
}
pub(super) fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> {
pub(super) fn deallocate_local(
&mut self,
local: LocalValue<M::PointerTag>,
) -> EvalResult<'tcx> {
// FIXME: should we tell the user that there was a local which was never written to?
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
trace!("deallocating local");
......@@ -541,12 +547,12 @@ pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Cons
}
#[inline(always)]
pub fn frame(&self) -> &Frame<'mir, 'tcx> {
pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag> {
self.stack.last().expect("no call frames exist")
}
#[inline(always)]
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> {
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag> {
self.stack.last_mut().expect("no call frames exist")
}
......@@ -562,7 +568,7 @@ pub fn substs(&self) -> &'tcx Substs<'tcx> {
}
}
pub fn dump_place(&self, place: Place) {
pub fn dump_place(&self, place: Place<M::PointerTag>) {
// Debug output
if !log_enabled!(::log::Level::Trace) {
return;
......
......@@ -25,11 +25,11 @@
};
fn numeric_intrinsic<'tcx>(
fn numeric_intrinsic<'tcx, Tag>(
name: &str,
bits: u128,
kind: Primitive,
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<Tag>> {
let size = match kind {
Primitive::Int(integer, _) => integer.size(),
_ => bug!("invalid `{}` argument: {:?}", name, bits),
......@@ -51,8 +51,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn emulate_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, bool> {
let substs = instance.substs;
......@@ -169,8 +169,8 @@ pub fn emulate_intrinsic(
pub fn hook_fn(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: Option<PlaceTy<'tcx, M::PointerTag>>,
) -> EvalResult<'tcx, bool> {
let def_id = instance.def_id();
// Some fn calls are actually BinOp intrinsics
......
......@@ -12,17 +12,55 @@
//! This separation exists to ensure that no fancy miri features like
//! interpreting common C functions leak into CTFE.
use std::borrow::{Borrow, Cow};
use std::hash::Hash;
use rustc::hir::def_id::DefId;
use rustc::mir::interpret::{Allocation, EvalResult, Scalar};
use rustc::mir::interpret::{Allocation, AllocId, EvalResult, Scalar};
use rustc::mir;
use rustc::ty::{self, layout::TyLayout, query::TyCtxtAt};
use super::{EvalContext, PlaceTy, OpTy};
use super::{EvalContext, PlaceTy, OpTy, MemoryKind};
/// The functionality needed by memory to manage its allocations
pub trait AllocMap<K: Hash + Eq, V> {
/// Test if the map contains the given key.
/// Deliberately takes `&mut` because that is sufficient, and some implementations
/// can be more efficient that way (using `RefCell::get_mut`).
fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
where K: Borrow<Q>;
/// Insert new entry into the map.
fn insert(&mut self, k: K, v: V) -> Option<V>;
/// Remove entry from the map.
fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
where K: Borrow<Q>;
/// Return data based on the keys and values in the map.
fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;
/// Return a reference to entry `k`. If no such entry exists, call
/// `vacant` and either forward its error, or add its result to the map
/// and return a reference to *that*.
fn get_or<E>(
&self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&V, E>;
/// Return a mutable reference to entry `k`. If no such entry exists, call
/// `vacant` and either forward its error, or add its result to the map
/// and return a reference to *that*.
fn get_mut_or<E>(
&mut self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&mut V, E>;
}
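
To make the `get_or` contract concrete, a minimal caller sketch (toy value type; the real instantiation maps `AllocId` to a kind/allocation pair):

```rust
fn lookup<M>(map: &M, id: AllocId) -> Result<&u64, String>
where
    M: AllocMap<AllocId, u64>,
{
    // On a miss, `vacant` runs; whether its result can then be inserted through
    // `&self` is up to the implementation (e.g. via interior mutability).
    map.get_or(id, || Err(String::from("allocation not found")))
}
```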
/// Methods of this trait signify a point where CTFE evaluation would fail
/// and some use-case-dependent behaviour can instead be applied.
/// FIXME: We should be able to get rid of the 'a here if we can get rid of the 'a in
/// `snapshot::EvalSnapshot`.
pub trait Machine<'a, 'mir, 'tcx>: Sized {
/// Additional data that can be accessed via the Memory
type MemoryData;
......@@ -30,8 +68,22 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized {
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
type MemoryKinds: ::std::fmt::Debug + Copy + Eq;
/// The memory kind to use for mutated statics -- or None if those are not supported.
const MUT_STATIC_KIND: Option<Self::MemoryKinds>;
/// Memory's allocation map
type MemoryMap:
AllocMap<AllocId, (MemoryKind<Self::MemoryKinds>, Allocation<Self::PointerTag>)> +
Default +
Clone;
/// Tag tracked alongside every pointer. This is inert for now, in preparation for
/// a future implementation of "Stacked Borrows"
/// <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>.
type PointerTag: ::std::fmt::Debug + Default + Copy + Eq + Hash + 'static;
/// The memory kind to use for copied statics -- or None if those are not supported.
/// Statics are copied under two circumstances: when they are mutated, and when
/// `static_with_default_tag` or `find_foreign_static` (see below) returns an owned allocation
/// that is added to the memory so that the work is not done twice.
const STATIC_KIND: Option<Self::MemoryKinds>;
/// Whether to enforce the validity invariant
const ENFORCE_VALIDITY: bool;
......@@ -53,8 +105,8 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized {
fn find_fn(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, Self::PointerTag>],
dest: Option<PlaceTy<'tcx, Self::PointerTag>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>>;
......@@ -63,18 +115,30 @@ fn find_fn(
fn call_intrinsic(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
args: &[OpTy<'tcx, Self::PointerTag>],
dest: PlaceTy<'tcx, Self::PointerTag>,
) -> EvalResult<'tcx>;
/// Called for read access to a foreign static item.
/// This can be called multiple times for the same static item and should return consistent
/// results. Once the item is *written* the first time, as usual for statics a copy is
/// made and this function is not called again.
///
/// This will only be called once per static and machine; the result is cached in
/// the machine memory. (This relies on `AllocMap::get_or` being able to add the
/// owned allocation to the map even when the map is shared.)
fn find_foreign_static(
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
def_id: DefId,
) -> EvalResult<'tcx, &'tcx Allocation>;
) -> EvalResult<'tcx, Cow<'tcx, Allocation<Self::PointerTag>>>;
/// Called to turn an allocation obtained from the `tcx` into one that has
/// the appropriate tags on each pointer.
///
/// This should avoid copying if no work has to be done! If this returns an owned
/// allocation (because a copy had to be done to add the tags), machine memory will
/// cache the result. (This relies on `AllocMap::get_or` being able to add the
/// owned allocation to the map even when the map is shared.)
fn static_with_default_tag(
alloc: &'_ Allocation
) -> Cow<'_, Allocation<Self::PointerTag>>;
/// Called for all binary operations on integer(-like) types when one operand is a pointer
/// value, and for the `Offset` operation that is inherently about pointers.
......@@ -83,18 +147,18 @@ fn find_foreign_static(
fn ptr_op(
ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
bin_op: mir::BinOp,
left: Scalar,
left: Scalar<Self::PointerTag>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right: Scalar<Self::PointerTag>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)>;
) -> EvalResult<'tcx, (Scalar<Self::PointerTag>, bool)>;
/// Heap allocations via the `box` keyword
///
/// Returns a pointer to the allocated memory
fn box_alloc(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
dest: PlaceTy<'tcx>,
dest: PlaceTy<'tcx, Self::PointerTag>,
) -> EvalResult<'tcx>;
/// Execute a validation operation
......
......@@ -32,7 +32,7 @@
pub use self::memory::{Memory, MemoryKind};
pub use self::machine::Machine;
pub use self::machine::{Machine, AllocMap};
pub use self::operand::{ScalarMaybeUndef, Value, ValTy, Operand, OpTy};
......
......@@ -25,21 +25,42 @@
use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind};
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub enum ScalarMaybeUndef<Id=AllocId> {
Scalar(Scalar<Id>),
pub enum ScalarMaybeUndef<Tag=(), Id=AllocId> {
Scalar(Scalar<Tag, Id>),
Undef,
}
impl From<Scalar> for ScalarMaybeUndef {
impl<Tag> From<Scalar<Tag>> for ScalarMaybeUndef<Tag> {
#[inline(always)]
fn from(s: Scalar) -> Self {
fn from(s: Scalar<Tag>) -> Self {
ScalarMaybeUndef::Scalar(s)
}
}
impl<'tcx> ScalarMaybeUndef {
impl<'tcx> ScalarMaybeUndef<()> {
#[inline]
pub fn not_undef(self) -> EvalResult<'static, Scalar> {
pub fn with_default_tag<Tag>(self) -> ScalarMaybeUndef<Tag>
where Tag: Default
{
match self {
ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.with_default_tag()),
ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef,
}
}
}
impl<'tcx, Tag> ScalarMaybeUndef<Tag> {
#[inline]
pub fn erase_tag(self) -> ScalarMaybeUndef
{
match self {
ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_tag()),
ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef,
}
}
#[inline]
pub fn not_undef(self) -> EvalResult<'static, Scalar<Tag>> {
match self {
ScalarMaybeUndef::Scalar(scalar) => Ok(scalar),
ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))),
......@@ -47,7 +68,7 @@ pub fn not_undef(self) -> EvalResult<'static, Scalar> {
}
#[inline(always)]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
self.not_undef()?.to_ptr()
}
......@@ -126,26 +147,49 @@ pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, i64> {
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Value`, and do not have to work with a `Place`.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Value<Id=AllocId> {
Scalar(ScalarMaybeUndef<Id>),
ScalarPair(ScalarMaybeUndef<Id>, ScalarMaybeUndef<Id>),
pub enum Value<Tag=(), Id=AllocId> {
Scalar(ScalarMaybeUndef<Tag, Id>),
ScalarPair(ScalarMaybeUndef<Tag, Id>, ScalarMaybeUndef<Tag, Id>),
}
impl<'tcx> Value {
impl Value {
#[inline]
pub fn with_default_tag<Tag>(self) -> Value<Tag>
where Tag: Default
{
match self {
Value::Scalar(x) => Value::Scalar(x.with_default_tag()),
Value::ScalarPair(x, y) =>
Value::ScalarPair(x.with_default_tag(), y.with_default_tag()),
}
}
}
impl<'tcx, Tag> Value<Tag> {
#[inline]
pub fn erase_tag(self) -> Value
{
match self {
Value::Scalar(x) => Value::Scalar(x.erase_tag()),
Value::ScalarPair(x, y) =>
Value::ScalarPair(x.erase_tag(), y.erase_tag()),
}
}
pub fn new_slice(
val: Scalar,
val: Scalar<Tag>,
len: u64,
cx: impl HasDataLayout
) -> Self {
Value::ScalarPair(val.into(), Scalar::from_uint(len, cx.data_layout().pointer_size).into())
}
pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
Value::ScalarPair(val.into(), Scalar::Ptr(vtable).into())
}
#[inline]
pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef {
pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef<Tag> {
match self {
Value::Scalar(val) => val,
Value::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"),
......@@ -153,12 +197,12 @@ pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef {
}
#[inline]
pub fn to_scalar(self) -> EvalResult<'tcx, Scalar> {
pub fn to_scalar(self) -> EvalResult<'tcx, Scalar<Tag>> {
self.to_scalar_or_undef().not_undef()
}
#[inline]
pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar, Scalar)> {
pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
match self {
Value::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
Value::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?))
......@@ -168,7 +212,7 @@ pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar, Scalar)> {
/// Convert the value into a pointer (or a pointer-sized integer).
/// Throws away the second half of a ScalarPair!
#[inline]
pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> {
pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar<Tag>> {
match self {
Value::Scalar(ptr) |
Value::ScalarPair(ptr, _) => ptr.not_undef(),
......@@ -179,15 +223,15 @@ pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> {
// ScalarPair needs a type to interpret, so we often have a value and a type together
// as input for binary and cast operations.
#[derive(Copy, Clone, Debug)]
pub struct ValTy<'tcx> {
value: Value,
pub struct ValTy<'tcx, Tag=()> {
value: Value<Tag>,
pub layout: TyLayout<'tcx>,
}
impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
type Target = Value;
impl<'tcx, Tag> ::std::ops::Deref for ValTy<'tcx, Tag> {
type Target = Value<Tag>;
#[inline(always)]
fn deref(&self) -> &Value {
fn deref(&self) -> &Value<Tag> {
&self.value
}
}
......@@ -196,14 +240,37 @@ fn deref(&self) -> &Value {
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Operand<Id=AllocId> {
Immediate(Value<Id>),
Indirect(MemPlace<Id>),
pub enum Operand<Tag=(), Id=AllocId> {
Immediate(Value<Tag, Id>),
Indirect(MemPlace<Tag, Id>),
}
impl Operand {
#[inline]
pub fn to_mem_place(self) -> MemPlace {
pub fn with_default_tag<Tag>(self) -> Operand<Tag>
where Tag: Default
{
match self {
Operand::Immediate(x) => Operand::Immediate(x.with_default_tag()),
Operand::Indirect(x) => Operand::Indirect(x.with_default_tag()),
}
}
}
impl<Tag> Operand<Tag> {
#[inline]
pub fn erase_tag(self) -> Operand
{
match self {
Operand::Immediate(x) => Operand::Immediate(x.erase_tag()),
Operand::Indirect(x) => Operand::Indirect(x.erase_tag()),
}
}
#[inline]
pub fn to_mem_place(self) -> MemPlace<Tag>
where Tag: ::std::fmt::Debug
{
match self {
Operand::Indirect(mplace) => mplace,
_ => bug!("to_mem_place: expected Operand::Indirect, got {:?}", self),
......@@ -212,7 +279,9 @@ pub fn to_mem_place(self) -> MemPlace {
}
#[inline]
pub fn to_immediate(self) -> Value {
pub fn to_immediate(self) -> Value<Tag>
where Tag: ::std::fmt::Debug
{
match self {
Operand::Immediate(val) => val,
_ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self),
......@@ -222,22 +291,22 @@ pub fn to_immediate(self) -> Value {
}
#[derive(Copy, Clone, Debug)]
pub struct OpTy<'tcx> {
crate op: Operand, // ideally we'd make this private, but const_prop needs this
pub struct OpTy<'tcx, Tag=()> {
crate op: Operand<Tag>, // ideally we'd make this private, but const_prop needs this
pub layout: TyLayout<'tcx>,
}
impl<'tcx> ::std::ops::Deref for OpTy<'tcx> {
type Target = Operand;
impl<'tcx, Tag> ::std::ops::Deref for OpTy<'tcx, Tag> {
type Target = Operand<Tag>;
#[inline(always)]
fn deref(&self) -> &Operand {
fn deref(&self) -> &Operand<Tag> {
&self.op
}
}
impl<'tcx> From<MPlaceTy<'tcx>> for OpTy<'tcx> {
impl<'tcx, Tag: Copy> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx>) -> Self {
fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
OpTy {
op: Operand::Indirect(*mplace),
layout: mplace.layout
......@@ -245,9 +314,9 @@ fn from(mplace: MPlaceTy<'tcx>) -> Self {
}
}
impl<'tcx> From<ValTy<'tcx>> for OpTy<'tcx> {
impl<'tcx, Tag> From<ValTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
#[inline(always)]
fn from(val: ValTy<'tcx>) -> Self {
fn from(val: ValTy<'tcx, Tag>) -> Self {
OpTy {
op: Operand::Immediate(val.value),
layout: val.layout
......@@ -256,18 +325,36 @@ fn from(val: ValTy<'tcx>) -> Self {
}
// Validation needs to hash OpTy, but we cannot hash Layout -- so we just hash the type
impl<'tcx> Hash for OpTy<'tcx> {
impl<'tcx, Tag> Hash for OpTy<'tcx, Tag>
where Tag: Hash
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.op.hash(state);
self.layout.ty.hash(state);
}
}
impl<'tcx> PartialEq for OpTy<'tcx> {
impl<'tcx, Tag> PartialEq for OpTy<'tcx, Tag>
where Tag: PartialEq
{
fn eq(&self, other: &Self) -> bool {
self.op == other.op && self.layout.ty == other.layout.ty
}
}
impl<'tcx> Eq for OpTy<'tcx> {}
impl<'tcx, Tag> Eq for OpTy<'tcx, Tag>
where Tag: Eq
{}
impl<'tcx, Tag> OpTy<'tcx, Tag>
{
#[inline]
pub fn erase_tag(self) -> OpTy<'tcx>
{
OpTy {
op: self.op.erase_tag(),
layout: self.layout,
}
}
}
// Use the existing layout if given (but sanity check in debug mode),
// or compute the layout.
......@@ -295,8 +382,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Return None if the layout does not permit loading this as a value.
pub(super) fn try_read_value_from_mplace(
&self,
mplace: MPlaceTy<'tcx>,
) -> EvalResult<'tcx, Option<Value>> {
mplace: MPlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, Option<Value<M::PointerTag>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
return Ok(None);
......@@ -339,8 +426,8 @@ pub(super) fn try_read_value_from_mplace(
/// in a `Value`, not on which data is stored there currently.
pub(crate) fn try_read_value(
&self,
src: OpTy<'tcx>,
) -> EvalResult<'tcx, Result<Value, MemPlace>> {
src: OpTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, Result<Value<M::PointerTag>, MemPlace<M::PointerTag>>> {
Ok(match src.try_as_mplace() {
Ok(mplace) => {
if let Some(val) = self.try_read_value_from_mplace(mplace)? {
......@@ -355,7 +442,10 @@ pub(crate) fn try_read_value(
/// Read a value from a place, asserting that that is possible with the given layout.
#[inline(always)]
pub fn read_value(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
pub fn read_value(
&self,
op: OpTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, ValTy<'tcx, M::PointerTag>> {
if let Ok(value) = self.try_read_value(op)? {
Ok(ValTy { value, layout: op.layout })
} else {
......@@ -364,7 +454,10 @@ pub fn read_value(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
}
/// Read a scalar from a place
pub fn read_scalar(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ScalarMaybeUndef> {
pub fn read_scalar(
&self,
op: OpTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
match *self.read_value(op)? {
Value::ScalarPair(..) => bug!("got ScalarPair for type: {:?}", op.layout.ty),
Value::Scalar(val) => Ok(val),
......@@ -374,7 +467,7 @@ pub fn read_scalar(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ScalarMaybeUndef>
// Turn the MPlace into a string (must already be dereferenced!)
pub fn read_str(
&self,
mplace: MPlaceTy<'tcx>,
mplace: MPlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, &str> {
let len = mplace.len(self)?;
let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
......@@ -383,7 +476,10 @@ pub fn read_str(
Ok(str)
}
pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Operand> {
pub fn uninit_operand(
&mut self,
layout: TyLayout<'tcx>
) -> EvalResult<'tcx, Operand<M::PointerTag>> {
// This decides which types we will use the Immediate optimization for, and hence should
// match what `try_read_value` and `eval_place_to_op` support.
if layout.is_zst() {
......@@ -410,9 +506,9 @@ pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Ope
/// Projection functions
pub fn operand_field(
&self,
op: OpTy<'tcx>,
op: OpTy<'tcx, M::PointerTag>,
field: u64,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let base = match op.try_as_mplace() {
Ok(mplace) => {
// The easy case
......@@ -445,9 +541,9 @@ pub fn operand_field(
pub fn operand_downcast(
&self,
op: OpTy<'tcx>,
op: OpTy<'tcx, M::PointerTag>,
variant: usize,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
Ok(match op.try_as_mplace() {
Ok(mplace) => {
......@@ -464,8 +560,8 @@ pub fn operand_downcast(
// will always be a MemPlace.
pub(super) fn deref_operand(
&self,
src: OpTy<'tcx>,
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
src: OpTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let val = self.read_value(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
Ok(self.ref_to_mplace(val)?)
......@@ -473,9 +569,9 @@ pub(super) fn deref_operand(
pub fn operand_projection(
&self,
base: OpTy<'tcx>,
base: OpTy<'tcx, M::PointerTag>,
proj_elem: &mir::PlaceElem<'tcx>,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.operand_field(base, field.index() as u64)?,
......@@ -503,7 +599,7 @@ fn eval_place_to_op(
&self,
mir_place: &mir::Place<'tcx>,
layout: Option<TyLayout<'tcx>>,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::Place::*;
let op = match *mir_place {
Local(mir::RETURN_PLACE) => return err!(ReadFromReturnPointer),
......@@ -533,7 +629,7 @@ pub fn eval_operand(
&self,
mir_op: &mir::Operand<'tcx>,
layout: Option<TyLayout<'tcx>>,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::Operand::*;
let op = match *mir_op {
// FIXME: do some more logic on `move` to invalidate the old location
......@@ -558,7 +654,7 @@ pub fn eval_operand(
pub(super) fn eval_operands(
&self,
ops: &[mir::Operand<'tcx>],
) -> EvalResult<'tcx, Vec<OpTy<'tcx>>> {
) -> EvalResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
ops.into_iter()
.map(|op| self.eval_operand(op, None))
.collect()
......@@ -568,7 +664,7 @@ pub(super) fn eval_operands(
pub(super) fn const_value_to_op(
&self,
val: ConstValue<'tcx>,
) -> EvalResult<'tcx, Operand> {
) -> EvalResult<'tcx, Operand<M::PointerTag>> {
trace!("const_value_to_op: {:?}", val);
match val {
ConstValue::Unevaluated(def_id, substs) => {
......@@ -581,23 +677,28 @@ pub(super) fn const_value_to_op(
ConstValue::ByRef(id, alloc, offset) => {
// We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen -- and for `static mut`, we copy on demand anyway.
Ok(Operand::Indirect(MemPlace::from_ptr(Pointer::new(id, offset), alloc.align)))
Ok(Operand::Indirect(
MemPlace::from_ptr(Pointer::new(id, offset), alloc.align)
).with_default_tag())
},
ConstValue::ScalarPair(a, b) =>
Ok(Operand::Immediate(Value::ScalarPair(a.into(), b.into()))),
Ok(Operand::Immediate(Value::ScalarPair(a.into(), b.into())).with_default_tag()),
ConstValue::Scalar(x) =>
Ok(Operand::Immediate(Value::Scalar(x.into()))),
Ok(Operand::Immediate(Value::Scalar(x.into())).with_default_tag()),
}
}
pub fn const_to_op(
&self,
cnst: &ty::Const<'tcx>,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let op = self.const_value_to_op(cnst.val)?;
Ok(OpTy { op, layout: self.layout_of(cnst.ty)? })
}
pub(super) fn global_to_op(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Operand> {
pub(super) fn global_to_op(
&self,
gid: GlobalId<'tcx>
) -> EvalResult<'tcx, Operand<M::PointerTag>> {
let cv = self.const_eval(gid)?;
self.const_value_to_op(cv.val)
}
......@@ -605,7 +706,7 @@ pub(super) fn global_to_op(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Opera
/// Read discriminant, return the runtime value as well as the variant index.
pub fn read_discriminant(
&self,
rval: OpTy<'tcx>,
rval: OpTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, (u128, usize)> {
trace!("read_discriminant_value {:#?}", rval.layout);
if rval.layout.abi.is_uninhabited() {
......
......@@ -24,9 +24,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn binop_with_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
dest: PlaceTy<'tcx>,
left: ValTy<'tcx, M::PointerTag>,
right: ValTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
let (val, overflowed) = self.binary_op_val(op, left, right)?;
let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
......@@ -38,9 +38,9 @@ pub fn binop_with_overflow(
pub fn binop_ignore_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
dest: PlaceTy<'tcx>,
left: ValTy<'tcx, M::PointerTag>,
right: ValTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
let (val, _overflowed) = self.binary_op_val(op, left, right)?;
self.write_scalar(val, dest)
......@@ -53,7 +53,7 @@ fn binary_char_op(
bin_op: mir::BinOp,
l: char,
r: char,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
let res = match bin_op {
......@@ -73,7 +73,7 @@ fn binary_bool_op(
bin_op: mir::BinOp,
l: bool,
r: bool,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
let res = match bin_op {
......@@ -98,7 +98,7 @@ fn binary_float_op(
// passing in raw bits
l: u128,
r: u128,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
macro_rules! float_math {
......@@ -138,7 +138,7 @@ fn binary_int_op(
left_layout: TyLayout<'tcx>,
r: u128,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
// Shift ops can have an RHS with a different numeric type.
......@@ -288,9 +288,9 @@ fn binary_int_op(
pub fn binary_op_val(
&self,
bin_op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
left: ValTy<'tcx, M::PointerTag>,
right: ValTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
self.binary_op(
bin_op,
left.to_scalar()?, left.layout,
......@@ -302,11 +302,11 @@ pub fn binary_op_val(
pub fn binary_op(
&self,
bin_op: mir::BinOp,
left: Scalar,
left: Scalar<M::PointerTag>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right: Scalar<M::PointerTag>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op, left, left_layout.ty, right, right_layout.ty);
......@@ -352,9 +352,9 @@ pub fn binary_op(
pub fn unary_op(
&self,
un_op: mir::UnOp,
val: Scalar,
val: Scalar<M::PointerTag>,
layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
use rustc::mir::UnOp::*;
use rustc_apfloat::ieee::{Single, Double};
use rustc_apfloat::Float;
......
This diff has been collapsed.
......@@ -99,6 +99,8 @@ trait Snapshot<'a, Ctx: SnapshotContext<'a>> {
($field:ident, $ctx:expr, $delegate:expr) => ($delegate);
}
// This assumes the type has two type parameters, first for the tag (set to `()`),
// then for the id
macro_rules! impl_snapshot_for {
// FIXME(mark-i-m): Some of these should be `?` rather than `*`.
(enum $enum_name:ident {
......@@ -108,7 +110,7 @@ trait Snapshot<'a, Ctx: SnapshotContext<'a>> {
impl<'a, Ctx> self::Snapshot<'a, Ctx> for $enum_name
where Ctx: self::SnapshotContext<'a>,
{
type Item = $enum_name<AllocIdSnapshot<'a>>;
type Item = $enum_name<(), AllocIdSnapshot<'a>>;
#[inline]
fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item {
......@@ -129,7 +131,7 @@ fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item {
impl<'a, Ctx> self::Snapshot<'a, Ctx> for $struct_name
where Ctx: self::SnapshotContext<'a>,
{
type Item = $struct_name<AllocIdSnapshot<'a>>;
type Item = $struct_name<(), AllocIdSnapshot<'a>>;
#[inline]
fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item {
......@@ -175,12 +177,13 @@ fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
impl_snapshot_for!(struct Pointer {
alloc_id,
offset -> *offset, // just copy offset verbatim
tag -> *tag, // just copy tag
});
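
For reference, the `Pointer` instance above expands to roughly this (hand-expanded sketch):

```rust
impl<'a, Ctx> Snapshot<'a, Ctx> for Pointer
    where Ctx: SnapshotContext<'a>,
{
    type Item = Pointer<(), AllocIdSnapshot<'a>>;

    fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
        let Pointer { alloc_id, offset, tag } = self;
        Pointer {
            alloc_id: alloc_id.snapshot(ctx), // delegated field
            offset: *offset,                  // copied verbatim
            tag: *tag,                        // copied verbatim
        }
    }
}
```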
impl<'a, Ctx> Snapshot<'a, Ctx> for Scalar
where Ctx: SnapshotContext<'a>,
{
type Item = Scalar<AllocIdSnapshot<'a>>;
type Item = Scalar<(), AllocIdSnapshot<'a>>;
fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
match self {
......@@ -206,11 +209,11 @@ fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
impl_stable_hash_for!(struct ::interpret::MemPlace {
ptr,
align,
extra,
meta,
});
impl_snapshot_for!(struct MemPlace {
ptr,
extra,
meta,
align -> *align, // just copy alignment verbatim
});
......@@ -234,7 +237,7 @@ fn hash_stable<W: StableHasherResult>(
impl<'a, Ctx> Snapshot<'a, Ctx> for Place
where Ctx: SnapshotContext<'a>,
{
type Item = Place<AllocIdSnapshot<'a>>;
type Item = Place<(), AllocIdSnapshot<'a>>;
fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
match self {
......@@ -278,11 +281,11 @@ fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
impl<'a, Ctx> Snapshot<'a, Ctx> for Relocations
where Ctx: SnapshotContext<'a>,
{
type Item = Relocations<AllocIdSnapshot<'a>>;
type Item = Relocations<(), AllocIdSnapshot<'a>>;
fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
Relocations::from_presorted(self.iter()
.map(|(size, id)| (*size, id.snapshot(ctx)))
.map(|(size, ((), id))| (*size, ((), id.snapshot(ctx))))
.collect())
}
}
......@@ -290,7 +293,7 @@ fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
#[derive(Eq, PartialEq)]
struct AllocationSnapshot<'a> {
bytes: &'a [u8],
relocations: Relocations<AllocIdSnapshot<'a>>,
relocations: Relocations<(), AllocIdSnapshot<'a>>,
undef_mask: &'a UndefMask,
align: &'a Align,
mutability: &'a Mutability,
......@@ -334,8 +337,8 @@ struct FrameSnapshot<'a, 'tcx: 'a> {
instance: &'a ty::Instance<'tcx>,
span: &'a Span,
return_to_block: &'a StackPopCleanup,
return_place: Place<AllocIdSnapshot<'a>>,
locals: IndexVec<mir::Local, LocalValue<AllocIdSnapshot<'a>>>,
return_place: Place<(), AllocIdSnapshot<'a>>,
locals: IndexVec<mir::Local, LocalValue<(), AllocIdSnapshot<'a>>>,
block: &'a mir::BasicBlock,
stmt: usize,
}
......
......@@ -205,8 +205,8 @@ fn check_argument_compat(
fn pass_argument(
&mut self,
skip_zst: bool,
caller_arg: &mut impl Iterator<Item=OpTy<'tcx>>,
callee_arg: PlaceTy<'tcx>,
caller_arg: &mut impl Iterator<Item=OpTy<'tcx, M::PointerTag>>,
callee_arg: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
if skip_zst && callee_arg.layout.is_zst() {
// Nothing to do.
......@@ -231,8 +231,8 @@ fn eval_fn_call(
instance: ty::Instance<'tcx>,
span: Span,
caller_abi: Abi,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: Option<PlaceTy<'tcx, M::PointerTag>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx> {
trace!("eval_fn_call: {:#?}", instance);
......@@ -330,7 +330,7 @@ fn eval_fn_call(
// last incoming argument. These two iterators do not have the same type,
// so to keep the code paths uniform we accept an allocation
// (for RustCall ABI only).
let caller_args : Cow<[OpTy<'tcx>]> =
let caller_args : Cow<[OpTy<'tcx, M::PointerTag>]> =
if caller_abi == Abi::RustCall && !args.is_empty() {
// Untuple
let (&untuple_arg, args) = args.split_last().unwrap();
......@@ -339,7 +339,7 @@ fn eval_fn_call(
.chain((0..untuple_arg.layout.fields.count()).into_iter()
.map(|i| self.operand_field(untuple_arg, i as u64))
)
.collect::<EvalResult<Vec<OpTy<'tcx>>>>()?)
.collect::<EvalResult<Vec<OpTy<'tcx, M::PointerTag>>>>()?)
} else {
// Plain arg passing
Cow::from(args)
......@@ -426,7 +426,7 @@ fn eval_fn_call(
fn drop_in_place(
&mut self,
place: PlaceTy<'tcx>,
place: PlaceTy<'tcx, M::PointerTag>,
instance: ty::Instance<'tcx>,
span: Span,
target: mir::BasicBlock,
......
......@@ -12,8 +12,6 @@
use rustc::ty::layout::{Size, Align, LayoutOf};
use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
use syntax::ast::Mutability;
use super::{EvalContext, Machine, MemoryKind};
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
......@@ -27,9 +25,11 @@ pub fn get_vtable(
&mut self,
ty: Ty<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> EvalResult<'tcx, Pointer> {
) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
debug!("get_vtable(trait_ref={:?})", trait_ref);
// FIXME: Cache this!
let layout = self.layout_of(trait_ref.self_ty())?;
assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
let size = layout.size.bytes();
......@@ -41,7 +41,7 @@ pub fn get_vtable(
let vtable = self.memory.allocate(
ptr_size * (3 + methods.len() as u64),
ptr_align,
MemoryKind::Stack,
MemoryKind::Vtable,
)?;
let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
......@@ -63,10 +63,7 @@ pub fn get_vtable(
}
}
self.memory.intern_static(
vtable.alloc_id,
Mutability::Immutable,
)?;
self.memory.mark_immutable(vtable.alloc_id)?;
Ok(vtable)
}
......@@ -74,7 +71,7 @@ pub fn get_vtable(
/// Return the drop fn instance as well as the actual dynamic type
pub fn read_drop_type_from_vtable(
&self,
vtable: Pointer,
vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> {
// we don't care about the pointee type, we just want a pointer
let pointer_align = self.tcx.data_layout.pointer_align;
......@@ -90,7 +87,7 @@ pub fn read_drop_type_from_vtable(
pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer,
vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
......
......@@ -9,6 +9,7 @@
// except according to those terms.
use std::fmt::Write;
use std::hash::Hash;
use syntax_pos::symbol::Symbol;
use rustc::ty::layout::{self, Size, Align, TyLayout};
......@@ -80,13 +81,13 @@ pub enum PathElem {
}
/// State for tracking recursive validation of references
pub struct RefTracking<'tcx> {
pub seen: FxHashSet<(OpTy<'tcx>)>,
pub todo: Vec<(OpTy<'tcx>, Vec<PathElem>)>,
pub struct RefTracking<'tcx, Tag> {
pub seen: FxHashSet<(OpTy<'tcx, Tag>)>,
pub todo: Vec<(OpTy<'tcx, Tag>, Vec<PathElem>)>,
}
impl<'tcx> RefTracking<'tcx> {
pub fn new(op: OpTy<'tcx>) -> Self {
impl<'tcx, Tag: Copy+Eq+Hash> RefTracking<'tcx, Tag> {
pub fn new(op: OpTy<'tcx, Tag>) -> Self {
let mut ref_tracking = RefTracking {
seen: FxHashSet(),
todo: vec![(op, Vec::new())],
......@@ -128,7 +129,7 @@ fn path_format(path: &Vec<PathElem>) -> String {
out
}
fn scalar_format(value: ScalarMaybeUndef) -> String {
fn scalar_format<Tag>(value: ScalarMaybeUndef<Tag>) -> String {
match value {
ScalarMaybeUndef::Undef =>
"uninitialized bytes".to_owned(),
......@@ -143,9 +144,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Make sure that `value` is valid for `ty`, *assuming* `ty` is a primitive type.
fn validate_primitive_type(
&self,
value: ValTy<'tcx>,
value: ValTy<'tcx, M::PointerTag>,
path: &Vec<PathElem>,
ref_tracking: Option<&mut RefTracking<'tcx>>,
ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>,
const_mode: bool,
) -> EvalResult<'tcx> {
// Go over all the primitive types
......@@ -185,7 +186,7 @@ fn validate_primitive_type(
let tail = self.tcx.struct_tail(place.layout.ty);
match tail.sty {
ty::Dynamic(..) => {
let vtable = try_validation!(place.extra.unwrap().to_ptr(),
let vtable = try_validation!(place.meta.unwrap().to_ptr(),
"non-pointer vtable in fat pointer", path);
try_validation!(self.read_drop_type_from_vtable(vtable),
"invalid drop fn in vtable", path);
......@@ -194,7 +195,7 @@ fn validate_primitive_type(
// FIXME: More checks for the vtable.
}
ty::Slice(..) | ty::Str => {
try_validation!(place.extra.unwrap().to_usize(self),
try_validation!(place.meta.unwrap().to_usize(self),
"non-integer slice length in fat pointer", path);
}
ty::Foreign(..) => {
......@@ -207,7 +208,7 @@ fn validate_primitive_type(
// for safe ptrs, also check the ptr values itself
if !ty.is_unsafe_ptr() {
// Make sure this is non-NULL and aligned
let (size, align) = self.size_and_align_of(place.extra, place.layout)?;
let (size, align) = self.size_and_align_of(place.meta, place.layout)?;
match self.memory.check_align(place.ptr, align) {
Ok(_) => {},
Err(err) => match err.kind {
......@@ -272,7 +273,7 @@ fn validate_primitive_type(
/// Make sure that `value` matches the
fn validate_scalar_layout(
&self,
value: ScalarMaybeUndef,
value: ScalarMaybeUndef<M::PointerTag>,
size: Size,
path: &Vec<PathElem>,
layout: &layout::Scalar,
......@@ -363,9 +364,9 @@ fn validate_scalar_layout(
/// validation (e.g., pointer values are fine in integers at runtime).
pub fn validate_operand(
&self,
dest: OpTy<'tcx>,
dest: OpTy<'tcx, M::PointerTag>,
path: &mut Vec<PathElem>,
mut ref_tracking: Option<&mut RefTracking<'tcx>>,
mut ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>,
const_mode: bool,
) -> EvalResult<'tcx> {
trace!("validate_operand: {:?}, {:?}", *dest, dest.layout.ty);
......
......@@ -1163,7 +1163,7 @@ fn collect_miri<'a, 'tcx>(
}
Some(AllocType::Memory(alloc)) => {
trace!("collecting {:?} with {:#?}", alloc_id, alloc);
for &inner in alloc.relocations.values() {
for &((), inner) in alloc.relocations.values() {
collect_miri(tcx, inner, output);
}
},
......@@ -1272,7 +1272,7 @@ fn collect_const<'a, 'tcx>(
ConstValue::Scalar(Scalar::Ptr(ptr)) =>
collect_miri(tcx, ptr.alloc_id, output),
ConstValue::ByRef(_id, alloc, _offset) => {
for &id in alloc.relocations.values() {
for &((), id) in alloc.relocations.values() {
collect_miri(tcx, id, output);
}
}
......