提交 a6096fbf 编写于 作者: Oliver Schneider 提交者: GitHub

Merge pull request #292 from oli-obk/static_alloc_ids

Prepare for splitting off static alloc ids from local alloc ids
......@@ -16,14 +16,14 @@
use tls::MemoryExt;
use super::memory::Kind;
use super::memory::MemoryKind;
pub trait EvalContextExt<'tcx> {
fn call_c_abi(
&mut self,
def_id: DefId,
arg_operands: &[mir::Operand<'tcx>],
dest: Lvalue<'tcx>,
dest: Lvalue,
dest_ty: Ty<'tcx>,
dest_block: mir::BasicBlock,
) -> EvalResult<'tcx>;
......@@ -33,7 +33,7 @@ fn call_c_abi(
fn call_missing_fn(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
destination: Option<(Lvalue, mir::BasicBlock)>,
arg_operands: &[mir::Operand<'tcx>],
sig: ty::FnSig<'tcx>,
path: String,
......@@ -42,7 +42,7 @@ fn call_missing_fn(
fn eval_fn_call(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
destination: Option<(Lvalue, mir::BasicBlock)>,
arg_operands: &[mir::Operand<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
......@@ -53,7 +53,7 @@ impl<'a, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'tcx, super::Evaluator>
fn eval_fn_call(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
destination: Option<(Lvalue, mir::BasicBlock)>,
arg_operands: &[mir::Operand<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
......@@ -89,7 +89,7 @@ fn call_c_abi(
&mut self,
def_id: DefId,
arg_operands: &[mir::Operand<'tcx>],
dest: Lvalue<'tcx>,
dest: Lvalue,
dest_ty: Ty<'tcx>,
dest_block: mir::BasicBlock,
) -> EvalResult<'tcx> {
......@@ -113,7 +113,7 @@ fn call_c_abi(
self.write_null(dest, dest_ty)?;
} else {
let align = self.memory.pointer_size();
let ptr = self.memory.allocate(size, align, Kind::C.into())?;
let ptr = self.memory.allocate(size, align, MemoryKind::C.into())?;
self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
}
}
......@@ -121,7 +121,7 @@ fn call_c_abi(
"free" => {
let ptr = args[0].into_ptr(&mut self.memory)?;
if !ptr.is_null()? {
self.memory.deallocate(ptr.to_ptr()?, None, Kind::C.into())?;
self.memory.deallocate(ptr.to_ptr()?, None, MemoryKind::C.into())?;
}
}
......@@ -251,7 +251,7 @@ fn call_c_abi(
}
if let Some(old) = success {
if let Some(var) = old {
self.memory.deallocate(var, None, Kind::Env.into())?;
self.memory.deallocate(var, None, MemoryKind::Env.into())?;
}
self.write_null(dest, dest_ty)?;
} else {
......@@ -274,12 +274,12 @@ fn call_c_abi(
}
if let Some((name, value)) = new {
// +1 for the null terminator
let value_copy = self.memory.allocate((value.len() + 1) as u64, 1, Kind::Env.into())?;
let value_copy = self.memory.allocate((value.len() + 1) as u64, 1, MemoryKind::Env.into())?;
self.memory.write_bytes(value_copy.into(), &value)?;
let trailing_zero_ptr = value_copy.offset(value.len() as u64, &self)?.into();
self.memory.write_bytes(trailing_zero_ptr, &[0])?;
if let Some(var) = self.machine_data.env_vars.insert(name.to_owned(), value_copy) {
self.memory.deallocate(var, None, Kind::Env.into())?;
self.memory.deallocate(var, None, MemoryKind::Env.into())?;
}
self.write_null(dest, dest_ty)?;
} else {
......@@ -317,7 +317,8 @@ fn call_c_abi(
}
"sysconf" => {
let name = self.value_to_primval(args[0], usize)?.to_u64()?;
let c_int = self.operand_ty(&arg_operands[0]);
let name = self.value_to_primval(args[0], c_int)?.to_u64()?;
trace!("sysconf() called with name {}", name);
// cache the sysconf integers via miri's global cache
let paths = &[
......@@ -329,8 +330,8 @@ fn call_c_abi(
if let Ok(instance) = self.resolve_path(path) {
let cid = GlobalId { instance, promoted: None };
// compute global if not cached
let val = match self.globals.get(&cid).map(|glob| glob.value) {
Some(value) => self.value_to_primval(value, usize)?.to_u64()?,
let val = match self.globals.get(&cid).cloned() {
Some(ptr) => self.value_to_primval(Value::ByRef(ptr), c_int)?.to_u64()?,
None => eval_body_as_primval(self.tcx, instance)?.0.to_u64()?,
};
if val == name {
......@@ -459,7 +460,7 @@ fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> {
fn call_missing_fn(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
destination: Option<(Lvalue, mir::BasicBlock)>,
arg_operands: &[mir::Operand<'tcx>],
sig: ty::FnSig<'tcx>,
path: String,
......@@ -500,7 +501,7 @@ fn call_missing_fn(
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = self.memory.allocate(size, align, Kind::Rust.into())?;
let ptr = self.memory.allocate(size, align, MemoryKind::Rust.into())?;
self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
}
"alloc::heap::::__rust_alloc_zeroed" => {
......@@ -512,7 +513,7 @@ fn call_missing_fn(
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = self.memory.allocate(size, align, Kind::Rust.into())?;
let ptr = self.memory.allocate(size, align, MemoryKind::Rust.into())?;
self.memory.write_repeat(ptr.into(), 0, size)?;
self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
}
......@@ -526,7 +527,7 @@ fn call_missing_fn(
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
self.memory.deallocate(ptr, Some((old_size, align)), Kind::Rust.into())?;
self.memory.deallocate(ptr, Some((old_size, align)), MemoryKind::Rust.into())?;
}
"alloc::heap::::__rust_realloc" => {
let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?;
......@@ -543,7 +544,7 @@ fn call_missing_fn(
if !new_align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(new_align));
}
let new_ptr = self.memory.reallocate(ptr, old_size, old_align, new_size, new_align, Kind::Rust.into())?;
let new_ptr = self.memory.reallocate(ptr, old_size, old_align, new_size, new_align, MemoryKind::Rust.into())?;
self.write_primval(dest, PrimVal::Ptr(new_ptr), dest_ty)?;
}
......
......@@ -8,7 +8,7 @@
Lvalue, LvalueExtra,
PrimVal, PrimValKind, Value, Pointer,
HasMemory,
EvalContext,
EvalContext, PtrAndAlign,
};
use helpers::EvalContextExt as HelperEvalContextExt;
......@@ -18,7 +18,7 @@ fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[mir::Operand<'tcx>],
dest: Lvalue<'tcx>,
dest: Lvalue,
dest_ty: Ty<'tcx>,
dest_layout: &'tcx Layout,
target: mir::BasicBlock,
......@@ -30,7 +30,7 @@ fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[mir::Operand<'tcx>],
dest: Lvalue<'tcx>,
dest: Lvalue,
dest_ty: Ty<'tcx>,
dest_layout: &'tcx Layout,
target: mir::BasicBlock,
......@@ -266,10 +266,10 @@ fn call_intrinsic(
let size = self.type_size(dest_ty)?.expect("cannot zero unsized value");
let init = |this: &mut Self, val: Value| {
let zero_val = match val {
Value::ByRef { ptr, aligned } => {
Value::ByRef(PtrAndAlign { ptr, .. }) => {
// These writes have no alignment restriction anyway.
this.memory.write_repeat(ptr, 0, size)?;
Value::ByRef { ptr, aligned }
val
},
// TODO(solson): Revisit this, it's fishy to check for Undef here.
Value::ByVal(PrimVal::Undef) => match this.ty_to_primval_kind(dest_ty) {
......@@ -289,9 +289,8 @@ fn call_intrinsic(
};
match dest {
Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?,
Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } => self.memory.write_repeat(ptr, 0, size)?,
Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::None } => self.memory.write_repeat(ptr, 0, size)?,
Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat or unaligned ptr target"),
Lvalue::Global(cid) => self.modify_global(cid, init)?,
}
}
......@@ -457,19 +456,18 @@ fn call_intrinsic(
let size = dest_layout.size(&self.tcx.data_layout).bytes();
let uninit = |this: &mut Self, val: Value| {
match val {
Value::ByRef { ptr, aligned } => {
Value::ByRef(PtrAndAlign { ptr, .. }) => {
this.memory.mark_definedness(ptr, size, false)?;
Ok(Value::ByRef { ptr, aligned })
Ok(val)
},
_ => Ok(Value::ByVal(PrimVal::Undef)),
}
};
match dest {
Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?,
Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } =>
Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::None } =>
self.memory.mark_definedness(ptr, size, false)?,
Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat or unaligned ptr target"),
Lvalue::Global(cid) => self.modify_global(cid, uninit)?,
}
}
......
......@@ -71,7 +71,7 @@ fn run_main<'a, 'tcx: 'a>(
// Return value
let size = ecx.tcx.data_layout.pointer_size.bytes();
let align = ecx.tcx.data_layout.pointer_align.abi();
let ret_ptr = ecx.memory_mut().allocate(size, align, Kind::Stack)?;
let ret_ptr = ecx.memory_mut().allocate(size, align, MemoryKind::Stack)?;
cleanup_ptr = Some(ret_ptr);
// Push our stack frame
......@@ -114,7 +114,7 @@ fn run_main<'a, 'tcx: 'a>(
while ecx.step()? {}
ecx.run_tls_dtors()?;
if let Some(cleanup_ptr) = cleanup_ptr {
ecx.memory_mut().deallocate(cleanup_ptr, None, Kind::Stack)?;
ecx.memory_mut().deallocate(cleanup_ptr, None, MemoryKind::Stack)?;
}
Ok(())
}
......@@ -161,13 +161,13 @@ struct MemoryData<'tcx> {
impl<'tcx> Machine<'tcx> for Evaluator {
type Data = EvaluatorData;
type MemoryData = MemoryData<'tcx>;
type MemoryKinds = memory::Kind;
type MemoryKinds = memory::MemoryKind;
/// Returns Ok() when the function was handled, fail otherwise
fn eval_fn_call<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
destination: Option<(Lvalue, mir::BasicBlock)>,
arg_operands: &[mir::Operand<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
......@@ -179,7 +179,7 @@ fn call_intrinsic<'a>(
ecx: &mut rustc_miri::interpret::EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[mir::Operand<'tcx>],
dest: Lvalue<'tcx>,
dest: Lvalue,
dest_ty: ty::Ty<'tcx>,
dest_layout: &'tcx Layout,
target: mir::BasicBlock,
......@@ -198,8 +198,8 @@ fn try_ptr_op<'a>(
ecx.ptr_op(bin_op, left, left_ty, right, right_ty)
}
fn mark_static_initialized(m: memory::Kind) -> EvalResult<'tcx> {
use memory::Kind::*;
fn mark_static_initialized(m: memory::MemoryKind) -> EvalResult<'tcx> {
use memory::MemoryKind::*;
match m {
// FIXME: This could be allowed, but not for env vars set during miri execution
Env => err!(Unimplemented("statics can't refer to env vars".to_owned())),
......@@ -218,7 +218,7 @@ fn box_alloc<'a>(
Ok(PrimVal::Bytes(align.into()))
} else {
ecx.memory
.allocate(size, align, Kind::Machine(memory::Kind::Rust))
.allocate(size, align, MemoryKind::Machine(memory::MemoryKind::Rust))
.map(PrimVal::Ptr)
}
}
......
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum Kind {
pub enum MemoryKind {
/// Error if deallocated any other way than `rust_deallocate`
Rust,
/// Error if deallocated any other way than `free`
......@@ -9,8 +9,8 @@ pub enum Kind {
Env,
}
impl Into<::rustc_miri::interpret::Kind<Kind>> for Kind {
fn into(self) -> ::rustc_miri::interpret::Kind<Kind> {
::rustc_miri::interpret::Kind::Machine(self)
impl Into<::rustc_miri::interpret::MemoryKind<MemoryKind>> for MemoryKind {
fn into(self) -> ::rustc_miri::interpret::MemoryKind<MemoryKind> {
::rustc_miri::interpret::MemoryKind::Machine(self)
}
}
......@@ -7,9 +7,10 @@
use super::{
EvalResult, EvalError, EvalErrorKind,
Global, GlobalId, Lvalue,
GlobalId, Lvalue, Value,
PrimVal,
EvalContext, StackPopCleanup,
EvalContext, StackPopCleanup, PtrAndAlign,
MemoryKind,
};
use rustc_const_math::ConstInt;
......@@ -30,7 +31,11 @@ pub fn eval_body_as_primval<'a, 'tcx>(
let mir = ecx.load_mir(instance.def)?;
if !ecx.globals.contains_key(&cid) {
ecx.globals.insert(cid, Global::uninitialized(mir.return_ty));
let size = ecx.type_size_with_substs(mir.return_ty, instance.substs)?.expect("unsized global");
let align = ecx.type_align_with_substs(mir.return_ty, instance.substs)?;
let ptr = ecx.memory.allocate(size, align, MemoryKind::UninitializedStatic)?;
let aligned = !ecx.is_packed(mir.return_ty)?;
ecx.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned });
let mutable = !mir.return_ty.is_freeze(
ecx.tcx,
ty::ParamEnv::empty(Reveal::All),
......@@ -42,18 +47,18 @@ pub fn eval_body_as_primval<'a, 'tcx>(
};
let cleanup = StackPopCleanup::MarkStatic(mutability);
let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id()));
trace!("pushing stack frame for global: {}", name);
trace!("const_eval: pushing stack frame for global: {}", name);
ecx.push_stack_frame(
instance,
mir.span,
mir,
Lvalue::Global(cid),
Lvalue::from_ptr(ptr),
cleanup,
)?;
while ecx.step()? {}
}
let value = ecx.globals.get(&cid).expect("global not cached").value;
let value = Value::ByRef(*ecx.globals.get(&cid).expect("global not cached"));
Ok((ecx.value_to_primval(value, mir.return_ty)?, mir.return_ty))
}
......@@ -132,7 +137,7 @@ impl<'tcx> super::Machine<'tcx> for CompileTimeFunctionEvaluator {
fn eval_fn_call<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
destination: Option<(Lvalue, mir::BasicBlock)>,
_arg_operands: &[mir::Operand<'tcx>],
span: Span,
_sig: ty::FnSig<'tcx>,
......@@ -168,7 +173,7 @@ fn call_intrinsic<'a>(
_ecx: &mut EvalContext<'a, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
_args: &[mir::Operand<'tcx>],
_dest: Lvalue<'tcx>,
_dest: Lvalue,
_dest_ty: Ty<'tcx>,
_dest_layout: &'tcx layout::Layout,
_target: mir::BasicBlock,
......
......@@ -139,7 +139,7 @@ fn description(&self) -> &str {
DoubleFree =>
"tried to deallocate dangling pointer",
InvalidFunctionPointer =>
"tried to use an integer pointer or a dangling pointer as a function pointer",
"tried to use a function pointer after offsetting it",
InvalidBool =>
"invalid boolean value read",
InvalidDiscriminant =>
......
......@@ -2,7 +2,6 @@
use rustc::ty::layout::{Size, Align};
use rustc::ty::{self, Ty};
use rustc_data_structures::indexed_vec::Idx;
use syntax::ast::Mutability;
use super::{
EvalResult,
......@@ -10,19 +9,18 @@
MemoryPointer,
PrimVal, Value, Pointer,
Machine,
PtrAndAlign,
};
#[derive(Copy, Clone, Debug)]
pub enum Lvalue<'tcx> {
pub enum Lvalue {
/// An lvalue referring to a value allocated in the `Memory` system.
Ptr {
/// An lvalue may have an invalid (integral or undef) pointer,
/// since it might be turned back into a reference
/// before ever being dereferenced.
ptr: Pointer,
ptr: PtrAndAlign,
extra: LvalueExtra,
/// Remember whether this lvalue is *supposed* to be aligned.
aligned: bool,
},
/// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
......@@ -31,9 +29,6 @@ pub enum Lvalue<'tcx> {
frame: usize,
local: mir::Local,
},
/// An lvalue referring to a global
Global(GlobalId<'tcx>),
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
......@@ -55,42 +50,30 @@ pub struct GlobalId<'tcx> {
pub promoted: Option<mir::Promoted>,
}
#[derive(Clone, Debug)]
pub struct Global<'tcx> {
pub value: Value,
/// Only used in `force_allocation` to ensure we don't mark the memory
/// before the static is initialized. It is possible to convert a
/// global which initially is `Value::ByVal(PrimVal::Undef)` and gets
/// lifted to an allocation before the static is fully initialized
pub(super) initialized: bool,
pub(super) mutable: Mutability,
pub(super) ty: Ty<'tcx>,
}
impl<'tcx> Lvalue<'tcx> {
impl<'tcx> Lvalue {
/// Produces an Lvalue that will error if attempted to be read from
pub fn undef() -> Self {
Self::from_primval_ptr(PrimVal::Undef.into())
}
pub fn from_primval_ptr(ptr: Pointer) -> Self {
Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true }
Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::None }
}
pub fn from_ptr(ptr: MemoryPointer) -> Self {
Self::from_primval_ptr(ptr.into())
}
pub(super) fn to_ptr_extra_aligned(self) -> (Pointer, LvalueExtra, bool) {
pub(super) fn to_ptr_extra_aligned(self) -> (PtrAndAlign, LvalueExtra) {
match self {
Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
Lvalue::Ptr { ptr, extra } => (ptr, extra),
_ => bug!("to_ptr_and_extra: expected Lvalue::Ptr, got {:?}", self),
}
}
pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
let (ptr, extra, _aligned) = self.to_ptr_extra_aligned();
let (ptr, extra) = self.to_ptr_extra_aligned();
// At this point, we forget about the alignment information -- the lvalue has been turned into a reference,
// and no matter where it came from, it now must be aligned.
assert_eq!(extra, LvalueExtra::None);
......@@ -113,26 +96,6 @@ pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
}
}
impl<'tcx> Global<'tcx> {
pub(super) fn uninitialized(ty: Ty<'tcx>) -> Self {
Global {
value: Value::ByVal(PrimVal::Undef),
mutable: Mutability::Mutable,
ty,
initialized: false,
}
}
pub(super) fn initialized(ty: Ty<'tcx>, value: Value, mutable: Mutability) -> Self {
Global {
value,
mutable,
ty,
initialized: true,
}
}
}
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
/// Reads a value from the lvalue without going through the intermediate step of obtaining
/// a `miri::Lvalue`
......@@ -147,7 +110,7 @@ pub fn try_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx
Static(ref static_) => {
let instance = ty::Instance::mono(self.tcx, static_.def_id);
let cid = GlobalId { instance, promoted: None };
Ok(Some(self.globals.get(&cid).expect("global not cached").value))
Ok(Some(Value::ByRef(*self.globals.get(&cid).expect("global not cached"))))
},
Projection(ref proj) => self.try_read_lvalue_projection(proj),
}
......@@ -195,22 +158,19 @@ pub(super) fn eval_and_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> Eva
self.read_lvalue(lvalue)
}
pub fn read_lvalue(&self, lvalue: Lvalue<'tcx>) -> EvalResult<'tcx, Value> {
pub fn read_lvalue(&self, lvalue: Lvalue) -> EvalResult<'tcx, Value> {
match lvalue {
Lvalue::Ptr { ptr, extra, aligned } => {
Lvalue::Ptr { ptr, extra } => {
assert_eq!(extra, LvalueExtra::None);
Ok(Value::ByRef { ptr, aligned })
Ok(Value::ByRef(ptr))
}
Lvalue::Local { frame, local } => {
self.stack[frame].get_local(local)
}
Lvalue::Global(cid) => {
Ok(self.globals.get(&cid).expect("global not cached").value)
}
}
}
pub fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
pub fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue> {
use rustc::mir::Lvalue::*;
let lvalue = match *mir_lvalue {
Local(mir::RETURN_POINTER) => self.frame().return_lvalue,
......@@ -218,7 +178,11 @@ pub fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx
Static(ref static_) => {
let instance = ty::Instance::mono(self.tcx, static_.def_id);
Lvalue::Global(GlobalId { instance, promoted: None })
let gid = GlobalId { instance, promoted: None };
Lvalue::Ptr {
ptr: *self.globals.get(&gid).expect("uncached global"),
extra: LvalueExtra::None,
}
}
Projection(ref proj) => {
......@@ -237,11 +201,11 @@ pub fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx
pub fn lvalue_field(
&mut self,
base: Lvalue<'tcx>,
base: Lvalue,
field_index: usize,
base_ty: Ty<'tcx>,
field_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Lvalue<'tcx>> {
) -> EvalResult<'tcx, Lvalue> {
let base_layout = self.type_layout(base_ty)?;
use rustc::ty::layout::Layout::*;
let (offset, packed) = match *base_layout {
......@@ -250,10 +214,11 @@ pub fn lvalue_field(
},
General { ref variants, .. } => {
let (_, base_extra, _) = base.to_ptr_extra_aligned();
let (_, base_extra) = base.to_ptr_extra_aligned();
if let LvalueExtra::DowncastVariant(variant_idx) = base_extra {
// +1 for the discriminant, which is field 0
(variants[variant_idx].offsets[field_index + 1], variants[variant_idx].packed)
assert!(!variants[variant_idx].packed);
(variants[variant_idx].offsets[field_index + 1], false)
} else {
bug!("field access on enum had no variant index");
}
......@@ -300,8 +265,8 @@ pub fn lvalue_field(
};
// Do not allocate in trivial cases
let (base_ptr, base_extra, aligned) = match base {
Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
let (base_ptr, base_extra) = match base {
Lvalue::Ptr { ptr, extra } => (ptr, extra),
Lvalue::Local { frame, local } => match self.stack[frame].get_local(local)? {
// in case the type has a single field, just return the value
Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => {
......@@ -312,27 +277,20 @@ pub fn lvalue_field(
Value::ByValPair(..) |
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
},
Lvalue::Global(cid) => match self.globals.get(&cid).expect("uncached global").value {
// in case the type has a single field, just return the value
Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => {
assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0");
return Ok(base);
},
Value::ByRef{..} |
Value::ByValPair(..) |
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
},
};
let offset = match base_extra {
LvalueExtra::Vtable(tab) => {
let (_, align) = self.size_and_align_of_dst(base_ty, base_ptr.to_value_with_vtable(tab))?;
let (_, align) = self.size_and_align_of_dst(base_ty, base_ptr.ptr.to_value_with_vtable(tab))?;
offset.abi_align(Align::from_bytes(align, align).unwrap()).bytes()
}
_ => offset.bytes(),
};
let ptr = base_ptr.offset(offset, &self)?;
let mut ptr = base_ptr.offset(offset, &self)?;
// if we were unaligned, stay unaligned
// no matter what we were, if we are packed, we must not be aligned anymore
ptr.aligned &= !packed;
let field_ty = self.monomorphize(field_ty, self.substs());
......@@ -349,43 +307,43 @@ pub fn lvalue_field(
base_extra
};
Ok(Lvalue::Ptr { ptr, extra, aligned: aligned && !packed })
Ok(Lvalue::Ptr { ptr, extra } )
}
pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue> {
Ok(match self.tcx.struct_tail(ty).sty {
ty::TyDynamic(..) => {
let (ptr, vtable) = val.into_ptr_vtable_pair(&self.memory)?;
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: true }
Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::Vtable(vtable) }
},
ty::TyStr | ty::TySlice(_) => {
let (ptr, len) = val.into_slice(&self.memory)?;
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: true }
Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::Length(len) }
},
_ => Lvalue::Ptr { ptr: val.into_ptr(&self.memory)?, extra: LvalueExtra::None, aligned: true },
_ => Lvalue::from_primval_ptr(val.into_ptr(&self.memory)?),
})
}
pub(super) fn lvalue_index(&mut self, base: Lvalue<'tcx>, outer_ty: Ty<'tcx>, n: u64) -> EvalResult<'tcx, Lvalue<'tcx>> {
pub(super) fn lvalue_index(&mut self, base: Lvalue, outer_ty: Ty<'tcx>, n: u64) -> EvalResult<'tcx, Lvalue> {
// Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
let base = self.force_allocation(base)?;
let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len);
let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
Ok(Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned })
Ok(Lvalue::Ptr { ptr, extra: LvalueExtra::None })
}
pub(super) fn eval_lvalue_projection(
&mut self,
base: Lvalue<'tcx>,
base: Lvalue,
base_ty: Ty<'tcx>,
proj_elem: &mir::ProjectionElem<'tcx, mir::Operand<'tcx>, Ty<'tcx>>,
) -> EvalResult<'tcx, Lvalue<'tcx>> {
) -> EvalResult<'tcx, Lvalue> {
use rustc::mir::ProjectionElem::*;
let (ptr, extra, aligned) = match *proj_elem {
let (ptr, extra) = match *proj_elem {
Field(field, field_ty) => {
return self.lvalue_field(base, field.index(), base_ty, field_ty);
}
......@@ -394,7 +352,7 @@ pub(super) fn eval_lvalue_projection(
let base_layout = self.type_layout(base_ty)?;
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, base_extra, aligned) = base.to_ptr_extra_aligned();
let (base_ptr, base_extra) = base.to_ptr_extra_aligned();
use rustc::ty::layout::Layout::*;
let extra = match *base_layout {
......@@ -402,7 +360,7 @@ pub(super) fn eval_lvalue_projection(
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => base_extra,
_ => bug!("variant downcast on non-aggregate: {:?}", base_layout),
};
(base_ptr, extra, aligned)
(base_ptr, extra)
}
Deref => {
......@@ -431,7 +389,7 @@ pub(super) fn eval_lvalue_projection(
ConstantIndex { offset, min_length, from_end } => {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.type_size(elem_ty)?.expect("sequence element must be sized");
......@@ -444,24 +402,24 @@ pub(super) fn eval_lvalue_projection(
};
let ptr = base_ptr.offset(index * elem_size, &self)?;
(ptr, LvalueExtra::None, aligned)
(ptr, LvalueExtra::None)
}
Subslice { from, to } => {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
assert!(u64::from(from) <= n - u64::from(to));
let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from));
(ptr, extra, aligned)
(ptr, extra)
}
};
Ok(Lvalue::Ptr { ptr, extra, aligned })
Ok(Lvalue::Ptr { ptr, extra })
}
pub(super) fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
......
......@@ -33,7 +33,7 @@ pub trait Machine<'tcx>: Sized {
fn eval_fn_call<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
destination: Option<(Lvalue, mir::BasicBlock)>,
arg_operands: &[mir::Operand<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
......@@ -44,7 +44,7 @@ fn call_intrinsic<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[mir::Operand<'tcx>],
dest: Lvalue<'tcx>,
dest: Lvalue,
dest_ty: ty::Ty<'tcx>,
dest_layout: &'tcx ty::layout::Layout,
target: mir::BasicBlock,
......
......@@ -82,12 +82,55 @@ fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
// Allocations and pointers
////////////////////////////////////////////////////////////////////////////////
#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct AllocId(pub u64);
/// Opaque identifier for an allocation, packed into a single `u64`.
/// The top bit encodes which `AllocIdKind` this is (0 = function,
/// 1 = runtime allocation); the remaining 63 bits are an index.
/// See `AllocId::into_alloc_id_kind` / `AllocIdKind::into_alloc_id`.
#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct AllocId(u64);
/// Decoded form of an `AllocId`: distinguishes function "allocations"
/// (entries in the `functions` vector) from runtime allocations
/// (entries in the `alloc_map`).
#[derive(Debug)]
enum AllocIdKind {
/// We can't ever have more than `usize::max_value` functions at the same time
/// since we never "deallocate" functions
Function(usize),
/// Locals and heap allocations (also statics for now, but those will get their
/// own variant soonish).
Runtime(u64),
}
impl AllocIdKind {
/// Packs this kind back into the single-`u64` `AllocId` representation:
/// function indices are stored as-is (top bit clear), runtime ids get
/// the top bit set as the discriminant.
fn into_alloc_id(self) -> AllocId {
match self {
AllocIdKind::Function(n) => AllocId(n as u64),
AllocIdKind::Runtime(n) => AllocId((1 << 63) | n),
}
}
}
impl AllocId {
/// Currently yields the top bit to discriminate the `AllocIdKind`s
fn discriminant(self) -> u64 {
self.0 >> 63
}
/// Yields everything but the discriminant bits
fn index(self) -> u64 {
self.0 & ((1 << 63) - 1)
}
/// Decodes the packed id back into its kind: discriminant 0 is a
/// function index, 1 is a runtime allocation id. The `bug!` arm is
/// unreachable for a 1-bit discriminant of a `u64`, but kept so the
/// match stays exhaustive if the encoding ever grows.
fn into_alloc_id_kind(self) -> AllocIdKind {
match self.discriminant() {
0 => AllocIdKind::Function(self.index() as usize),
1 => AllocIdKind::Runtime(self.index()),
n => bug!("got discriminant {} for AllocId", n),
}
}
}
impl fmt::Display for AllocId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
write!(f, "{:?}", self.into_alloc_id_kind())
}
}
impl fmt::Debug for AllocId {
// Print the decoded `AllocIdKind` (e.g. `Function(3)` / `Runtime(7)`)
// rather than the raw packed integer, which is hard to read.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.into_alloc_id_kind())
}
}
......@@ -108,7 +151,7 @@ pub struct Allocation<M> {
/// Use the `mark_static_initalized` method of `Memory` to ensure that an error occurs, if the memory of this
/// allocation is modified or deallocated in the future.
/// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
pub kind: Kind<M>,
pub kind: MemoryKind<M>,
/// Memory regions that are locked by some function
locks: RangeMap<LockInfo>,
}
......@@ -129,7 +172,7 @@ fn check_locks<'tcx>(&self, frame: Option<usize>, offset: u64, len: u64, access:
}
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum Kind<T> {
pub enum MemoryKind<T> {
/// Error if deallocated except during a stack pop
Stack,
/// Static in the process of being initialized.
......@@ -186,16 +229,10 @@ pub struct Memory<'a, 'tcx, M: Machine<'tcx>> {
pub data: M::MemoryData,
/// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
alloc_map: HashMap<AllocId, Allocation<M::MemoryKinds>>,
alloc_map: HashMap<u64, Allocation<M::MemoryKinds>>,
/// The AllocId to assign to the next new allocation. Always incremented, never gets smaller.
next_id: AllocId,
/// Set of statics, constants, promoteds, vtables, ... to prevent `mark_static_initalized` from
/// stepping out of its own allocations. This set only contains statics backed by an
/// allocation. If they are ByVal or ByValPair they are not here, but will be inserted once
/// they become ByRef.
static_alloc: HashSet<AllocId>,
/// The AllocId to assign to the next new regular allocation. Always incremented, never gets smaller.
next_alloc_id: u64,
/// Number of virtual bytes allocated.
memory_usage: u64,
......@@ -205,7 +242,7 @@ pub struct Memory<'a, 'tcx, M: Machine<'tcx>> {
/// Function "allocations". They exist solely so pointers have something to point to, and
/// we can figure out what they point to.
functions: HashMap<AllocId, ty::Instance<'tcx>>,
functions: Vec<ty::Instance<'tcx>>,
/// Inverse map of `functions` so we don't allocate a new pointer every time we need one
function_alloc_cache: HashMap<ty::Instance<'tcx>, AllocId>,
......@@ -231,13 +268,12 @@ pub fn new(layout: &'a TargetDataLayout, max_memory: u64, data: M::MemoryData) -
Memory {
data,
alloc_map: HashMap::new(),
functions: HashMap::new(),
functions: Vec::new(),
function_alloc_cache: HashMap::new(),
next_id: AllocId(0),
next_alloc_id: 0,
layout,
memory_size: max_memory,
memory_usage: 0,
static_alloc: HashSet::new(),
literal_alloc_cache: HashMap::new(),
reads_are_aligned: Cell::new(true),
writes_are_aligned: Cell::new(true),
......@@ -245,20 +281,20 @@ pub fn new(layout: &'a TargetDataLayout, max_memory: u64, data: M::MemoryData) -
}
}
pub fn allocations(&self) -> ::std::collections::hash_map::Iter<AllocId, Allocation<M::MemoryKinds>> {
self.alloc_map.iter()
pub fn allocations<'x>(&'x self) -> impl Iterator<Item = (AllocId, &'x Allocation<M::MemoryKinds>)> {
self.alloc_map.iter().map(|(&id, alloc)| (AllocIdKind::Runtime(id).into_alloc_id(), alloc))
}
pub fn create_fn_alloc(&mut self, instance: ty::Instance<'tcx>) -> MemoryPointer {
if let Some(&alloc_id) = self.function_alloc_cache.get(&instance) {
return MemoryPointer::new(alloc_id, 0);
}
let id = self.next_id;
let id = self.functions.len();
debug!("creating fn ptr: {}", id);
self.next_id.0 += 1;
self.functions.insert(id, instance);
self.function_alloc_cache.insert(instance, id);
MemoryPointer::new(id, 0)
self.functions.push(instance);
let alloc_id = AllocIdKind::Function(id).into_alloc_id();
self.function_alloc_cache.insert(instance, alloc_id);
MemoryPointer::new(alloc_id, 0)
}
pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointer> {
......@@ -266,7 +302,7 @@ pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointe
return Ok(MemoryPointer::new(alloc_id, 0));
}
let ptr = self.allocate(bytes.len() as u64, 1, Kind::UninitializedStatic)?;
let ptr = self.allocate(bytes.len() as u64, 1, MemoryKind::UninitializedStatic)?;
self.write_bytes(ptr.into(), bytes)?;
self.mark_static_initalized(ptr.alloc_id, Mutability::Immutable)?;
self.literal_alloc_cache.insert(bytes.to_vec(), ptr.alloc_id);
......@@ -277,7 +313,7 @@ pub fn allocate(
&mut self,
size: u64,
align: u64,
kind: Kind<M::MemoryKinds>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, MemoryPointer> {
assert_ne!(align, 0);
assert!(align.is_power_of_two());
......@@ -300,10 +336,10 @@ pub fn allocate(
mutable: Mutability::Mutable,
locks: RangeMap::new(),
};
let id = self.next_id;
self.next_id.0 += 1;
let id = self.next_alloc_id;
self.next_alloc_id += 1;
self.alloc_map.insert(id, alloc);
Ok(MemoryPointer::new(id, 0))
Ok(MemoryPointer::new(AllocIdKind::Runtime(id).into_alloc_id(), 0))
}
pub fn reallocate(
......@@ -313,7 +349,7 @@ pub fn reallocate(
old_align: u64,
new_size: u64,
new_align: u64,
kind: Kind<M::MemoryKinds>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, MemoryPointer> {
use std::cmp::min;
......@@ -338,13 +374,19 @@ pub fn deallocate(
&mut self,
ptr: MemoryPointer,
size_and_align: Option<(u64, u64)>,
kind: Kind<M::MemoryKinds>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx> {
if ptr.offset != 0 {
return err!(DeallocateNonBasePtr);
}
let alloc = match self.alloc_map.remove(&ptr.alloc_id) {
let alloc_id = match ptr.alloc_id.into_alloc_id_kind() {
AllocIdKind::Function(_) =>
return err!(DeallocatedWrongMemoryKind("function".to_string(), format!("{:?}", kind))),
AllocIdKind::Runtime(id) => id,
};
let alloc = match self.alloc_map.remove(&alloc_id) {
Some(alloc) => alloc,
None => return err!(DoubleFree),
};
......@@ -624,22 +666,22 @@ pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option<CodeExtent>)
/// Allocation accessors
impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<M::MemoryKinds>> {
match self.alloc_map.get(&id) {
Some(alloc) => Ok(alloc),
None => match self.functions.get(&id) {
Some(_) => err!(DerefFunctionPointer),
match id.into_alloc_id_kind() {
AllocIdKind::Function(_) => err!(DerefFunctionPointer),
AllocIdKind::Runtime(id) => match self.alloc_map.get(&id) {
Some(alloc) => Ok(alloc),
None => err!(DanglingPointerDeref),
}
},
}
}
fn get_mut_unchecked(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation<M::MemoryKinds>> {
match self.alloc_map.get_mut(&id) {
Some(alloc) => Ok(alloc),
None => match self.functions.get(&id) {
Some(_) => err!(DerefFunctionPointer),
match id.into_alloc_id_kind() {
AllocIdKind::Function(_) => err!(DerefFunctionPointer),
AllocIdKind::Runtime(id) => match self.alloc_map.get_mut(&id) {
Some(alloc) => Ok(alloc),
None => err!(DanglingPointerDeref),
}
},
}
}
......@@ -657,12 +699,9 @@ pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, ty::Instance<'tcx>>
return err!(InvalidFunctionPointer);
}
debug!("reading fn ptr: {}", ptr.alloc_id);
match self.functions.get(&ptr.alloc_id) {
Some(&fndef) => Ok(fndef),
None => match self.alloc_map.get(&ptr.alloc_id) {
Some(_) => err!(ExecuteMemory),
None => err!(InvalidFunctionPointer),
}
match ptr.alloc_id.into_alloc_id_kind() {
AllocIdKind::Function(id) => Ok(self.functions[id]),
AllocIdKind::Runtime(_) => err!(ExecuteMemory),
}
}
......@@ -684,17 +723,18 @@ pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
let prefix_len = msg.len();
let mut relocations = vec![];
let alloc = match (self.alloc_map.get(&id), self.functions.get(&id)) {
(Some(a), None) => a,
(None, Some(instance)) => {
trace!("{} {}", msg, instance);
let alloc = match id.into_alloc_id_kind() {
AllocIdKind::Function(id) => {
trace!("{} {}", msg, self.functions[id]);
continue;
},
(None, None) => {
trace!("{} (deallocated)", msg);
continue;
AllocIdKind::Runtime(id) => match self.alloc_map.get(&id) {
Some(a) => a,
None => {
trace!("{} (deallocated)", msg);
continue;
}
},
(Some(_), Some(_)) => bug!("miri invariant broken: an allocation id exists that points to both a function and a memory location"),
};
for i in 0..(alloc.bytes.len() as u64) {
......@@ -713,11 +753,11 @@ pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
}
let immutable = match (alloc.kind, alloc.mutable) {
(Kind::UninitializedStatic, _) => " (static in the process of initialization)".to_owned(),
(Kind::Static, Mutability::Mutable) => " (static mut)".to_owned(),
(Kind::Static, Mutability::Immutable) => " (immutable)".to_owned(),
(Kind::Machine(m), _) => format!(" ({:?})", m),
(Kind::Stack, _) => " (stack)".to_owned(),
(MemoryKind::UninitializedStatic, _) => " (static in the process of initialization)".to_owned(),
(MemoryKind::Static, Mutability::Mutable) => " (static mut)".to_owned(),
(MemoryKind::Static, Mutability::Immutable) => " (immutable)".to_owned(),
(MemoryKind::Machine(m), _) => format!(" ({:?})", m),
(MemoryKind::Stack, _) => " (stack)".to_owned(),
};
trace!("{}({} bytes, alignment {}){}", msg, alloc.bytes.len(), alloc.align, immutable);
......@@ -744,8 +784,8 @@ pub fn leak_report(&self) -> usize {
let leaks: Vec<_> = self.alloc_map
.iter()
.filter_map(|(&key, val)| {
if val.kind != Kind::Static {
Some(key)
if val.kind != MemoryKind::Static {
Some(AllocIdKind::Runtime(key).into_alloc_id())
} else {
None
}
......@@ -812,18 +852,11 @@ fn get_bytes_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalRe
/// Reading and writing
impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
/// Mark an allocation as being the entry point to a static (recorded in the
/// `static_alloc` set). Each allocation id may be marked at most once;
/// marking it a second time is a broken interpreter invariant and aborts
/// via `bug!`.
pub fn mark_static(&mut self, alloc_id: AllocId) {
trace!("mark_static: {:?}", alloc_id);
// `HashSet::insert` returns `false` when the id was already present,
// i.e. this allocation was marked as a static entry point before.
if !self.static_alloc.insert(alloc_id) {
bug!("tried to mark an allocation ({:?}) as static twice", alloc_id);
}
}
/// mark an allocation pointed to by a static as static and initialized
pub fn mark_inner_allocation(&mut self, alloc: AllocId, mutability: Mutability) -> EvalResult<'tcx> {
// relocations into other statics are not "inner allocations"
if !self.static_alloc.contains(&alloc) {
if self.get(alloc).ok().map_or(false, |alloc| alloc.kind != MemoryKind::UninitializedStatic) {
self.mark_static_initalized(alloc, mutability)?;
}
Ok(())
......@@ -834,28 +867,31 @@ pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutabili
trace!("mark_static_initalized {:?}, mutability: {:?}", alloc_id, mutability);
// do not use `self.get_mut(alloc_id)` here, because we might have already marked a
// sub-element or have circular pointers (e.g. `Rc`-cycles)
let alloc_id = match alloc_id.into_alloc_id_kind() {
AllocIdKind::Function(_) => return Ok(()),
AllocIdKind::Runtime(id) => id,
};
let relocations = match self.alloc_map.get_mut(&alloc_id) {
Some(&mut Allocation { ref mut relocations, ref mut kind, ref mut mutable, .. }) => {
match *kind {
// const eval results can refer to "locals".
// E.g. `const Foo: &u32 = &1;` refers to the temp local that stores the `1`
Kind::Stack |
MemoryKind::Stack |
// The entire point of this function
Kind::UninitializedStatic => {},
Kind::Machine(m) => M::mark_static_initialized(m)?,
Kind::Static => {
MemoryKind::UninitializedStatic => {},
MemoryKind::Machine(m) => M::mark_static_initialized(m)?,
MemoryKind::Static => {
trace!("mark_static_initalized: skipping already initialized static referred to by static currently being initialized");
return Ok(());
},
}
*kind = Kind::Static;
*kind = MemoryKind::Static;
*mutable = mutability;
// take out the relocations vector to free the borrow on self, so we can call
// mark recursively
mem::replace(relocations, Default::default())
},
None if !self.functions.contains_key(&alloc_id) => return err!(DanglingPointerDeref),
_ => return Ok(()),
None => return err!(DanglingPointerDeref),
};
// recurse into inner allocations
for &alloc in relocations.values() {
......
......@@ -33,12 +33,12 @@
StackPopCleanup,
DynamicLifetime,
TyAndPacked,
PtrAndAlign,
};
pub use self::lvalue::{
Lvalue,
LvalueExtra,
Global,
GlobalId,
};
......@@ -46,7 +46,7 @@
AllocId,
Memory,
MemoryPointer,
Kind,
MemoryKind,
HasMemory,
};
......
......@@ -39,7 +39,7 @@ pub fn intrinsic_with_overflow(
op: mir::BinOp,
left: &mir::Operand<'tcx>,
right: &mir::Operand<'tcx>,
dest: Lvalue<'tcx>,
dest: Lvalue,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
......@@ -54,7 +54,7 @@ pub fn intrinsic_overflowing(
op: mir::BinOp,
left: &mir::Operand<'tcx>,
right: &mir::Operand<'tcx>,
dest: Lvalue<'tcx>,
dest: Lvalue,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx, bool> {
let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
......
......@@ -13,10 +13,9 @@
use super::{
EvalResult,
EvalContext, StackPopCleanup, TyAndPacked,
Global, GlobalId, Lvalue,
Value, PrimVal,
HasMemory,
EvalContext, StackPopCleanup, TyAndPacked, PtrAndAlign,
GlobalId, Lvalue,
HasMemory, MemoryKind,
Machine,
};
......@@ -179,11 +178,19 @@ fn global_item(
if self.tcx.has_attr(def_id, "linkage") {
// FIXME: check that it's `#[linkage = "extern_weak"]`
trace!("Initializing an extern global with NULL");
self.globals.insert(cid, Global::initialized(self.tcx.type_of(def_id), Value::ByVal(PrimVal::Bytes(0)), mutability));
let ptr_size = self.memory.pointer_size();
let ptr = self.memory.allocate(ptr_size, ptr_size, MemoryKind::UninitializedStatic)?;
self.memory.write_usize(ptr, 0)?;
self.memory.mark_static_initalized(ptr.alloc_id, mutability)?;
self.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned: true });
return Ok(false);
}
let mir = self.load_mir(instance.def)?;
self.globals.insert(cid, Global::uninitialized(mir.return_ty));
let size = self.type_size_with_substs(mir.return_ty, substs)?.expect("unsized global");
let align = self.type_align_with_substs(mir.return_ty, substs)?;
let ptr = self.memory.allocate(size, align, MemoryKind::UninitializedStatic)?;
let aligned = !self.is_packed(mir.return_ty)?;
self.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned });
let internally_mutable = !mir.return_ty.is_freeze(
self.tcx,
ty::ParamEnv::empty(Reveal::All),
......@@ -200,7 +207,7 @@ fn global_item(
instance,
span,
mir,
Lvalue::Global(cid),
Lvalue::from_ptr(ptr),
cleanup,
)?;
Ok(true)
......@@ -256,13 +263,16 @@ fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Loca
}
let mir = &self.mir.promoted[index];
self.try(|this| {
let ty = this.ecx.monomorphize(mir.return_ty, this.instance.substs);
this.ecx.globals.insert(cid, Global::uninitialized(ty));
let size = this.ecx.type_size_with_substs(mir.return_ty, this.instance.substs)?.expect("unsized global");
let align = this.ecx.type_align_with_substs(mir.return_ty, this.instance.substs)?;
let ptr = this.ecx.memory.allocate(size, align, MemoryKind::UninitializedStatic)?;
let aligned = !this.ecx.is_packed(mir.return_ty)?;
this.ecx.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned });
trace!("pushing stack frame for {:?}", index);
this.ecx.push_stack_frame(this.instance,
constant.span,
mir,
Lvalue::Global(cid),
Lvalue::from_ptr(ptr),
StackPopCleanup::MarkStatic(Mutability::Immutable),
)?;
Ok(true)
......
......@@ -11,15 +11,15 @@
};
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub(crate) fn drop_lvalue(&mut self, lval: Lvalue<'tcx>, instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> {
pub(crate) fn drop_lvalue(&mut self, lval: Lvalue, instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> {
trace!("drop_lvalue: {:#?}", lval);
// We take the address of the object. This may well be unaligned, which is fine for us here.
// However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
// by rustc.
let val = match self.force_allocation(lval)? {
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: _ } => ptr.to_value_with_vtable(vtable),
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: _ } => ptr.to_value_with_len(len),
Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: _ } => ptr.to_value(),
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => ptr.ptr.to_value_with_vtable(vtable),
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => ptr.ptr.to_value_with_len(len),
Lvalue::Ptr { ptr, extra: LvalueExtra::None } => ptr.ptr.to_value(),
_ => bug!("force_allocation broken"),
};
self.drop(val, instance, ty, span)
......
......@@ -6,7 +6,7 @@
use super::{
EvalError, EvalResult, EvalErrorKind,
EvalContext, eval_context, TyAndPacked,
EvalContext, eval_context, TyAndPacked, PtrAndAlign,
Lvalue,
MemoryPointer,
PrimVal, Value,
......@@ -204,7 +204,7 @@ fn check_ty_compat<'tcx>(
fn eval_fn_call(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
destination: Option<(Lvalue, mir::BasicBlock)>,
arg_operands: &[mir::Operand<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
......@@ -311,10 +311,10 @@ fn eval_fn_call(
if self.frame().mir.args_iter().count() == fields.len() + 1 {
let offsets = variant.offsets.iter().map(|s| s.bytes());
match arg_val {
Value::ByRef { ptr, aligned } => {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
assert!(aligned, "Unaligned ByRef-values cannot occur as function arguments");
for ((offset, ty), arg_local) in offsets.zip(fields).zip(arg_locals) {
let arg = Value::ByRef { ptr: ptr.offset(offset, &self)?, aligned: true};
let arg = Value::by_ref(ptr.offset(offset, &self)?);
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
trace!("writing arg {:?} to {:?} (type: {})", arg, dest, ty);
self.write_value(arg, dest, ty)?;
......
......@@ -8,7 +8,7 @@
use super::{
EvalResult,
EvalContext, eval_context,
MemoryPointer, Kind,
MemoryPointer, MemoryKind,
Value, PrimVal,
Machine,
};
......@@ -51,7 +51,7 @@ pub fn get_vtable(&mut self, ty: Ty<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>) ->
let ptr_size = self.memory.pointer_size();
let methods = ::rustc::traits::get_vtable_methods(self.tcx, trait_ref);
let vtable = self.memory.allocate(ptr_size * (3 + methods.count() as u64), ptr_size, Kind::UninitializedStatic)?;
let vtable = self.memory.allocate(ptr_size * (3 + methods.count() as u64), ptr_size, MemoryKind::UninitializedStatic)?;
let drop = eval_context::resolve_drop_in_place(self.tcx, ty);
let drop = self.memory.create_fn_alloc(drop);
......
......@@ -16,7 +16,7 @@
Machine,
};
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, Lvalue<'tcx>>;
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, Lvalue>;
#[derive(Copy, Clone, Debug)]
enum ValidationMode {
......@@ -213,7 +213,7 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod
};
if is_owning {
match query.lval {
Lvalue::Ptr { ptr, extra, aligned: _ } => {
Lvalue::Ptr { ptr, extra } => {
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Lvalues?
let len = match self.type_size(query.ty)? {
......@@ -242,8 +242,8 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod
}
}
}
Lvalue::Local { .. } | Lvalue::Global(..) => {
// These are not backed by memory, so we have nothing to do.
Lvalue::Local { .. } => {
// Not backed by memory, so we have nothing to do.
}
}
}
......
......@@ -7,6 +7,7 @@
EvalResult,
Memory, MemoryPointer, HasMemory, PointerArithmetic,
Machine,
PtrAndAlign,
};
pub(super) fn bytes_to_f32(bytes: u128) -> f32 {
......@@ -36,7 +37,7 @@ pub(super) fn f64_to_bytes(f: f64) -> u128 {
/// operations and fat pointers. This idea was taken from rustc's trans.
#[derive(Clone, Copy, Debug)]
pub enum Value {
ByRef { ptr: Pointer, aligned: bool},
ByRef(PtrAndAlign),
ByVal(PrimVal),
ByValPair(PrimVal, PrimVal),
}
......@@ -166,7 +167,7 @@ pub enum PrimValKind {
impl<'a, 'tcx: 'a> Value {
#[inline]
pub fn by_ref(ptr: Pointer) -> Self {
Value::ByRef { ptr, aligned: true }
Value::ByRef(PtrAndAlign { ptr, aligned: true })
}
/// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
......@@ -174,7 +175,7 @@ pub fn by_ref(ptr: Pointer) -> Self {
pub fn into_ptr<M: Machine<'tcx>>(&self, mem: &Memory<'a, 'tcx, M>) -> EvalResult<'tcx, Pointer> {
use self::Value::*;
match *self {
ByRef { ptr, aligned } => {
ByRef(PtrAndAlign { ptr, aligned }) => {
mem.read_maybe_aligned(aligned, |mem| mem.read_ptr(ptr.to_ptr()?) )
},
ByVal(ptr) | ByValPair(ptr, _) => Ok(ptr.into()),
......@@ -187,7 +188,7 @@ pub(super) fn into_ptr_vtable_pair<M: Machine<'tcx>>(
) -> EvalResult<'tcx, (Pointer, MemoryPointer)> {
use self::Value::*;
match *self {
ByRef { ptr: ref_ptr, aligned } => {
ByRef(PtrAndAlign { ptr: ref_ptr, aligned }) => {
mem.read_maybe_aligned(aligned, |mem| {
let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?;
let vtable = mem.read_ptr(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?;
......@@ -205,7 +206,7 @@ pub(super) fn into_ptr_vtable_pair<M: Machine<'tcx>>(
pub(super) fn into_slice<M: Machine<'tcx>>(&self, mem: &Memory<'a, 'tcx, M>) -> EvalResult<'tcx, (Pointer, u64)> {
use self::Value::*;
match *self {
ByRef { ptr: ref_ptr, aligned } => {
ByRef(PtrAndAlign { ptr: ref_ptr, aligned } ) => {
mem.read_maybe_aligned(aligned, |mem| {
let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?;
let len = mem.read_usize(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?;
......
......@@ -10,5 +10,5 @@ fn main() {
let y : *mut u8 = unsafe { mem::transmute(x) };
let y = y.wrapping_offset(1);
let x : fn() = unsafe { mem::transmute(y) };
x(); //~ ERROR: tried to use an integer pointer or a dangling pointer as a function pointer
x(); //~ ERROR: tried to use a function pointer after offsetting it
}
// Regression test: reading a field of a `#[repr(packed)]` struct stored in a
// static. The braces in `{FOO.i}` form a block expression that copies the
// field's value into a temporary, so `assert_eq!` never takes a reference to
// the (possibly unaligned) packed field.
#[repr(packed)]
struct Foo {
// Packed layout: this field may sit at an unaligned offset.
i: i32
}
fn main() {
assert_eq!({FOO.i}, 42);
}
static FOO: Foo = Foo { i: 42 };
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册