Commit 788c5f3c authored by Dylan MacKenzie

Revert "Refactor EvalContext stack and heap into inner struct"

This reverts commit 59d21c526c036d7097d05edd6dffdad9c5b1cb62 and instead uses
a tuple to store the mutable parts of an EvalContext (which now include
`Machine`). This requires that `Machine` be `Clone`.
Parent 7f9b01a0
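Because the snapshot is now a plain tuple, the hand-written `Clone`/`Eq`/`Hash` impls on the deleted `EvalState` struct are replaced by the derived impls that tuples get for free whenever their components implement the trait. Below is a minimal sketch of the idea, using simplified placeholder types rather than rustc's real `Machine`, `Frame`, and `Memory`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Placeholder stand-ins; the real interpreter types are far richer.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
struct Machine; // now part of the snapshot, hence the new `Clone` bound
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
struct Frame(u32);
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
struct Memory(Vec<u8>);

// The mutable interpreter state as a tuple: Clone/Eq/Hash come for free.
type EvalSnapshot = (Machine, Vec<Frame>, Memory);

fn main() {
    let snapshot: EvalSnapshot = (Machine, vec![Frame(0)], Memory(vec![1, 2, 3]));
    let copy = snapshot.clone();
    assert_eq!(snapshot, copy);

    let mut h = DefaultHasher::new();
    snapshot.hash(&mut h);
    println!("snapshot hash: {:x}", h.finish());
}
```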
@@ -76,8 +76,8 @@ fn cast_from_int(
// No alignment check needed for raw pointers. But we have to truncate to target ptr size.
TyRawPtr(_) => {
Ok(Scalar::Bits {
bits: self.memory().truncate_to_ptr(v).0 as u128,
defined: self.memory().pointer_size().bits() as u8,
bits: self.memory.truncate_to_ptr(v).0 as u128,
defined: self.memory.pointer_size().bits() as u8,
})
},
@@ -92,7 +92,7 @@ fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalRe
match dest_ty.sty {
// float -> uint
TyUint(t) => {
let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize);
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
match fty {
FloatTy::F32 => Ok(Scalar::Bits {
bits: Single::from_bits(bits).to_u128(width).value,
@@ -106,7 +106,7 @@ fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalRe
},
// float -> int
TyInt(t) => {
let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize);
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
match fty {
FloatTy::F32 => Ok(Scalar::Bits {
bits: Single::from_bits(bits).to_i128(width).value as u128,
......
@@ -88,7 +88,7 @@ pub fn value_to_const_value<'tcx>(
Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a, b)),
Value::ByRef(ptr, align) => {
let ptr = ptr.to_ptr().unwrap();
let alloc = ecx.memory().get(ptr.alloc_id)?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi() >= align.abi());
assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes());
let mut alloc = alloc.clone();
@@ -149,7 +149,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>(
}
let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?;
assert!(!layout.is_unsized());
let ptr = ecx.memory_mut().allocate(
let ptr = ecx.memory.allocate(
layout.size,
layout.align,
MemoryKind::Stack,
@@ -185,6 +185,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>(
Ok((value, ptr, layout.ty))
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct CompileTimeEvaluator;
impl<'tcx> Into<EvalError<'tcx>> for ConstEvalError {
@@ -486,7 +487,7 @@ pub fn const_variant_index<'a, 'tcx>(
let (ptr, align) = match value {
Value::ScalarPair(..) | Value::Scalar(_) => {
let layout = ecx.layout_of(val.ty)?;
let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?.into();
let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?.into();
ecx.write_value_to_ptr(value, ptr, layout.align, val.ty)?;
(ptr, layout.align)
},
@@ -515,9 +516,9 @@ pub fn const_value_to_allocation_provider<'a, 'tcx>(
());
let value = ecx.const_to_value(val.val)?;
let layout = ecx.layout_of(val.ty)?;
let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?;
let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?;
ecx.write_value_to_ptr(value, ptr.into(), layout.align, val.ty)?;
let alloc = ecx.memory().get(ptr.alloc_id)?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
Ok(tcx.intern_const_alloc(alloc.clone()))
};
result().expect("unable to convert ConstValue to Allocation")
......
@@ -34,8 +34,11 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
/// Bounds in scope for polymorphic evaluations.
pub param_env: ty::ParamEnv<'tcx>,
/// Virtual memory and call stack.
pub(crate) state: EvalState<'a, 'mir, 'tcx, M>,
/// The virtual memory system.
pub memory: Memory<'a, 'mir, 'tcx, M>,
/// The virtual call stack.
pub(crate) stack: Vec<Frame<'mir, 'tcx>>,
/// The maximum number of stack frames allowed
pub(crate) stack_limit: usize,
@@ -47,50 +50,6 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
pub(crate) loop_detector: InfiniteLoopDetector<'a, 'mir, 'tcx, M>,
}
pub(crate) struct EvalState<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
/// The virtual memory system.
memory: Memory<'a, 'mir, 'tcx, M>,
/// The virtual call stack.
stack: Vec<Frame<'mir, 'tcx>>,
}
impl<'a, 'mir, 'tcx, M> Clone for EvalState<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
'tcx: 'a + 'mir,
{
fn clone(&self) -> Self {
EvalState {
memory: self.memory.clone(),
stack: self.stack.clone(),
}
}
}
impl<'a, 'mir, 'tcx, M> Eq for EvalState<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
'tcx: 'a + 'mir,
{}
impl<'a, 'mir, 'tcx, M> PartialEq for EvalState<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
'tcx: 'a + 'mir,
{
fn eq(&self, other: &Self) -> bool {
self.memory == other.memory
&& self.stack == other.stack
}
}
impl<'a, 'mir, 'tcx, M> Hash for EvalState<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
'tcx: 'a + 'mir,
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.memory.hash(state);
self.stack.hash(state);
}
}
/// A stack frame.
#[derive(Clone)]
pub struct Frame<'mir, 'tcx: 'mir> {
@@ -180,22 +139,26 @@ fn hash<H: Hasher>(&self, state: &mut H) {
}
}
/// The virtual machine state during const-evaluation at a given point in time.
type EvalSnapshot<'a, 'mir, 'tcx, M>
= (M, Vec<Frame<'mir, 'tcx>>, Memory<'a, 'mir, 'tcx, M>);
pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
/// The set of all `EvalState` *hashes* observed by this detector.
/// The set of all `EvalSnapshot` *hashes* observed by this detector.
///
/// Not a proper bloom filter.
bloom: FxHashSet<u64>,
/// The set of all `EvalState`s observed by this detector.
/// The set of all `EvalSnapshot`s observed by this detector.
///
/// An `EvalState` will only be fully cloned once it has caused a collision
/// An `EvalSnapshot` will only be fully cloned once it has caused a collision
/// in `bloom`. As a result, the detector must observe *two* full cycles of
/// an infinite loop before it triggers.
snapshots: FxHashSet<EvalState<'a, 'mir, 'tcx, M>>,
snapshots: FxHashSet<EvalSnapshot<'a, 'mir, 'tcx, M>>,
}
impl<'a, 'mir, 'tcx, M> Default for InfiniteLoopDetector<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
where M: Eq + Hash + Machine<'mir, 'tcx>,
'tcx: 'a + 'mir,
{
fn default() -> Self {
@@ -207,10 +170,17 @@ fn default() -> Self {
}
impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
where M: Clone + Eq + Hash + Machine<'mir, 'tcx>,
'tcx: 'a + 'mir,
{
pub fn observe(&mut self, snapshot: &EvalState<'a, 'mir, 'tcx, M>) -> Result<(), (/*TODO*/)> {
pub fn observe(
&mut self,
machine: &M,
stack: &Vec<Frame<'mir, 'tcx>>,
memory: &Memory<'a, 'mir, 'tcx, M>,
) -> Result<(), (/*TODO*/)> {
let snapshot = (machine, stack, memory);
let mut fx = FxHasher::default();
snapshot.hash(&mut fx);
let hash = fx.finish();
@@ -220,7 +190,7 @@ pub fn observe(&mut self, snapshot: &EvalState<'a, 'mir, 'tcx, M>) -> Result<(),
return Ok(())
}
if self.snapshots.insert(snapshot.clone()) {
if self.snapshots.insert((machine.clone(), stack.clone(), memory.clone())) {
// Spurious collision or first cycle
return Ok(())
}
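The `observe` logic above stores a cheap hash on first sight and clones the full `(machine, stack, memory)` state only after a hash collision, which is why two complete cycles of a loop must pass before an error is reported. Here is a self-contained sketch of that two-phase scheme, with a simplified `Snapshot` standing in for the real tuple:

```rust
use std::collections::HashSet;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Simplified stand-in for the (machine, stack, memory) snapshot tuple.
#[derive(Clone, Eq, PartialEq, Hash)]
struct Snapshot(u64);

#[derive(Default)]
struct LoopDetector {
    hashes: HashSet<u64>, // "not a proper bloom filter"
    snapshots: HashSet<Snapshot>,
}

impl LoopDetector {
    // Err(()) once an identical snapshot has been fully stored twice,
    // i.e. after the detector has observed two complete loop cycles.
    fn observe(&mut self, snapshot: &Snapshot) -> Result<(), ()> {
        let mut h = DefaultHasher::new();
        snapshot.hash(&mut h);
        if self.hashes.insert(h.finish()) {
            return Ok(()); // first sighting: remember only the hash
        }
        if self.snapshots.insert(snapshot.clone()) {
            return Ok(()); // spurious collision or first full cycle
        }
        Err(()) // the exact same state again: an infinite loop
    }
}

fn main() {
    let mut detector = LoopDetector::default();
    let state = Snapshot(42);
    assert!(detector.observe(&state).is_ok());  // hash recorded
    assert!(detector.observe(&state).is_ok());  // collision: snapshot cloned
    assert!(detector.observe(&state).is_err()); // loop detected
}
```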
@@ -322,15 +292,15 @@ pub fn new(
param_env: ty::ParamEnv<'tcx>,
machine: M,
memory_data: M::MemoryData,
) -> Self {
) -> Self
where M: Eq + Hash
{
EvalContext {
machine,
tcx,
param_env,
state: EvalState {
memory: Memory::new(tcx, memory_data),
stack: Vec::new(),
},
memory: Memory::new(tcx, memory_data),
stack: Vec::new(),
stack_limit: tcx.sess.const_eval_stack_frame_limit,
loop_detector: Default::default(),
steps_until_detector_enabled: MAX_TERMINATORS,
@@ -338,10 +308,10 @@ pub fn new(
}
pub(crate) fn with_fresh_body<F: FnOnce(&mut Self) -> R, R>(&mut self, f: F) -> R {
let stack = mem::replace(self.stack_mut(), Vec::new());
let stack = mem::replace(&mut self.stack, Vec::new());
let steps = mem::replace(&mut self.steps_until_detector_enabled, MAX_TERMINATORS);
let r = f(self);
*self.stack_mut() = stack;
self.stack = stack;
self.steps_until_detector_enabled = steps;
r
}
@@ -350,34 +320,29 @@ pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, Pointer> {
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
self.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)
self.memory.allocate(layout.size, layout.align, MemoryKind::Stack)
}
pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
&self.state.memory
&self.memory
}
pub fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
&mut self.state.memory
&mut self.memory
}
#[inline]
pub fn stack(&self) -> &[Frame<'mir, 'tcx>] {
&self.state.stack
}
pub fn stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx>> {
&mut self.state.stack
&self.stack
}
#[inline]
pub fn cur_frame(&self) -> usize {
assert!(self.stack().len() > 0);
self.stack().len() - 1
assert!(self.stack.len() > 0);
self.stack.len() - 1
}
pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
let ptr = self.memory_mut().allocate_bytes(s.as_bytes());
let ptr = self.memory.allocate_bytes(s.as_bytes());
Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx))
}
@@ -395,7 +360,7 @@ pub fn const_to_value(
}
ConstValue::ByRef(alloc, offset) => {
// FIXME: Allocate new AllocId for all constants inside
let id = self.memory_mut().allocate_value(alloc.clone(), MemoryKind::Stack)?;
let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?;
Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align))
},
ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a, b)),
@@ -566,7 +531,7 @@ pub fn push_stack_frame(
IndexVec::new()
};
self.stack_mut().push(Frame {
self.stack.push(Frame {
mir,
block: mir::START_BLOCK,
return_to_block,
@@ -577,9 +542,9 @@ pub fn push_stack_frame(
stmt: 0,
});
self.memory_mut().cur_frame = self.cur_frame();
self.memory.cur_frame = self.cur_frame();
if self.stack().len() > self.stack_limit {
if self.stack.len() > self.stack_limit {
err!(StackFrameLimitReached)
} else {
Ok(())
@@ -589,18 +554,18 @@ pub fn push_stack_frame(
pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
::log_settings::settings().indentation -= 1;
M::end_region(self, None)?;
let frame = self.stack_mut().pop().expect(
let frame = self.stack.pop().expect(
"tried to pop a stack frame, but there were none",
);
if !self.stack().is_empty() {
if !self.stack.is_empty() {
// TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame?
self.memory_mut().cur_frame = self.cur_frame();
self.memory.cur_frame = self.cur_frame();
}
match frame.return_to_block {
StackPopCleanup::MarkStatic(mutable) => {
if let Place::Ptr { ptr, .. } = frame.return_place {
// FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
self.memory_mut().mark_static_initialized(
self.memory.mark_static_initialized(
ptr.to_ptr()?.alloc_id,
mutable,
)?
@@ -623,8 +588,8 @@ pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
if let Some(Value::ByRef(ptr, _align)) = local {
trace!("deallocating local");
let ptr = ptr.to_ptr()?;
self.memory().dump_alloc(ptr.alloc_id);
self.memory_mut().deallocate_local(ptr)?;
self.memory.dump_alloc(ptr.alloc_id);
self.memory.deallocate_local(ptr)?;
};
Ok(())
}
@@ -637,7 +602,9 @@ pub(super) fn eval_rvalue_into_place(
&mut self,
rvalue: &mir::Rvalue<'tcx>,
place: &mir::Place<'tcx>,
) -> EvalResult<'tcx> {
) -> EvalResult<'tcx>
where M: Clone + Eq + Hash,
{
let dest = self.eval_place(place)?;
let dest_ty = self.place_ty(place);
@@ -744,7 +711,7 @@ pub(super) fn eval_rvalue_into_place(
let src = self.eval_place(place)?;
let ty = self.place_ty(place);
let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx);
let defined = self.memory().pointer_size().bits() as u8;
let defined = self.memory.pointer_size().bits() as u8;
self.write_scalar(
dest,
Scalar::Bits {
@@ -786,7 +753,7 @@ pub(super) fn eval_rvalue_into_place(
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(),
"SizeOf nullary MIR operator called for unsized type");
let defined = self.memory().pointer_size().bits() as u8;
let defined = self.memory.pointer_size().bits() as u8;
self.write_scalar(
dest,
Scalar::Bits {
@@ -881,7 +848,7 @@ pub(super) fn eval_rvalue_into_place(
def_id,
substs,
).ok_or_else(|| EvalErrorKind::TooGeneric.into());
let fn_ptr = self.memory_mut().create_fn_alloc(instance?);
let fn_ptr = self.memory.create_fn_alloc(instance?);
let valty = ValTy {
value: Value::Scalar(fn_ptr.into()),
ty: dest_ty,
@@ -917,7 +884,7 @@ pub(super) fn eval_rvalue_into_place(
substs,
ty::ClosureKind::FnOnce,
);
let fn_ptr = self.memory_mut().create_fn_alloc(instance);
let fn_ptr = self.memory.create_fn_alloc(instance);
let valty = ValTy {
value: Value::Scalar(fn_ptr.into()),
ty: dest_ty,
@@ -1194,7 +1161,7 @@ pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Cons
pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
let new_place = match place {
Place::Local { frame, local } => {
match self.stack()[frame].locals[local] {
match self.stack[frame].locals[local] {
None => return err!(DeadLocal),
Some(Value::ByRef(ptr, align)) => {
Place::Ptr {
@@ -1204,11 +1171,11 @@ pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
}
}
Some(val) => {
let ty = self.stack()[frame].mir.local_decls[local].ty;
let ty = self.monomorphize(ty, self.stack()[frame].instance.substs);
let ty = self.stack[frame].mir.local_decls[local].ty;
let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
let layout = self.layout_of(ty)?;
let ptr = self.alloc_ptr(ty)?;
self.stack_mut()[frame].locals[local] =
self.stack[frame].locals[local] =
Some(Value::ByRef(ptr.into(), layout.align)); // it stays live
let place = Place::from_ptr(ptr, layout.align);
self.write_value(ValTy { value: val, ty }, place)?;
@@ -1290,10 +1257,10 @@ pub fn write_value(
}
Place::Local { frame, local } => {
let dest = self.stack()[frame].get_local(local)?;
let dest = self.stack[frame].get_local(local)?;
self.write_value_possibly_by_val(
src_val,
|this, val| this.stack_mut()[frame].set_local(local, val),
|this, val| this.stack[frame].set_local(local, val),
dest,
dest_ty,
)
@@ -1335,7 +1302,7 @@ fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
} else {
let dest_ptr = self.alloc_ptr(dest_ty)?.into();
let layout = self.layout_of(dest_ty)?;
self.memory_mut().copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?;
self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?;
write_dest(self, Value::ByRef(dest_ptr, layout.align))?;
}
} else {
@@ -1357,7 +1324,7 @@ pub fn write_value_to_ptr(
trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout);
match value {
Value::ByRef(ptr, align) => {
self.memory_mut().copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false)
self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false)
}
Value::Scalar(scalar) => {
let signed = match layout.abi {
@@ -1370,7 +1337,7 @@ pub fn write_value_to_ptr(
_ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout),
}
};
self.memory_mut().write_scalar(dest, dest_align, scalar, layout.size, signed)
self.memory.write_scalar(dest, dest_align, scalar, layout.size, signed)
}
Value::ScalarPair(a_val, b_val) => {
trace!("write_value_to_ptr valpair: {:#?}", layout);
@@ -1383,8 +1350,8 @@ pub fn write_value_to_ptr(
let b_offset = a_size.abi_align(b.align(&self));
let b_ptr = dest.ptr_offset(b_offset, &self)?.into();
// TODO: What about signedness?
self.memory_mut().write_scalar(a_ptr, dest_align, a_val, a_size, false)?;
self.memory_mut().write_scalar(b_ptr, dest_align, b_val, b_size, false)
self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, false)?;
self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, false)
}
}
}
@@ -1415,8 +1382,8 @@ pub(crate) fn read_ptr(
ptr_align: Align,
pointee_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> {
let ptr_size = self.memory().pointer_size();
let p: Scalar = self.memory().read_ptr_sized(ptr, ptr_align)?.into();
let ptr_size = self.memory.pointer_size();
let p: Scalar = self.memory.read_ptr_sized(ptr, ptr_align)?.into();
if self.type_is_sized(pointee_ty) {
Ok(p.to_value())
} else {
@@ -1424,11 +1391,11 @@ pub(crate) fn read_ptr(
let extra = ptr.offset(ptr_size, self)?;
match self.tcx.struct_tail(pointee_ty).sty {
ty::TyDynamic(..) => Ok(p.to_value_with_vtable(
self.memory().read_ptr_sized(extra, ptr_align)?.to_ptr()?,
self.memory.read_ptr_sized(extra, ptr_align)?.to_ptr()?,
)),
ty::TySlice(..) | ty::TyStr => {
let len = self
.memory()
.memory
.read_ptr_sized(extra, ptr_align)?
.to_bits(ptr_size)?;
Ok(p.to_value_with_len(len as u64, self.tcx.tcx))
@@ -1446,10 +1413,10 @@ pub fn validate_ptr_target(
) -> EvalResult<'tcx> {
match ty.sty {
ty::TyBool => {
self.memory().read_scalar(ptr, ptr_align, Size::from_bytes(1))?.to_bool()?;
self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(1))?.to_bool()?;
}
ty::TyChar => {
let c = self.memory().read_scalar(ptr, ptr_align, Size::from_bytes(4))?.to_bits(Size::from_bytes(4))? as u32;
let c = self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(4))?.to_bits(Size::from_bytes(4))? as u32;
match ::std::char::from_u32(c) {
Some(..) => (),
None => return err!(InvalidChar(c as u128)),
@@ -1457,7 +1424,7 @@ pub fn validate_ptr_target(
}
ty::TyFnPtr(_) => {
self.memory().read_ptr_sized(ptr, ptr_align)?;
self.memory.read_ptr_sized(ptr, ptr_align)?;
},
ty::TyRef(_, rty, _) |
ty::TyRawPtr(ty::TypeAndMut { ty: rty, .. }) => {
@@ -1472,7 +1439,7 @@ pub fn validate_ptr_target(
if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi {
let size = scalar.value.size(self);
self.memory().read_scalar(ptr, ptr_align, size)?;
self.memory.read_scalar(ptr, ptr_align, size)?;
}
}
@@ -1493,7 +1460,7 @@ pub fn try_read_by_ref(&self, mut val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx,
pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
let layout = self.layout_of(ty)?;
self.memory().check_align(ptr, ptr_align)?;
self.memory.check_align(ptr, ptr_align)?;
if layout.size.bytes() == 0 {
return Ok(Some(Value::Scalar(Scalar::undef())));
@@ -1506,7 +1473,7 @@ pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> Eva
match layout.abi {
layout::Abi::Scalar(..) => {
let scalar = self.memory().read_scalar(ptr, ptr_align, layout.size)?;
let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?;
Ok(Some(Value::Scalar(scalar)))
}
layout::Abi::ScalarPair(ref a, ref b) => {
@@ -1515,8 +1482,8 @@ pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> Eva
let a_ptr = ptr;
let b_offset = a_size.abi_align(b.align(self));
let b_ptr = ptr.offset(b_offset, self)?.into();
let a_val = self.memory().read_scalar(a_ptr, ptr_align, a_size)?;
let b_val = self.memory().read_scalar(b_ptr, ptr_align, b_size)?;
let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
Ok(Some(Value::ScalarPair(a_val, b_val)))
}
_ => Ok(None),
@@ -1524,11 +1491,11 @@ pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> Eva
}
pub fn frame(&self) -> &Frame<'mir, 'tcx> {
self.stack().last().expect("no call frames exist")
self.stack.last().expect("no call frames exist")
}
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> {
self.stack_mut().last_mut().expect("no call frames exist")
self.stack.last_mut().expect("no call frames exist")
}
pub(super) fn mir(&self) -> &'mir mir::Mir<'tcx> {
@@ -1536,7 +1503,7 @@ pub(super) fn mir(&self) -> &'mir mir::Mir<'tcx> {
}
pub fn substs(&self) -> &'tcx Substs<'tcx> {
if let Some(frame) = self.stack().last() {
if let Some(frame) = self.stack.last() {
frame.instance.substs
} else {
Substs::empty()
@@ -1682,7 +1649,7 @@ pub fn dump_local(&self, place: Place) {
}
write!(msg, ":").unwrap();
match self.stack()[frame].get_local(local) {
match self.stack[frame].get_local(local) {
Err(err) => {
if let EvalErrorKind::DeadLocal = err.kind {
write!(msg, " is dead").unwrap();
@@ -1717,13 +1684,13 @@ pub fn dump_local(&self, place: Place) {
}
trace!("{}", msg);
self.memory().dump_allocs(allocs);
self.memory.dump_allocs(allocs);
}
Place::Ptr { ptr, align, .. } => {
match ptr {
Scalar::Ptr(ptr) => {
trace!("by align({}) ref:", align.abi());
self.memory().dump_alloc(ptr.alloc_id);
self.memory.dump_alloc(ptr.alloc_id);
}
ptr => trace!(" integral by ref: {:?}", ptr),
}
@@ -1736,12 +1703,12 @@ pub fn modify_local<F>(&mut self, frame: usize, local: mir::Local, f: F) -> Eval
where
F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
{
let val = self.stack()[frame].get_local(local)?;
let val = self.stack[frame].get_local(local)?;
let new_val = f(self, val)?;
self.stack_mut()[frame].set_local(local, new_val)?;
self.stack[frame].set_local(local, new_val)?;
// FIXME(solson): Run this when setting to Undef? (See previous version of this code.)
// if let Value::ByRef(ptr) = self.stack()[frame].get_local(local) {
// self.memory().deallocate(ptr)?;
// if let Value::ByRef(ptr) = self.stack[frame].get_local(local) {
// self.memory.deallocate(ptr)?;
// }
Ok(())
}
......
@@ -32,6 +32,7 @@ pub enum MemoryKind<T> {
// Top-level interpreter memory
////////////////////////////////////////////////////////////////////////////////
#[derive(Clone)]
pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
/// Additional data required by the Machine
pub data: M::MemoryData,
@@ -48,21 +49,6 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}
impl<'a, 'mir, 'tcx, M> Clone for Memory<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
'tcx: 'a + 'mir,
{
fn clone(&self) -> Self {
Memory {
data: self.data.clone(),
alloc_kind: self.alloc_kind.clone(),
alloc_map: self.alloc_map.clone(),
cur_frame: self.cur_frame.clone(),
tcx: self.tcx.clone(),
}
}
}
impl<'a, 'mir, 'tcx, M> Eq for Memory<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
'tcx: 'a + 'mir,
@@ -1067,12 +1053,12 @@ fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> {
#[inline]
fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
self.memory_mut()
&mut self.memory
}
#[inline]
fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
self.memory()
&self.memory
}
}
......
@@ -201,7 +201,7 @@ pub fn read_place(&self, place: Place) -> EvalResult<'tcx, Value> {
assert_eq!(extra, PlaceExtra::None);
Ok(Value::ByRef(ptr, align))
}
Place::Local { frame, local } => self.stack()[frame].get_local(local),
Place::Local { frame, local } => self.stack[frame].get_local(local),
}
}
@@ -261,7 +261,7 @@ pub fn place_field(
let (base_ptr, base_align, base_extra) = match base {
Place::Ptr { ptr, align, extra } => (ptr, align, extra),
Place::Local { frame, local } => {
match (&self.stack()[frame].get_local(local)?, &base_layout.abi) {
match (&self.stack[frame].get_local(local)?, &base_layout.abi) {
// in case the field covers the entire type, just return the value
(&Value::Scalar(_), &layout::Abi::Scalar(_)) |
(&Value::ScalarPair(..), &layout::Abi::ScalarPair(..))
......
@@ -2,18 +2,22 @@
//!
//! The main entry point is the `step` method.
use std::hash::Hash;
use rustc::mir;
use rustc::mir::interpret::EvalResult;
use super::{EvalContext, Machine};
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
where M: Clone + Eq + Hash,
{
pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) {
self.steps_until_detector_enabled
= self.steps_until_detector_enabled.saturating_sub(n);
if self.steps_until_detector_enabled == 0 {
let _ = self.loop_detector.observe(&self.state); // TODO: Handle error
let _ = self.loop_detector.observe(&self.machine, &self.stack, &self.memory); // TODO: Handle error
// FIXME(#49980): make this warning a lint
self.tcx.sess.span_warn(self.frame().span, "Constant evaluating a complex constant, this might take some time");
@@ -23,7 +27,7 @@ pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) {
/// Returns true as long as there are more things to do.
pub fn step(&mut self) -> EvalResult<'tcx, bool> {
if self.stack().is_empty() {
if self.stack.is_empty() {
return Ok(false);
}
@@ -57,7 +61,7 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
// *before* executing the statement.
let frame_idx = self.cur_frame();
self.tcx.span = stmt.source_info.span;
self.memory_mut().tcx.span = stmt.source_info.span;
self.memory.tcx.span = stmt.source_info.span;
match stmt.kind {
Assign(ref place, ref rvalue) => self.eval_rvalue_into_place(rvalue, place)?,
@@ -106,16 +110,16 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
InlineAsm { .. } => return err!(InlineAsm),
}
self.stack_mut()[frame_idx].stmt += 1;
self.stack[frame_idx].stmt += 1;
Ok(())
}
fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
trace!("{:?}", terminator.kind);
self.tcx.span = terminator.source_info.span;
self.memory_mut().tcx.span = terminator.source_info.span;
self.memory.tcx.span = terminator.source_info.span;
self.eval_terminator(terminator)?;
if !self.stack().is_empty() {
if !self.stack.is_empty() {
trace!("// {:?}", self.frame().block);
}
Ok(())
......
@@ -71,7 +71,7 @@ pub(super) fn eval_terminator(
let (fn_def, sig) = match func.ty.sty {
ty::TyFnPtr(sig) => {
let fn_ptr = self.value_to_scalar(func)?.to_ptr()?;
let instance = self.memory().get_fn(fn_ptr)?;
let instance = self.memory.get_fn(fn_ptr)?;
let instance_ty = instance.ty(*self.tcx);
match instance_ty.sty {
ty::TyFnDef(..) => {
@@ -377,14 +377,14 @@ fn eval_fn_call(
}
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory().pointer_size();
let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?;
let fn_ptr = self.memory().read_ptr_sized(
let fn_ptr = self.memory.read_ptr_sized(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
ptr_align
)?.to_ptr()?;
let instance = self.memory().get_fn(fn_ptr)?;
let instance = self.memory.get_fn(fn_ptr)?;
let mut args = args.to_vec();
let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty;
args[0].ty = ty;
......
@@ -25,26 +25,26 @@ pub fn get_vtable(
let size = layout.size.bytes();
let align = layout.align.abi();
let ptr_size = self.memory().pointer_size();
let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let methods = self.tcx.vtable_methods(trait_ref);
let vtable = self.memory_mut().allocate(
let vtable = self.memory.allocate(
ptr_size * (3 + methods.len() as u64),
ptr_align,
MemoryKind::Stack,
)?;
let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
let drop = self.memory_mut().create_fn_alloc(drop);
self.memory_mut().write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?;
let drop = self.memory.create_fn_alloc(drop);
self.memory.write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?;
let size_ptr = vtable.offset(ptr_size, &self)?;
self.memory_mut().write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits {
self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits {
bits: size as u128,
defined: ptr_size.bits() as u8,
})?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
self.memory_mut().write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits {
self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits {
bits: align as u128,
defined: ptr_size.bits() as u8,
})?;
@@ -52,13 +52,13 @@ pub fn get_vtable(
for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory_mut().create_fn_alloc(instance);
let fn_ptr = self.memory.create_fn_alloc(instance);
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
self.memory_mut().write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?;
self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?;
}
}
self.memory_mut().mark_static_initialized(
self.memory.mark_static_initialized(
vtable.alloc_id,
Mutability::Immutable,
)?;
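For orientation, the vtable built above consists of three pointer-sized header slots (drop glue, size, align) followed by one slot per method, which is where the `ptr_size * (3 + i)` offsets come from. A small sketch of that offset arithmetic, assuming a hypothetical 8-byte pointer target:

```rust
fn main() {
    let ptr_size: u64 = 8; // assumed target pointer size
    let num_methods: u64 = 2; // hypothetical trait with two methods

    // Slot layout: [drop_in_place, size, align, method 0, method 1, ...]
    println!("drop fn  at offset {}", 0);
    println!("size     at offset {}", ptr_size);
    println!("align    at offset {}", 2 * ptr_size);
    for i in 0..num_methods {
        // mirrors `vtable.offset(ptr_size * (3 + i as u64), ...)` above
        println!("method {} at offset {}", i, ptr_size * (3 + i));
    }
}
```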
@@ -76,7 +76,7 @@ pub fn read_drop_type_from_vtable(
match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? {
// some values don't need to call a drop impl, so the value is null
Value::Scalar(Scalar::Bits { bits: 0, defined} ) if defined == pointer_size => Ok(None),
Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory().get_fn(drop_fn).map(Some),
Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
_ => err!(ReadBytesAsPointer),
}
}
@@ -85,10 +85,10 @@ pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer,
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory().pointer_size();
let pointer_size = self.memory.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
let size = self.memory().read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
let align = self.memory().read_ptr_sized(
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
let align = self.memory.read_ptr_sized(
vtable.offset(pointer_size * 2, self)?,
pointer_align
)?.to_bits(pointer_size)? as u64;
......