提交 788c5f3c 编写于 作者: D Dylan MacKenzie

Revert "Refactor EvalContext stack and heap into inner struct"

This reverts commit 59d21c526c036d7097d05edd6dffdad9c5b1cb62, and uses a
tuple to store the mutable parts of an EvalContext (which now includes
the `Machine`). This requires that `Machine` be `Clone`.
上级 7f9b01a0
......@@ -76,8 +76,8 @@ fn cast_from_int(
// No alignment check needed for raw pointers. But we have to truncate to target ptr size.
TyRawPtr(_) => {
Ok(Scalar::Bits {
bits: self.memory().truncate_to_ptr(v).0 as u128,
defined: self.memory().pointer_size().bits() as u8,
bits: self.memory.truncate_to_ptr(v).0 as u128,
defined: self.memory.pointer_size().bits() as u8,
})
},
......@@ -92,7 +92,7 @@ fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalRe
match dest_ty.sty {
// float -> uint
TyUint(t) => {
let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize);
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
match fty {
FloatTy::F32 => Ok(Scalar::Bits {
bits: Single::from_bits(bits).to_u128(width).value,
......@@ -106,7 +106,7 @@ fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalRe
},
// float -> int
TyInt(t) => {
let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize);
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
match fty {
FloatTy::F32 => Ok(Scalar::Bits {
bits: Single::from_bits(bits).to_i128(width).value as u128,
......
......@@ -88,7 +88,7 @@ pub fn value_to_const_value<'tcx>(
Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a, b)),
Value::ByRef(ptr, align) => {
let ptr = ptr.to_ptr().unwrap();
let alloc = ecx.memory().get(ptr.alloc_id)?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi() >= align.abi());
assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes());
let mut alloc = alloc.clone();
......@@ -149,7 +149,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>(
}
let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?;
assert!(!layout.is_unsized());
let ptr = ecx.memory_mut().allocate(
let ptr = ecx.memory.allocate(
layout.size,
layout.align,
MemoryKind::Stack,
......@@ -185,6 +185,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>(
Ok((value, ptr, layout.ty))
}
/// Zero-sized marker type for the compile-time (const) evaluator.
/// Derives `Clone`/`Eq`/`Hash` so the containing evaluator state can be
/// snapshotted and compared (e.g. for loop detection).
/// NOTE(review): presumably this is the `Machine` implementation used for
/// const eval — confirm at the `impl Machine for CompileTimeEvaluator` site.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct CompileTimeEvaluator;
impl<'tcx> Into<EvalError<'tcx>> for ConstEvalError {
......@@ -486,7 +487,7 @@ pub fn const_variant_index<'a, 'tcx>(
let (ptr, align) = match value {
Value::ScalarPair(..) | Value::Scalar(_) => {
let layout = ecx.layout_of(val.ty)?;
let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?.into();
let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?.into();
ecx.write_value_to_ptr(value, ptr, layout.align, val.ty)?;
(ptr, layout.align)
},
......@@ -515,9 +516,9 @@ pub fn const_value_to_allocation_provider<'a, 'tcx>(
());
let value = ecx.const_to_value(val.val)?;
let layout = ecx.layout_of(val.ty)?;
let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?;
let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?;
ecx.write_value_to_ptr(value, ptr.into(), layout.align, val.ty)?;
let alloc = ecx.memory().get(ptr.alloc_id)?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
Ok(tcx.intern_const_alloc(alloc.clone()))
};
result().expect("unable to convert ConstValue to Allocation")
......
......@@ -32,6 +32,7 @@ pub enum MemoryKind<T> {
// Top-level interpreter memory
////////////////////////////////////////////////////////////////////////////////
#[derive(Clone)]
pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
/// Additional data required by the Machine
pub data: M::MemoryData,
......@@ -48,21 +49,6 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}
impl<'a, 'mir, 'tcx, M> Clone for Memory<'a, 'mir, 'tcx, M>
    where M: Machine<'mir, 'tcx>,
          'tcx: 'a + 'mir,
{
    /// Deep-copies the interpreter memory, field by field.
    ///
    /// Written manually (rather than `#[derive(Clone)]`) because the derive
    /// would require `M: Clone`, while only `M::MemoryData` needs to be
    /// cloneable here.
    fn clone(&self) -> Self {
        // Destructure exhaustively so that adding a field to `Memory`
        // produces a compile error here instead of a silently wrong clone.
        let Memory {
            ref data,
            ref alloc_kind,
            ref alloc_map,
            ref cur_frame,
            ref tcx,
        } = *self;
        Memory {
            data: data.clone(),
            alloc_kind: alloc_kind.clone(),
            alloc_map: alloc_map.clone(),
            cur_frame: cur_frame.clone(),
            tcx: tcx.clone(),
        }
    }
}
impl<'a, 'mir, 'tcx, M> Eq for Memory<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
'tcx: 'a + 'mir,
......@@ -1067,12 +1053,12 @@ fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> {
    /// Mutable access to the interpreter's memory.
    #[inline]
    fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
        // Borrow the field directly. The duplicated `self.memory_mut()`
        // body left over from the diff was both invalid (two trailing
        // expressions) and, on its own, unconditional infinite recursion.
        &mut self.memory
    }

    /// Shared access to the interpreter's memory.
    #[inline]
    fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
        &self.memory
    }
}
......
......@@ -201,7 +201,7 @@ pub fn read_place(&self, place: Place) -> EvalResult<'tcx, Value> {
assert_eq!(extra, PlaceExtra::None);
Ok(Value::ByRef(ptr, align))
}
Place::Local { frame, local } => self.stack()[frame].get_local(local),
Place::Local { frame, local } => self.stack[frame].get_local(local),
}
}
......@@ -261,7 +261,7 @@ pub fn place_field(
let (base_ptr, base_align, base_extra) = match base {
Place::Ptr { ptr, align, extra } => (ptr, align, extra),
Place::Local { frame, local } => {
match (&self.stack()[frame].get_local(local)?, &base_layout.abi) {
match (&self.stack[frame].get_local(local)?, &base_layout.abi) {
// in case the field covers the entire type, just return the value
(&Value::Scalar(_), &layout::Abi::Scalar(_)) |
(&Value::ScalarPair(..), &layout::Abi::ScalarPair(..))
......
......@@ -2,18 +2,22 @@
//!
//! The main entry point is the `step` method.
use std::hash::Hash;
use rustc::mir;
use rustc::mir::interpret::EvalResult;
use super::{EvalContext, Machine};
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
where M: Clone + Eq + Hash,
{
pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) {
self.steps_until_detector_enabled
= self.steps_until_detector_enabled.saturating_sub(n);
if self.steps_until_detector_enabled == 0 {
let _ = self.loop_detector.observe(&self.state); // TODO: Handle error
let _ = self.loop_detector.observe(&self.machine, &self.stack, &self.memory); // TODO: Handle error
// FIXME(#49980): make this warning a lint
self.tcx.sess.span_warn(self.frame().span, "Constant evaluating a complex constant, this might take some time");
......@@ -23,7 +27,7 @@ pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) {
/// Returns true as long as there are more things to do.
pub fn step(&mut self) -> EvalResult<'tcx, bool> {
if self.stack().is_empty() {
if self.stack.is_empty() {
return Ok(false);
}
......@@ -57,7 +61,7 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
// *before* executing the statement.
let frame_idx = self.cur_frame();
self.tcx.span = stmt.source_info.span;
self.memory_mut().tcx.span = stmt.source_info.span;
self.memory.tcx.span = stmt.source_info.span;
match stmt.kind {
Assign(ref place, ref rvalue) => self.eval_rvalue_into_place(rvalue, place)?,
......@@ -106,16 +110,16 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
InlineAsm { .. } => return err!(InlineAsm),
}
self.stack_mut()[frame_idx].stmt += 1;
self.stack[frame_idx].stmt += 1;
Ok(())
}
fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
trace!("{:?}", terminator.kind);
self.tcx.span = terminator.source_info.span;
self.memory_mut().tcx.span = terminator.source_info.span;
self.memory.tcx.span = terminator.source_info.span;
self.eval_terminator(terminator)?;
if !self.stack().is_empty() {
if !self.stack.is_empty() {
trace!("// {:?}", self.frame().block);
}
Ok(())
......
......@@ -71,7 +71,7 @@ pub(super) fn eval_terminator(
let (fn_def, sig) = match func.ty.sty {
ty::TyFnPtr(sig) => {
let fn_ptr = self.value_to_scalar(func)?.to_ptr()?;
let instance = self.memory().get_fn(fn_ptr)?;
let instance = self.memory.get_fn(fn_ptr)?;
let instance_ty = instance.ty(*self.tcx);
match instance_ty.sty {
ty::TyFnDef(..) => {
......@@ -377,14 +377,14 @@ fn eval_fn_call(
}
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory().pointer_size();
let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?;
let fn_ptr = self.memory().read_ptr_sized(
let fn_ptr = self.memory.read_ptr_sized(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
ptr_align
)?.to_ptr()?;
let instance = self.memory().get_fn(fn_ptr)?;
let instance = self.memory.get_fn(fn_ptr)?;
let mut args = args.to_vec();
let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty;
args[0].ty = ty;
......
......@@ -25,26 +25,26 @@ pub fn get_vtable(
let size = layout.size.bytes();
let align = layout.align.abi();
let ptr_size = self.memory().pointer_size();
let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let methods = self.tcx.vtable_methods(trait_ref);
let vtable = self.memory_mut().allocate(
let vtable = self.memory.allocate(
ptr_size * (3 + methods.len() as u64),
ptr_align,
MemoryKind::Stack,
)?;
let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
let drop = self.memory_mut().create_fn_alloc(drop);
self.memory_mut().write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?;
let drop = self.memory.create_fn_alloc(drop);
self.memory.write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?;
let size_ptr = vtable.offset(ptr_size, &self)?;
self.memory_mut().write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits {
self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits {
bits: size as u128,
defined: ptr_size.bits() as u8,
})?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
self.memory_mut().write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits {
self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits {
bits: align as u128,
defined: ptr_size.bits() as u8,
})?;
......@@ -52,13 +52,13 @@ pub fn get_vtable(
for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory_mut().create_fn_alloc(instance);
let fn_ptr = self.memory.create_fn_alloc(instance);
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
self.memory_mut().write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?;
self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?;
}
}
self.memory_mut().mark_static_initialized(
self.memory.mark_static_initialized(
vtable.alloc_id,
Mutability::Immutable,
)?;
......@@ -76,7 +76,7 @@ pub fn read_drop_type_from_vtable(
match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? {
// some values don't need to call a drop impl, so the value is null
Value::Scalar(Scalar::Bits { bits: 0, defined} ) if defined == pointer_size => Ok(None),
Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory().get_fn(drop_fn).map(Some),
Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
_ => err!(ReadBytesAsPointer),
}
}
......@@ -85,10 +85,10 @@ pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer,
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory().pointer_size();
let pointer_size = self.memory.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
let size = self.memory().read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
let align = self.memory().read_ptr_sized(
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
let align = self.memory.read_ptr_sized(
vtable.offset(pointer_size * 2, self)?,
pointer_align
)?.to_bits(pointer_size)? as u64;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册