Fixup long code lines

Parent 35f25bfe
@@ -240,7 +240,12 @@ fn cast_from_int(
         }
     }

-    fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> {
+    fn cast_from_float(
+        &self,
+        bits: u128,
+        fty: FloatTy,
+        dest_ty: Ty<'tcx>
+    ) -> EvalResult<'tcx, Scalar> {
         use rustc::ty::TyKind::*;
         use rustc_apfloat::FloatConvert;
         match dest_ty.sty {
...
@@ -280,7 +280,9 @@ fn data_layout(&self) -> &layout::TargetDataLayout {
     }
 }

-impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> layout::HasTyCtxt<'tcx> for &'a EvalContext<'a, 'mir, 'tcx, M> {
+impl<'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for &'a EvalContext<'a, 'mir, 'tcx, M>
+    where M: Machine<'mir, 'tcx>
+{
     #[inline]
     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
         *self.tcx
...
@@ -340,7 +342,8 @@ pub fn new(
     pub(crate) fn with_fresh_body<F: FnOnce(&mut Self) -> R, R>(&mut self, f: F) -> R {
         let stack = mem::replace(&mut self.stack, Vec::new());
-        let steps = mem::replace(&mut self.steps_since_detector_enabled, -STEPS_UNTIL_DETECTOR_ENABLED);
+        let steps = mem::replace(&mut self.steps_since_detector_enabled,
+                                 -STEPS_UNTIL_DETECTOR_ENABLED);
         let r = f(self);
         self.stack = stack;
         self.steps_since_detector_enabled = steps;
...
@@ -389,7 +392,8 @@ pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
         Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx))
     }

-    pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, ty::Instance<'tcx>> {
+    pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>)
+        -> EvalResult<'tcx, ty::Instance<'tcx>> {
         trace!("resolve: {:?}, {:#?}", def_id, substs);
         trace!("substs: {:#?}", self.substs());
         trace!("param_env: {:#?}", self.param_env);
...
@@ -416,7 +420,9 @@ pub fn load_mir(
     ) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> {
         // do not continue if typeck errors occurred (can only occur in local crate)
         let did = instance.def_id();
-        if did.is_local() && self.tcx.has_typeck_tables(did) && self.tcx.typeck_tables_of(did).tainted_by_errors {
+        if did.is_local()
+            && self.tcx.has_typeck_tables(did)
+            && self.tcx.typeck_tables_of(did).tainted_by_errors {
             return err!(TypeckError);
         }
         trace!("load mir {:?}", instance);
...
@@ -663,7 +669,8 @@ pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Cons
         } else {
             self.param_env
         };
-        self.tcx.const_eval(param_env.and(gid)).map_err(|err| EvalErrorKind::ReferencedConstant(err).into())
+        self.tcx.const_eval(param_env.and(gid))
+            .map_err(|err| EvalErrorKind::ReferencedConstant(err).into())
     }

     #[inline(always)]
...
@@ -769,7 +776,8 @@ pub fn generate_stacktrace(&self, explicit_span: Option<Span>) -> (Vec<FrameInfo
         } else {
             last_span = Some(span);
         }
-        let location = if self.tcx.def_key(instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr {
+        let location = if self.tcx.def_key(instance.def_id()).disambiguated_data.data
+            == DefPathData::ClosureExpr {
             "closure".to_owned()
         } else {
             instance.to_string()
...
@@ -241,7 +241,9 @@ pub fn deallocate(
             }
         };

-        let alloc_kind = self.alloc_kind.remove(&ptr.alloc_id).expect("alloc_map out of sync with alloc_kind");
+        let alloc_kind = self.alloc_kind
+            .remove(&ptr.alloc_id)
+            .expect("alloc_map out of sync with alloc_kind");

         // It is okay for us to still holds locks on deallocation -- for example, we could store
         // data we own in a local, and the local could be deallocated (from StorageDead) before the
...
@@ -259,7 +261,11 @@ pub fn deallocate(
         }

         if let Some((size, align)) = size_and_align {
             if size.bytes() != alloc.bytes.len() as u64 || align != alloc.align {
-                return err!(IncorrectAllocationInformation(size, Size::from_bytes(alloc.bytes.len() as u64), align, alloc.align));
+                let bytes = Size::from_bytes(alloc.bytes.len() as u64);
+                return err!(IncorrectAllocationInformation(size,
+                                                           bytes,
+                                                           align,
+                                                           alloc.align));
             }
         }
...
@@ -678,7 +684,8 @@ pub fn copy_repeatedly(
                     relocations
                         .iter()
                         .map(|&(offset, alloc_id)| {
-                            (offset + dest.offset - src.offset + (i * size * relocations.len() as u64), alloc_id)
+                            (offset + dest.offset - src.offset + (i * size * relocations.len() as u64),
+                             alloc_id)
                         })
                 );
             }
...
@@ -707,11 +714,15 @@ pub fn copy_repeatedly(
                 }

                 for i in 0..length {
-                    ptr::copy(src_bytes, dest_bytes.offset((size.bytes() * i) as isize), size.bytes() as usize);
+                    ptr::copy(src_bytes,
+                              dest_bytes.offset((size.bytes() * i) as isize),
+                              size.bytes() as usize);
                 }
             } else {
                 for i in 0..length {
-                    ptr::copy_nonoverlapping(src_bytes, dest_bytes.offset((size.bytes() * i) as isize), size.bytes() as usize);
+                    ptr::copy_nonoverlapping(src_bytes,
+                                             dest_bytes.offset((size.bytes() * i) as isize),
+                                             size.bytes() as usize);
                 }
             }
         }
...
@@ -778,7 +789,8 @@ pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<
     }

     /// Read a *non-ZST* scalar
-    pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, ScalarMaybeUndef> {
+    pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size)
+        -> EvalResult<'tcx, ScalarMaybeUndef> {
         // Make sure we don't read part of a pointer as a pointer
         self.check_relocation_edges(ptr, size)?;
         let endianness = self.endianness();
...
@@ -801,7 +813,10 @@ pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalRes
             } else {
                 let alloc = self.get(ptr.alloc_id)?;
                 match alloc.relocations.get(&ptr.offset) {
-                    Some(&alloc_id) => return Ok(ScalarMaybeUndef::Scalar(Pointer::new(alloc_id, Size::from_bytes(bits as u64)).into())),
+                    Some(&alloc_id) => {
+                        let ptr = Pointer::new(alloc_id, Size::from_bytes(bits as u64));
+                        return Ok(ScalarMaybeUndef::Scalar(ptr.into()))
+                    }
                     None => {},
                 }
             }
...
@@ -812,7 +827,8 @@ pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalRes
         }))
     }

-    pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) -> EvalResult<'tcx, ScalarMaybeUndef> {
+    pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align)
+        -> EvalResult<'tcx, ScalarMaybeUndef> {
         self.read_scalar(ptr, ptr_align, self.pointer_size())
     }
...
@@ -865,7 +881,8 @@ pub fn write_scalar(
         Ok(())
     }

-    pub fn write_ptr_sized(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> {
+    pub fn write_ptr_sized(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef)
+        -> EvalResult<'tcx> {
         let ptr_size = self.pointer_size();
         self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
     }
...
@@ -1009,7 +1026,9 @@ pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
     fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M>;
 }

-impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M> {
+impl<'a, 'mir, 'tcx, M> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M>
+    where M: Machine<'mir, 'tcx>
+{
     #[inline]
     fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
         self
...
@@ -1021,7 +1040,9 @@ fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
     }
 }

-impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> {
+impl<'a, 'mir, 'tcx, M> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M>
+    where M: Machine<'mir, 'tcx>
+{
     #[inline]
     fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
         &mut self.memory
...
@@ -1033,7 +1054,9 @@ fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
     }
 }

-impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> layout::HasDataLayout for &'a Memory<'a, 'mir, 'tcx, M> {
+impl<'a, 'mir, 'tcx, M> layout::HasDataLayout for &'a Memory<'a, 'mir, 'tcx, M>
+    where M: Machine<'mir, 'tcx>
+{
     #[inline]
     fn data_layout(&self) -> &TargetDataLayout {
         &self.tcx.data_layout
...
@@ -68,7 +68,8 @@ pub fn binary_op(
             layout::Abi::Scalar(ref scalar) => scalar.value,
             _ => return err!(TypeNotPrimitive(right_layout.ty)),
         };
-        trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind);
+        trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+               bin_op, left, left_kind, right, right_kind);

         // I: Handle operations that support pointers
         if !left_kind.is_float() && !right_kind.is_float() {
...
@@ -287,11 +287,13 @@ pub fn mplace_field(
                 offsets[usize::try_from(field).unwrap()],
             layout::FieldPlacement::Array { stride, .. } => {
                 let len = base.len();
-                assert!(field < len, "Tried to access element {} of array/slice with length {}", field, len);
+                assert!(field < len,
+                        "Tried to access element {} of array/slice with length {}", field, len);
                 stride * field
             }
             layout::FieldPlacement::Union(count) => {
-                assert!(field < count as u64, "Tried to access field {} of union with {} fields", field, count);
+                assert!(field < count as u64,
+                        "Tried to access field {} of union with {} fields", field, count);
                 // Offset is always 0
                 Size::from_bytes(0)
             }
...
@@ -604,7 +606,8 @@ fn write_value_to_mplace(
             Value::ScalarPair(a_val, b_val) => {
                 let (a, b) = match dest.layout.abi {
                     layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
-                    _ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}", dest.layout)
+                    _ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}",
+                              dest.layout)
                 };
                 let (a_size, b_size) = (a.size(&self), b.size(&self));
                 let (a_align, b_align) = (a.align(&self), b.align(&self));
...
@@ -770,7 +773,8 @@ pub fn place_to_op(&self, place: PlaceTy<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>>
     /// Turn a place that is a dyn trait (i.e., PlaceExtra::Vtable and the appropriate layout)
     /// or a slice into the specific fixed-size place and layout that is given by the vtable/len.
     /// This "unpacks" the existential quantifier, so to speak.
-    pub fn unpack_unsized_mplace(&self, mplace: MPlaceTy<'tcx>) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+    pub fn unpack_unsized_mplace(&self, mplace: MPlaceTy<'tcx>)
+        -> EvalResult<'tcx, MPlaceTy<'tcx>> {
         trace!("Unpacking {:?} ({:?})", *mplace, mplace.layout.ty);
         let layout = match mplace.extra {
             PlaceExtra::Vtable(vtable) => {
...
@@ -55,7 +55,10 @@ pub(super) fn eval_terminator(
                 for (index, &const_int) in values.iter().enumerate() {
                     // Compare using binary_op
-                    let const_int = Scalar::Bits { bits: const_int, size: discr.layout.size.bytes() as u8 };
+                    let const_int = Scalar::Bits {
+                        bits: const_int,
+                        size: discr.layout.size.bytes() as u8
+                    };
                     let (res, _) = self.binary_op(mir::BinOp::Eq,
                         discr,
                         ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout }
...
@@ -154,7 +157,9 @@ pub(super) fn eval_terminator(
                 target,
                 ..
             } => {
-                let cond_val = self.eval_operand_and_read_value(cond, None)?.to_scalar()?.to_bool()?;
+                let cond_val = self.eval_operand_and_read_value(cond, None)?
+                    .to_scalar()?
+                    .to_bool()?;
                 if expected == cond_val {
                     self.goto_block(target);
                 } else {
...
@@ -239,15 +244,24 @@ fn check_ty_compat<'tcx>(ty: Ty<'tcx>, real_ty: Ty<'tcx>) -> bool {
         // We need to allow what comes up when a non-capturing closure is cast to a fn().
         match (sig.abi, real_sig.abi) {
             (Abi::Rust, Abi::RustCall) // check the ABIs. This makes the test here non-symmetric.
-                if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => {
+                if check_ty_compat(sig.output(), real_sig.output())
+                    && real_sig.inputs_and_output.len() == 3 => {
                 // First argument of real_sig must be a ZST
                 let fst_ty = real_sig.inputs_and_output[0];
                 if self.layout_of(fst_ty)?.is_zst() {
                     // Second argument must be a tuple matching the argument list of sig
                     let snd_ty = real_sig.inputs_and_output[1];
                     match snd_ty.sty {
-                        ty::Tuple(tys) if sig.inputs().len() == tys.len() =>
-                            if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
+<<<<<<< HEAD
+                        ty::Tuple(tys) if sig.inputs().len() == tys.len() =>
+                            if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
+=======
+                        ty::TyTuple(tys) if sig.inputs().len() == tys.len() =>
+                            if sig.inputs()
+                                .iter()
+                                .zip(tys)
+                                .all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
+>>>>>>> 7d30ba9... Fixup long code lines
                                 return Ok(true)
                             },
                         _ => {}
...
@@ -304,7 +318,8 @@ fn eval_fn_call(
                 trace!(
                     "args: {:#?}",
                     self.frame().mir.args_iter().zip(args.iter())
-                        .map(|(local, arg)| (local, **arg, arg.layout.ty)).collect::<Vec<_>>()
+                        .map(|(local, arg)| (local, **arg, arg.layout.ty))
+                        .collect::<Vec<_>>()
                 );
                 let local = arg_locals.nth(1).unwrap();
                 for (i, &op) in args.into_iter().enumerate() {
...
@@ -325,7 +340,7 @@ fn eval_fn_call(
             ty::InstanceDef::Item(_) => {
                 // Push the stack frame, and potentially be entirely done if the call got hooked
                 if M::eval_fn_call(self, instance, destination, args, span)? {
-                    // TODO: Can we make it return the frame to push, instead
+                    // FIXME: Can we make it return the frame to push, instead
                     // of the hook doing half of the work and us doing the argument
                     // initialization?
                     return Ok(());
...
@@ -92,7 +92,8 @@ pub fn read_size_and_align_from_vtable(
     ) -> EvalResult<'tcx, (Size, Align)> {
         let pointer_size = self.memory.pointer_size();
         let pointer_align = self.tcx.data_layout.pointer_align;
-        let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
+        let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?
+            .to_bits(pointer_size)? as u64;
         let align = self.memory.read_ptr_sized(
             vtable.offset(pointer_size * 2, self)?,
             pointer_align
...
@@ -221,7 +221,10 @@ pub fn validate_mplace(
                 let variant = self.read_discriminant_as_variant_index(dest.into())?;
                 let inner_dest = self.mplace_downcast(dest, variant)?;
                 // Put the variant projection onto the path, as a field
-                path.push(PathElem::Field(dest.layout.ty.ty_adt_def().unwrap().variants[variant].name));
+                path.push(PathElem::Field(dest.layout.ty
+                    .ty_adt_def()
+                    .unwrap()
+                    .variants[variant].name));
                 trace!("variant layout: {:#?}", dest.layout);
                 (variant, inner_dest)
             },
...
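For readers skimming the diff: the commit applies three recurring wrapping patterns, namely moving long trait bounds into `where` clauses, breaking long function signatures before the return type, and splitting long expressions one call per line. A minimal standalone sketch of the same patterns follows; the `Machine`/`EvalContext` names below are hypothetical stand-ins for illustration, not the rustc types touched above.

    // Illustrative only: simplified stand-ins, not the rustc sources.
    trait Machine {}

    struct EvalContext<M> {
        steps: i64,
        machine: M,
    }

    // Pattern 1: a bound that would overflow the line moves to a `where` clause.
    impl<M> Default for EvalContext<M>
        where M: Machine + Default
    {
        fn default() -> Self {
            EvalContext { steps: 0, machine: M::default() }
        }
    }

    impl<M: Machine> EvalContext<M> {
        // Pattern 2: a long signature breaks before the return type.
        fn resolve(&self, def_id: u64, extra: u64)
            -> Result<u64, String> {
            Ok(def_id + extra)
        }

        // Pattern 3: a long method chain splits one call per line.
        fn describe(&self, items: &[u64]) -> String {
            items.iter()
                .map(|i| i.to_string())
                .collect::<Vec<_>>()
                .join(", ")
        }
    }

    #[derive(Default)]
    struct DummyMachine;
    impl Machine for DummyMachine {}

    fn main() {
        let cx = EvalContext::<DummyMachine>::default();
        println!("{:?}", cx.resolve(1, 2));
        println!("{}", cx.describe(&[1, 2, 3]));
    }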