Commit 5b75ec0a authored by Ralf Jung

fix validation around transmuting copy_op

Parent c47785f6
@@ -322,7 +322,8 @@ fn unsize_into_ptr(
// For now, upcasts are limited to changes in marker
// traits, and hence never require an actual
// change to the vtable.
self.copy_op(src, dest)
let val = self.read_value(src)?;
self.write_value(*val, dest)
}
(_, &ty::Dynamic(ref data, _)) => {
// Initial cast from sized to dyn trait
......
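The upcast branch above only needs to copy the value because a marker-trait-only upcast keeps the vtable unchanged. A user-level sketch of such a coercion (illustrative only, not interpreter code):

```rust
use std::fmt::Debug;

fn main() {
    // Dropping the `Send` marker from the trait object changes only marker
    // traits, so no new vtable is needed and the pointer is copied as-is.
    let x: &(dyn Debug + Send) = &42u32;
    let y: &dyn Debug = x;
    println!("{:?}", y);
}
```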
@@ -151,11 +151,7 @@ pub fn emulate_intrinsic(
self.write_scalar(val, dest)?;
}
"transmute" => {
// Go through an allocation, to make sure the completely different layouts
// do not pose a problem. (When the user transmutes through a union,
// there will not be a layout mismatch.)
let dest = self.force_allocation(dest)?;
self.copy_op(args[0], dest.into())?;
self.copy_op_transmute(args[0], dest)?;
}
_ => return Ok(false),
......
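For context, a minimal user-level example of the kind of call that the intrinsic path above now routes through `copy_op_transmute`: source and destination have the same size but unrelated layouts (a sketch, not part of the patch):

```rust
fn main() {
    let x: u32 = 0x2a;
    // Same size (4 bytes), but `u32` and `[u8; 4]` have different layouts.
    let bytes: [u8; 4] = unsafe { std::mem::transmute(x) };
    println!("{:?}", bytes);
}
```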
@@ -599,18 +599,47 @@ pub fn write_scalar(
}
/// Write a value to a place
#[inline(always)]
pub fn write_value(
&mut self,
src_val: Value<M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
trace!("write_value: {:?} <- {:?}", *dest, src_val);
// Check that the value actually is okay for that type
self.write_value_no_validate(src_val, dest)?;
if M::ENFORCE_VALIDITY {
// Something changed somewhere, better make sure it matches the type!
let op = OpTy { op: Operand::Immediate(src_val), layout: dest.layout };
self.validate_operand(op, &mut vec![], None, /*const_mode*/false)?;
// Data got changed, better make sure it matches the type!
self.validate_operand(self.place_to_op(dest)?, &mut vec![], None, /*const_mode*/false)?;
}
Ok(())
}
/// Write a value to a place.
/// If you use this you are responsible for validating that things got copied at the
/// right type.
fn write_value_no_validate(
&mut self,
src_val: Value<M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
if cfg!(debug_assertions) {
// This is a very common path, avoid some checks in release mode
assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
match src_val {
Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Ptr(_))) =>
assert_eq!(self.pointer_size(), dest.layout.size,
"Size mismatch when writing pointer"),
Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size, .. })) =>
assert_eq!(Size::from_bytes(size.into()), dest.layout.size,
"Size mismatch when writing bits"),
Value::Scalar(ScalarMaybeUndef::Undef) => {}, // undef can have any size
Value::ScalarPair(_, _) => {
// FIXME: Can we check anything here?
}
}
}
trace!("write_value: {:?} <- {:?}: {}", *dest, src_val, dest.layout.ty);
// See if we can avoid an allocation. This is the counterpart to `try_read_value`,
// but not factored as a separate function.
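A standalone sketch of the structure used in this hunk, with illustrative names and types rather than the real interpreter API: a public entry point that validates after the write when the machine demands it, and a private `*_no_validate` worker that only performs the write:

```rust
struct Interp {
    enforce_validity: bool, // stands in for M::ENFORCE_VALIDITY
    memory: Vec<u8>,
}

impl Interp {
    pub fn write_value(&mut self, offset: usize, val: u8) -> Result<(), String> {
        self.write_value_no_validate(offset, val)?;
        if self.enforce_validity {
            // Data got changed, better make sure it still looks valid.
            self.validate(offset)?;
        }
        Ok(())
    }

    // Callers are responsible for validation, mirroring the split above.
    fn write_value_no_validate(&mut self, offset: usize, val: u8) -> Result<(), String> {
        let slot = self
            .memory
            .get_mut(offset)
            .ok_or_else(|| "write out of bounds".to_string())?;
        *slot = val;
        Ok(())
    }

    fn validate(&self, offset: usize) -> Result<(), String> {
        // Placeholder for the real `validate_operand` walk.
        if self.memory[offset] == 0xff {
            return Err("invalid value".to_string());
        }
        Ok(())
    }
}

fn main() {
    let mut interp = Interp { enforce_validity: true, memory: vec![0; 4] };
    interp.write_value(2, 7).unwrap();
    assert_eq!(interp.memory[2], 7);
}
```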
@@ -627,15 +656,16 @@ pub fn write_value(
},
Place::Ptr(mplace) => mplace, // already in memory
};
let dest = MPlaceTy { mplace, layout: dest.layout };
// This is already in memory, write there.
let dest = MPlaceTy { mplace, layout: dest.layout };
self.write_value_to_mplace(src_val, dest)
self.write_value_to_mplace_no_validate(src_val, dest)
}
/// Write a value to memory. This does NOT do validation, so you had better have
/// already done that before calling this!
fn write_value_to_mplace(
/// Write a value to memory.
/// If you use this you are responsible for validating that things got copied at the
/// right type.
fn write_value_to_mplace_no_validate(
&mut self,
value: Value<M::PointerTag>,
dest: MPlaceTy<'tcx, M::PointerTag>,
@@ -653,8 +683,17 @@ fn write_value_to_mplace(
}
let ptr = ptr.to_ptr()?;
// FIXME: We should check that there are dest.layout.size many bytes available in
// memory. The code below is not sufficient, with enough padding it might not
// cover all the bytes!
match value {
Value::Scalar(scalar) => {
match dest.layout.abi {
layout::Abi::Scalar(_) => {}, // fine
_ => bug!("write_value_to_mplace: invalid Scalar layout: {:#?}",
dest.layout)
}
self.memory.write_scalar(
ptr, ptr_align.min(dest.layout.align), scalar, dest.layout.size
)
@@ -670,45 +709,109 @@ fn write_value_to_mplace(
let b_offset = a_size.abi_align(b_align);
let b_ptr = ptr.offset(b_offset, &self)?.into();
// It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
// but that does not work: We could be a newtype around a pair, then the
// fields do not match the `ScalarPair` components.
self.memory.write_scalar(ptr, ptr_align.min(a_align), a_val, a_size)?;
self.memory.write_scalar(b_ptr, ptr_align.min(b_align), b_val, b_size)
}
}
}
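The comment about newtypes in the `ScalarPair` branch above can be made concrete with a user-level example (illustrative only, not interpreter code): `Wrap` has a single field at offset 0, yet its ABI is a pair of scalars, so the second pair component does not line up with any field offset of `Wrap` itself:

```rust
struct Wrap((u32, u32)); // one field, but two scalar components in the ABI

fn main() {
    let w = Wrap((1, 2));
    println!("{} {}", (w.0).0, (w.0).1);
}
```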
/// Copy the data from an operand to a place
/// Copy the data from an operand to a place. This does not support transmuting!
/// Use `copy_op_transmute` if the layouts could disagree.
#[inline(always)]
pub fn copy_op(
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
self.copy_op_no_validate(src, dest)?;
if M::ENFORCE_VALIDITY {
// Data got changed, better make sure it matches the type!
self.validate_operand(self.place_to_op(dest)?, &mut vec![], None, /*const_mode*/false)?;
}
Ok(())
}
/// Copy the data from an operand to a place. This does not support transmuting!
/// Use `copy_op_transmute` if the layouts could disagree.
/// Also, if you use this you are responsible for validating that things got copied at the
/// right type.
fn copy_op_no_validate(
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
debug_assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
"Cannot copy unsized data");
assert_eq!(src.layout.size, dest.layout.size,
"Size mismatch when copying!\nsrc: {:#?}\ndest: {:#?}", src, dest);
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
assert!(src.layout.details == dest.layout.details,
"Layout mismatch when copying!\nsrc: {:#?}\ndest: {:#?}", src, dest);
// Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
let (src_ptr, src_align) = match self.try_read_value(src)? {
Ok(src_val) =>
// Yay, we got a value that we can write directly. We write with the
// *source layout*, because that was used to load, and if they do not match
// this is a transmute we want to support.
return self.write_value(src_val, PlaceTy { place: *dest, layout: src.layout }),
Err(mplace) => mplace.to_scalar_ptr_align(),
let src = match self.try_read_value(src)? {
Ok(src_val) => {
// Yay, we got a value that we can write directly.
return self.write_value_no_validate(src_val, dest);
}
Err(mplace) => mplace,
};
// Slow path, this does not fit into an immediate. Just memcpy.
trace!("copy_op: {:?} <- {:?}", *dest, *src);
trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
let dest = self.force_allocation(dest)?;
let (src_ptr, src_align) = src.to_scalar_ptr_align();
let (dest_ptr, dest_align) = dest.to_scalar_ptr_align();
self.memory.copy(
src_ptr, src_align,
dest_ptr, dest_align,
src.layout.size, false
dest.layout.size, false
)?;
Ok(())
}
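A small user-level example of the `&mut T` to `&T` point made in the comments above: the assignment below is well-typed without any cast, so the source and destination types differ while their layout details agree (a sketch, not part of the patch):

```rust
fn main() {
    let mut x = 5;
    let r: &i32 = &mut x; // a `&mut i32` ends up in a `&i32` place, no cast
    println!("{}", r);
}
```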
/// Copy the data from an operand to a place. The layouts may disagree, but they must
/// have the same size.
pub fn copy_op_transmute(
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
if src.layout.details == dest.layout.details {
// Fast path: Just use normal `copy_op`
return self.copy_op(src, dest);
}
// We still require the sizes to match
debug_assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
"Cannot copy unsized data");
assert!(src.layout.size == dest.layout.size,
"Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
// The hard case is `ScalarPair`. `src` is already read from memory in this case,
// using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
// We have to write them to `dest` at the offsets they were *read at*, which is
// not necessarily the same as the offsets in `dest.layout`!
// Hence we do the copy with the source layout on both sides. We also make sure to write
// into memory, because if `dest` is a local we would not even have a way to write
// at the `src` offsets; the fact that we came from a different layout would
// just be lost.
let dest = self.force_allocation(dest)?;
self.copy_op_no_validate(
src,
PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }),
)?;
if M::ENFORCE_VALIDITY {
// Something changed somewhere, better make sure it matches the type!
// Data got changed, better make sure it matches the type!
self.validate_operand(dest.into(), &mut vec![], None, /*const_mode*/false)?;
}
Ok(())
}
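A user-level sketch of the hard case described above: the source `(u32, u32)` is read as a pair of scalars, while the destination `u64` is a single scalar of the same size, which is why the copy has to go through memory using the source layout:

```rust
fn main() {
    let pair: (u32, u32) = (1, 2);
    // Same size (8 bytes), but the two layouts classify the bytes differently.
    let word: u64 = unsafe { std::mem::transmute(pair) };
    println!("{:#018x}", word); // exact value depends on endianness
}
```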
@@ -734,7 +837,7 @@ pub fn force_allocation(
let ptr = self.allocate(local_layout, MemoryKind::Stack)?;
// We don't have to validate as we can assume the local
// was already valid for its type.
self.write_value_to_mplace(value, ptr)?;
self.write_value_to_mplace_no_validate(value, ptr)?;
let mplace = ptr.mplace;
// Update the local
*self.stack[frame].locals[local].access_mut()? =
......
@@ -222,7 +222,8 @@ fn pass_argument(
if !Self::check_argument_compat(caller_arg.layout, callee_arg.layout) {
return err!(FunctionArgMismatch(caller_arg.layout.ty, callee_arg.layout.ty));
}
self.copy_op(caller_arg, callee_arg)
// We allow some transmutes here
self.copy_op_transmute(caller_arg, callee_arg)
}
/// Call this function -- pushing the stack frame and initializing the arguments.
......
@@ -218,7 +218,9 @@ fn validate_primitive_type(
return validation_failure!("unaligned reference", path),
_ =>
return validation_failure!(
"dangling (deallocated) reference", path
"dangling (out-of-bounds) reference (might be NULL at \
run-time)",
path
),
}
}
......