Unverified commit dfb4194e, authored by Dylan DPC, committed by GitHub

Rollup merge of #95785 - RalfJung:interpret-size-mismatch, r=oli-obk

interpret: err instead of ICE on size mismatches in to_bits_or_ptr_internal

We did this a while ago already for `to_i32()` and friends, but missed this one. That became quite annoying when I was debugging an ICE caused by `read_pointer` in a Miri shim where the code was passing an argument at the wrong type.

Having `scalar_to_ptr` be fallible is consistent with all the other `Scalar::to_*` methods being fallible. I added `unwrap` only in code outside the interpreter, so that code is no worse off than before in terms of panics.
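To illustrate the shape of the change, here is a minimal, self-contained sketch of the pattern (the `Scalar` and `ScalarSizeMismatch` types below are simplified stand-ins, not the rustc definitions): instead of asserting that the scalar has the expected size and panicking on a mismatch, the conversion reports the mismatch to its caller, which can then surface it as a proper interpreter error rather than an ICE.

```rust
/// Simplified stand-in for the interpreter's size-mismatch error payload.
#[derive(Debug)]
struct ScalarSizeMismatch {
    target_size: u64,
    data_size: u64,
}

/// Toy scalar: either raw bits or an abstract pointer, each carrying its size in bytes.
#[derive(Debug)]
enum Scalar {
    Int { bits: u128, size: u64 },
    Ptr { addr: u64, size: u64 },
}

impl Scalar {
    /// Old behaviour: assert that the sizes match and panic (an ICE) otherwise.
    /// Sketched new behaviour: report the mismatch so the caller can raise a real error.
    fn to_bits_or_ptr(self, target_size: u64) -> Result<Result<u128, u64>, ScalarSizeMismatch> {
        let data_size = match &self {
            Scalar::Int { size, .. } | Scalar::Ptr { size, .. } => *size,
        };
        if data_size != target_size {
            return Err(ScalarSizeMismatch { target_size, data_size });
        }
        Ok(match self {
            Scalar::Int { bits, .. } => Ok(bits),
            Scalar::Ptr { addr, .. } => Err(addr),
        })
    }
}

fn main() {
    // Reading a 4-byte integer at an 8-byte target size: previously a panic, now a recoverable error.
    match (Scalar::Int { bits: 42, size: 4 }).to_bits_or_ptr(8) {
        Ok(Ok(bits)) => println!("bits: {bits}"),
        Ok(Err(addr)) => println!("pointer at {addr:#x}"),
        Err(e) => {
            println!("scalar size mismatch: expected {} bytes, got {}", e.target_size, e.data_size)
        }
    }
}
```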

r? `@oli-obk`
@@ -167,17 +167,18 @@ pub(super) fn op_to_const<'tcx>(
             },
             Immediate::ScalarPair(a, b) => {
                 // We know `offset` is relative to the allocation, so we can use `into_parts`.
-                let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).into_parts() {
-                    (Some(alloc_id), offset) => {
-                        (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
-                    }
-                    (None, _offset) => (
-                        ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
-                            b"" as &[u8],
-                        )),
-                        0,
-                    ),
-                };
+                let (data, start) =
+                    match ecx.scalar_to_ptr(a.check_init().unwrap()).unwrap().into_parts() {
+                        (Some(alloc_id), offset) => {
+                            (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
+                        }
+                        (None, _offset) => (
+                            ecx.tcx.intern_const_alloc(
+                                Allocation::from_bytes_byte_aligned_immutable(b"" as &[u8]),
+                            ),
+                            0,
+                        ),
+                    };
                 let len = b.to_machine_usize(ecx).unwrap();
                 let start = start.try_into().unwrap();
                 let len: usize = len.try_into().unwrap();
...
@@ -197,8 +197,8 @@ fn may_leak(self) -> bool {
 }
 
 impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
-    fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> bool {
-        match (a, b) {
+    fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+        Ok(match (a, b) {
             // Comparisons between integers are always known.
             (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
             // Equality with integers can never be known for sure.
@@ -207,11 +207,11 @@ fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> bool {
             // some things (like functions and vtables) do not have stable addresses
             // so we need to be careful around them (see e.g. #73722).
             (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
-        }
+        })
     }
 
-    fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> bool {
-        match (a, b) {
+    fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+        Ok(match (a, b) {
             // Comparisons between integers are always known.
             (Scalar::Int(_), Scalar::Int(_)) => a != b,
             // Comparisons of abstract pointers with null pointers are known if the pointer
@@ -219,13 +219,13 @@ fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> bool {
             // Inequality with integers other than null can never be known for sure.
             (Scalar::Int(int), ptr @ Scalar::Ptr(..))
             | (ptr @ Scalar::Ptr(..), Scalar::Int(int)) => {
-                int.is_null() && !self.scalar_may_be_null(ptr)
+                int.is_null() && !self.scalar_may_be_null(ptr)?
             }
             // FIXME: return `true` for at least some comparisons where we can reliably
             // determine the result of runtime inequality tests at compile-time.
             // Examples include comparison of addresses in different static items.
             (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
-        }
+        })
     }
 }
@@ -329,9 +329,9 @@ fn call_intrinsic(
                 let a = ecx.read_immediate(&args[0])?.to_scalar()?;
                 let b = ecx.read_immediate(&args[1])?.to_scalar()?;
                 let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
-                    ecx.guaranteed_eq(a, b)
+                    ecx.guaranteed_eq(a, b)?
                 } else {
-                    ecx.guaranteed_ne(a, b)
+                    ecx.guaranteed_ne(a, b)?
                 };
                 ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
             }
...
@@ -283,7 +283,7 @@ fn unsize_into_ptr(
                 if let Some(entry_idx) = vptr_entry_idx {
                     let entry_idx = u64::try_from(entry_idx).unwrap();
                     let (old_data, old_vptr) = val.to_scalar_pair()?;
-                    let old_vptr = self.scalar_to_ptr(old_vptr);
+                    let old_vptr = self.scalar_to_ptr(old_vptr)?;
                     let new_vptr = self
                         .read_new_vtable_after_trait_upcasting_from_vtable(old_vptr, entry_idx)?;
                     self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
...
@@ -640,7 +640,7 @@ pub(super) fn size_and_align_of(
                 Ok(Some((size, align)))
             }
             ty::Dynamic(..) => {
-                let vtable = self.scalar_to_ptr(metadata.unwrap_meta());
+                let vtable = self.scalar_to_ptr(metadata.unwrap_meta())?;
                 // Read size and align from vtable (already checks size).
                 Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
             }
...
@@ -202,7 +202,7 @@ fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
                 if let ty::Dynamic(..) =
                     tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
                 {
-                    let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta());
+                    let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta())?;
                     if let Some(alloc_id) = ptr.provenance {
                         // Explicitly choose const mode here, since vtables are immutable, even
                         // if the reference of the fat pointer is mutable.
...
@@ -1102,30 +1102,38 @@ pub fn mem_copy_repeatedly(
 /// Machine pointer introspection.
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
-    pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
+    pub fn scalar_to_ptr(
+        &self,
+        scalar: Scalar<M::PointerTag>,
+    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
         // We use `to_bits_or_ptr_internal` since we are just implementing the method people need to
         // call to force getting out a pointer.
-        match scalar.to_bits_or_ptr_internal(self.pointer_size()) {
-            Err(ptr) => ptr.into(),
-            Ok(bits) => {
-                let addr = u64::try_from(bits).unwrap();
-                let ptr = M::ptr_from_addr(&self, addr);
-                if addr == 0 {
-                    assert!(ptr.provenance.is_none(), "null pointer can never have an AllocId");
+        Ok(
+            match scalar
+                .to_bits_or_ptr_internal(self.pointer_size())
+                .map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
+            {
+                Err(ptr) => ptr.into(),
+                Ok(bits) => {
+                    let addr = u64::try_from(bits).unwrap();
+                    let ptr = M::ptr_from_addr(&self, addr);
+                    if addr == 0 {
+                        assert!(ptr.provenance.is_none(), "null pointer can never have an AllocId");
+                    }
+                    ptr
                 }
-                ptr
-            }
-        }
+            },
+        )
     }
 
     /// Test if this value might be null.
     /// If the machine does not support ptr-to-int casts, this is conservative.
-    pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> bool {
-        match scalar.try_to_int() {
+    pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> InterpResult<'tcx, bool> {
+        Ok(match scalar.try_to_int() {
             Ok(int) => int.is_null(),
             Err(_) => {
                 // Can only happen during CTFE.
-                let ptr = self.scalar_to_ptr(scalar);
+                let ptr = self.scalar_to_ptr(scalar)?;
                 match self.ptr_try_get_alloc_id(ptr) {
                     Ok((alloc_id, offset, _)) => {
                         let (size, _align) = self
@@ -1138,7 +1146,7 @@ pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> bool {
                     Err(_offset) => bug!("a non-int scalar is always a pointer"),
                 }
             }
-        }
+        })
     }
 
     /// Turning a "maybe pointer" into a proper pointer (and some information
...
@@ -342,7 +342,7 @@ pub fn read_pointer(
         &self,
         op: &OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
-        Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
+        self.scalar_to_ptr(self.read_scalar(op)?.check_init()?)
     }
 
     // Turn the wide MPlace into a string (must already be dereferenced!)
@@ -738,7 +738,7 @@ pub fn read_discriminant(
                 // okay. Everything else, we conservatively reject.
                 let ptr_valid = niche_start == 0
                     && variants_start == variants_end
-                    && !self.scalar_may_be_null(tag_val);
+                    && !self.scalar_may_be_null(tag_val)?;
                 if !ptr_valid {
                     throw_ub!(InvalidTag(dbg_val))
                 }
...
@@ -281,7 +281,7 @@ pub fn ref_to_mplace(
         };
 
         let mplace = MemPlace {
-            ptr: self.scalar_to_ptr(ptr.check_init()?),
+            ptr: self.scalar_to_ptr(ptr.check_init()?)?,
             // We could use the run-time alignment here. For now, we do not, because
             // the point of tracking the alignment here is to make sure that the *static*
             // alignment information emitted with the loads is correct. The run-time
@@ -1104,7 +1104,7 @@ pub(super) fn unpack_dyn_trait(
         &self,
         mplace: &MPlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
-        let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type
+        let vtable = self.scalar_to_ptr(mplace.vtable())?; // also sanity checks the type
         let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
         let layout = self.layout_of(ty)?;
...
@@ -519,7 +519,7 @@ pub(crate) fn eval_fn_call(
                         .kind(),
                     ty::Dynamic(..)
                 ));
-                let vtable = self.scalar_to_ptr(receiver_place.meta.unwrap_meta());
+                let vtable = self.scalar_to_ptr(receiver_place.meta.unwrap_meta())?;
                 let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
 
                 // `*mut receiver_place.layout.ty` is almost the layout that we
...
@@ -50,7 +50,7 @@ pub fn get_vtable_slot(
         let vtable_slot = self
             .get_ptr_alloc(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
             .expect("cannot be a ZST");
-        let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?);
+        let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?)?;
         self.get_ptr_fn(fn_ptr)
     }
 
@@ -75,7 +75,7 @@ pub fn read_drop_type_from_vtable(
             .check_init()?;
         // We *need* an instance here, no other kind of function value, to be able
         // to determine the type.
-        let drop_instance = self.get_ptr_fn(self.scalar_to_ptr(drop_fn))?.as_instance()?;
+        let drop_instance = self.get_ptr_fn(self.scalar_to_ptr(drop_fn)?)?.as_instance()?;
         trace!("Found drop fn: {:?}", drop_instance);
         let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
         let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);
@@ -132,7 +132,8 @@ pub fn read_new_vtable_after_trait_upcasting_from_vtable(
             .get_ptr_alloc(vtable_slot, pointer_size, self.tcx.data_layout.pointer_align.abi)?
             .expect("cannot be a ZST");
-        let new_vtable = self.scalar_to_ptr(new_vtable.read_ptr_sized(Size::ZERO)?.check_init()?);
+        let new_vtable =
+            self.scalar_to_ptr(new_vtable.read_ptr_sized(Size::ZERO)?.check_init()?)?;
         Ok(new_vtable)
     }
...
@@ -312,7 +312,7 @@ fn check_wide_ptr_meta(
         let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
         match tail.kind() {
             ty::Dynamic(..) => {
-                let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta());
+                let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta())?;
                 // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
                 try_validation!(
                     self.ecx.check_ptr_access_align(
@@ -577,7 +577,7 @@ fn try_visit_primitive(
                 // If we check references recursively, also check that this points to a function.
                 if let Some(_) = self.ref_tracking {
-                    let ptr = self.ecx.scalar_to_ptr(value);
+                    let ptr = self.ecx.scalar_to_ptr(value)?;
                     let _fn = try_validation!(
                         self.ecx.get_ptr_fn(ptr),
                         self.path,
@@ -590,7 +590,7 @@ fn try_visit_primitive(
                     // FIXME: Check if the signature matches
                 } else {
                     // Otherwise (for standalone Miri), we have to still check it to be non-null.
-                    if self.ecx.scalar_may_be_null(value) {
+                    if self.ecx.scalar_may_be_null(value)? {
                         throw_validation_failure!(self.path, { "a null function pointer" });
                     }
                 }
@@ -667,7 +667,7 @@ fn visit_scalar(
                 // We support 2 kinds of ranges here: full range, and excluding zero.
                 if start == 1 && end == max_value {
                     // Only null is the niche. So make sure the ptr is NOT null.
-                    if self.ecx.scalar_may_be_null(value) {
+                    if self.ecx.scalar_may_be_null(value)? {
                         throw_validation_failure!(self.path,
                             { "a potentially null pointer" }
                             expected {
...
@@ -15,8 +15,8 @@
 use super::{
     read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
-    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, UndefinedBehaviorInfo, UninitBytesAccess,
-    UnsupportedOpInfo,
+    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, ScalarSizeMismatch, UndefinedBehaviorInfo,
+    UninitBytesAccess, UnsupportedOpInfo,
 };
 use crate::ty;
@@ -81,6 +81,8 @@ pub fn inner(self) -> &'tcx Allocation<Tag, Extra> {
 /// is added when converting to `InterpError`.
 #[derive(Debug)]
 pub enum AllocError {
+    /// A scalar had the wrong size.
+    ScalarSizeMismatch(ScalarSizeMismatch),
     /// Encountered a pointer where we needed raw bytes.
     ReadPointerAsBytes,
     /// Partially overwriting a pointer.
@@ -90,10 +92,19 @@ pub enum AllocError {
 }
 
 pub type AllocResult<T = ()> = Result<T, AllocError>;
 
+impl From<ScalarSizeMismatch> for AllocError {
+    fn from(s: ScalarSizeMismatch) -> Self {
+        AllocError::ScalarSizeMismatch(s)
+    }
+}
+
 impl AllocError {
     pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
         use AllocError::*;
         match self {
+            ScalarSizeMismatch(s) => {
+                InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
+            }
             ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
             PartialPointerOverwrite(offset) => InterpError::Unsupported(
                 UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
@@ -425,7 +436,7 @@ pub fn write_scalar(
         // `to_bits_or_ptr_internal` is the right method because we just want to store this data
         // as-is into memory.
-        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size) {
+        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
             Err(val) => {
                 let (provenance, offset) = val.into_parts();
                 (u128::from(offset.bytes()), Some(provenance))
...
@@ -221,6 +221,13 @@ pub struct UninitBytesAccess {
     pub uninit_size: Size,
 }
 
+/// Information about a size mismatch.
+#[derive(Debug)]
+pub struct ScalarSizeMismatch {
+    pub target_size: u64,
+    pub data_size: u64,
+}
+
 /// Error information for when the program caused Undefined Behavior.
 pub enum UndefinedBehaviorInfo<'tcx> {
     /// Free-form case. Only for errors that are never caught!
@@ -298,10 +305,7 @@ pub enum UndefinedBehaviorInfo<'tcx> {
     /// Working with a local that is not currently live.
     DeadLocal,
     /// Data size is not equal to target size.
-    ScalarSizeMismatch {
-        target_size: u64,
-        data_size: u64,
-    },
+    ScalarSizeMismatch(ScalarSizeMismatch),
     /// A discriminant of an uninhabited enum variant is written.
     UninhabitedEnumVariantWritten,
 }
@@ -408,7 +412,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                 "using uninitialized data, but this operation requires initialized memory"
             ),
             DeadLocal => write!(f, "accessing a dead local variable"),
-            ScalarSizeMismatch { target_size, data_size } => write!(
+            ScalarSizeMismatch(self::ScalarSizeMismatch { target_size, data_size }) => write!(
                 f,
                 "scalar size mismatch: expected {} bytes but got {} bytes instead",
                 target_size, data_size
...
@@ -120,7 +120,8 @@
 pub use self::error::{
     struct_error, CheckInAllocMsg, ErrorHandled, EvalToAllocationRawResult, EvalToConstValueResult,
     InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType,
-    ResourceExhaustionInfo, UndefinedBehaviorInfo, UninitBytesAccess, UnsupportedOpInfo,
+    ResourceExhaustionInfo, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess,
+    UnsupportedOpInfo,
 };
 
 pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};
...
@@ -12,6 +12,7 @@
 use super::{
     AllocId, AllocRange, ConstAllocation, InterpResult, Pointer, PointerArithmetic, Provenance,
+    ScalarSizeMismatch,
 };
 
 /// Represents the result of const evaluation via the `eval_to_allocation` query.
@@ -300,16 +301,29 @@ pub fn from_f64(f: Double) -> Self {
     ///
     /// This method only exists for the benefit of low-level operations that truly need to treat the
     /// scalar in whatever form it is.
+    ///
+    /// This throws UB (instead of ICEing) on a size mismatch since size mismatches can arise in
+    /// Miri when someone declares a function that we shim (such as `malloc`) with a wrong type.
     #[inline]
-    pub fn to_bits_or_ptr_internal(self, target_size: Size) -> Result<u128, Pointer<Tag>> {
+    pub fn to_bits_or_ptr_internal(
+        self,
+        target_size: Size,
+    ) -> Result<Result<u128, Pointer<Tag>>, ScalarSizeMismatch> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
-        match self {
-            Scalar::Int(int) => Ok(int.assert_bits(target_size)),
+        Ok(match self {
+            Scalar::Int(int) => Ok(int.to_bits(target_size).map_err(|size| {
+                ScalarSizeMismatch { target_size: target_size.bytes(), data_size: size.bytes() }
+            })?),
             Scalar::Ptr(ptr, sz) => {
-                assert_eq!(target_size.bytes(), u64::from(sz));
+                if target_size.bytes() != sz.into() {
+                    return Err(ScalarSizeMismatch {
+                        target_size: target_size.bytes(),
+                        data_size: sz.into(),
+                    });
+                }
                 Err(ptr)
             }
-        }
+        })
     }
 }
@@ -348,10 +362,10 @@ pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
         self.try_to_int().map_err(|_| err_unsup!(ReadPointerAsBytes))?.to_bits(target_size).map_err(
             |size| {
-                err_ub!(ScalarSizeMismatch {
+                err_ub!(ScalarSizeMismatch(ScalarSizeMismatch {
                     target_size: target_size.bytes(),
                     data_size: size.bytes(),
-                })
+                }))
                 .into()
             },
         )
...
@@ -146,7 +146,7 @@ fn from_const<'tcx>(
                 // straight to the result, after doing a bit of checking. (We
                 // could remove this branch and just fall through, which
                 // is more general but much slower.)
-                if let Ok(bits) = scalar.to_bits_or_ptr_internal(target_size) {
+                if let Ok(bits) = scalar.to_bits_or_ptr_internal(target_size).unwrap() {
                     return Some(bits);
                 }
             }
...