Commit 7cdbc87a authored by bors

Auto merge of #69999 - RalfJung:miri-unwind, r=oli-obk

adjust Miri to needs of changed unwinding strategy

As expected, https://github.com/rust-lang/rust/pull/67502 broke unwinding in Miri. To fix it we have to adjust parts of the engine and the panic runtime, which this PR does. The Miri-side changes are in https://github.com/rust-lang/miri/pull/1227.

Cc @oli-obk @Aaron1011 @Mark-Simulacrum @Amanieu
@@ -1892,10 +1892,12 @@
     pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize;

     /// Internal hook used by Miri to implement unwinding.
-    /// Compiles to a NOP during non-Miri codegen.
+    /// ICEs when encountered during non-Miri codegen.
     ///
-    /// Perma-unstable: do not use
-    pub fn miri_start_panic(data: *mut (dyn crate::any::Any + crate::marker::Send)) -> ();
+    /// The `payload` ptr here will be exactly the one `do_catch` gets passed by `try`.
+    ///
+    /// Perma-unstable: do not use.
+    pub fn miri_start_panic(payload: *mut u8) -> !;
 }

 // Some functions are defined here because they accidentally got made
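
The signature change from `-> ()` to `-> !` is what lets the compiler treat the call as diverging: once unwinding starts, control never returns to the caller, so everything after the call is unreachable. A tiny standalone illustration of that property, with an ordinary panicking function standing in for the intrinsic:

    fn start_unwind(payload: *mut u8) -> ! {
        // A `!`-returning function never returns normally; a plain panic models
        // what the real intrinsic triggers inside the Miri engine.
        panic!("unwinding with payload at {:p}", payload)
    }

    fn caller() -> i32 {
        let mut slot = 0u8;
        // Diverging call: the compiler knows nothing below this line runs,
        // so `caller` needs no return expression of its own.
        start_unwind(&mut slot as *mut u8)
    }
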
@@ -30,6 +30,8 @@
 #![feature(raw)]
 #![panic_runtime]
 #![feature(panic_runtime)]
+// `real_imp` is unused with Miri, so silence warnings.
+#![cfg_attr(miri, allow(dead_code))]

 use alloc::boxed::Box;
 use core::any::Any;
@@ -38,25 +40,38 @@
 cfg_if::cfg_if! {
     if #[cfg(target_os = "emscripten")] {
         #[path = "emcc.rs"]
-        mod imp;
+        mod real_imp;
     } else if #[cfg(target_arch = "wasm32")] {
         #[path = "dummy.rs"]
-        mod imp;
+        mod real_imp;
     } else if #[cfg(target_os = "hermit")] {
         #[path = "hermit.rs"]
-        mod imp;
+        mod real_imp;
     } else if #[cfg(all(target_env = "msvc", target_arch = "aarch64"))] {
         #[path = "dummy.rs"]
-        mod imp;
+        mod real_imp;
     } else if #[cfg(target_env = "msvc")] {
         #[path = "seh.rs"]
-        mod imp;
+        mod real_imp;
     } else {
         // Rust runtime's startup objects depend on these symbols, so make them public.
         #[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
-        pub use imp::eh_frame_registry::*;
+        pub use real_imp::eh_frame_registry::*;
         #[path = "gcc.rs"]
-        mod imp;
+        mod real_imp;
     }
 }
+
+cfg_if::cfg_if! {
+    if #[cfg(miri)] {
+        // Use the Miri runtime.
+        // We still need to also load the normal runtime above, as rustc expects certain lang
+        // items from there to be defined.
+        #[path = "miri.rs"]
+        mod imp;
+    } else {
+        // Use the real runtime.
+        use real_imp as imp;
+    }
+}
@@ -81,12 +96,5 @@
     let payload = payload as *mut &mut dyn BoxMeUp;
     let payload = (*payload).take_box();

-    // Miri panic support: cfg'd out of normal builds just to be sure.
-    // When going through normal codegen, `miri_start_panic` is a NOP, so the
-    // Miri-enabled sysroot still supports normal unwinding. But when executed in
-    // Miri, this line initiates unwinding.
-    #[cfg(miri)]
-    core::intrinsics::miri_start_panic(payload);
-
     imp::panic(Box::from_raw(payload))
 }

new file miri.rs (the Miri runtime selected above):

//! Unwinding panics for Miri.
use alloc::boxed::Box;
use core::any::Any;

// The type of the payload that the Miri engine propagates through unwinding for us.
// Must be pointer-sized.
type Payload = Box<Box<dyn Any + Send>>;

pub unsafe fn panic(payload: Box<dyn Any + Send>) -> u32 {
    // The payload we pass to `miri_start_panic` will be exactly the argument we get
    // in `cleanup` below. So we just box it up once, to get something pointer-sized.
    let payload_box: Payload = Box::new(payload);
    core::intrinsics::miri_start_panic(Box::into_raw(payload_box) as *mut u8)
}

pub unsafe fn cleanup(payload_box: *mut u8) -> Box<dyn Any + Send> {
    // Recover the underlying `Box`.
    let payload_box: Payload = Box::from_raw(payload_box as *mut _);
    *payload_box
}
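
The "Must be pointer-sized" comment is the crux of the new file: `Box<Box<dyn Any + Send>>` is a thin pointer, since only the inner box carries a fat `dyn` pointer, so it survives the round trip through `*mut u8`. A standalone check of that invariant (this test is not part of the PR):

    use std::any::Any;
    use std::mem::size_of;

    type Payload = Box<Box<dyn Any + Send>>;

    fn main() {
        // The outer `Box` is a single machine word, unlike `Box<dyn Any + Send>`.
        assert_eq!(size_of::<Payload>(), size_of::<*mut u8>());

        // The same round trip `panic`/`cleanup` perform, minus the intrinsic:
        let payload: Box<dyn Any + Send> = Box::new("boom");
        let raw = Box::into_raw(Box::new(payload)) as *mut u8;
        let recovered: Box<dyn Any + Send> =
            unsafe { *Box::from_raw(raw as *mut Box<dyn Any + Send>) };
        assert_eq!(*recovered.downcast::<&str>().unwrap(), "boom");
    }
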
@@ -272,11 +272,13 @@ pub fn ptr_wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self
     #[inline]
     pub fn from_bool(b: bool) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
         Scalar::Raw { data: b as u128, size: 1 }
     }

     #[inline]
     pub fn from_char(c: char) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
         Scalar::Raw { data: c as u128, size: 4 }
     }
@@ -299,21 +301,25 @@ pub fn from_uint(i: impl Into<u128>, size: Size) -> Self {
     #[inline]
     pub fn from_u8(i: u8) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
         Scalar::Raw { data: i as u128, size: 1 }
     }

     #[inline]
     pub fn from_u16(i: u16) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
         Scalar::Raw { data: i as u128, size: 2 }
     }

     #[inline]
     pub fn from_u32(i: u32) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
         Scalar::Raw { data: i as u128, size: 4 }
     }

     #[inline]
     pub fn from_u64(i: u64) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
        Scalar::Raw { data: i as u128, size: 8 }
     }
@@ -341,6 +347,26 @@ pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
             .unwrap_or_else(|| bug!("Signed value {:#x} does not fit in {} bits", i, size.bits()))
     }

+    #[inline]
+    pub fn from_i8(i: i8) -> Self {
+        Self::from_int(i, Size::from_bits(8))
+    }
+
+    #[inline]
+    pub fn from_i16(i: i16) -> Self {
+        Self::from_int(i, Size::from_bits(16))
+    }
+
+    #[inline]
+    pub fn from_i32(i: i32) -> Self {
+        Self::from_int(i, Size::from_bits(32))
+    }
+
+    #[inline]
+    pub fn from_i64(i: i64) -> Self {
+        Self::from_int(i, Size::from_bits(64))
+    }
+
     #[inline]
     pub fn from_machine_isize(i: i64, cx: &impl HasDataLayout) -> Self {
         Self::from_int(i, cx.data_layout().pointer_size)
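
For context, `from_int` (whose tail is visible above) stores the two's-complement truncation of the value and bugs out unless sign-extending it back recovers the original; `Scalar::from_i8(-1)`, for example, becomes `Raw { data: 0xff, size: 1 }`. A self-contained model of that fits-check, with rustc's `Size`, `truncate`, and `sign_extend` helpers reduced to plain integer math (names here are illustrative, not the real API):

    // Keep only the low `bits` bits of `value`.
    fn truncate(value: u128, bits: u32) -> u128 {
        if bits == 128 { return value; }
        value & ((1u128 << bits) - 1)
    }

    // Interpret the low `bits` bits of `value` as a signed number.
    fn sign_extend(value: u128, bits: u32) -> i128 {
        let shift = 128 - bits;
        ((value << shift) as i128) >> shift
    }

    // Like `Scalar::from_int`: return the stored representation, or `None`
    // where the real code would call `bug!`.
    fn from_int(i: i128, bits: u32) -> Option<u128> {
        let truncated = truncate(i as u128, bits);
        if sign_extend(truncated, bits) == i { Some(truncated) } else { None }
    }

    fn main() {
        assert_eq!(from_int(-1, 8), Some(0xff)); // what `from_i8(-1)` stores
        assert_eq!(from_int(300, 8), None);      // 300 does not fit in 8 bits
    }
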
@@ -21,7 +21,7 @@
 use super::{
     Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy,
-    ScalarMaybeUndef, StackPopInfo,
+    ScalarMaybeUndef, StackPopJump,
 };

 pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
@@ -623,23 +623,15 @@ pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx>
         ::log_settings::settings().indentation -= 1;
         let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");

-        let stack_pop_info = M::stack_pop(self, frame.extra, unwinding)?;
-        if let (false, StackPopInfo::StopUnwinding) = (unwinding, stack_pop_info) {
-            bug!("Attempted to stop unwinding while there is no unwinding!");
-        }
-
         // Now where do we jump next?

-        // Determine if we leave this function normally or via unwinding.
-        let cur_unwinding =
-            if let StackPopInfo::StopUnwinding = stack_pop_info { false } else { unwinding };
-
         // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
         // In that case, we return early. We also avoid validation in that case,
         // because this is CTFE and the final value will be thoroughly validated anyway.
         let (cleanup, next_block) = match frame.return_to_block {
             StackPopCleanup::Goto { ret, unwind } => {
-                (true, Some(if cur_unwinding { unwind } else { ret }))
+                (true, Some(if unwinding { unwind } else { ret }))
             }
             StackPopCleanup::None { cleanup, .. } => (cleanup, None),
         };
@@ -647,7 +639,8 @@ pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx>
         if !cleanup {
             assert!(self.stack.is_empty(), "only the topmost frame should ever be leaked");
             assert!(next_block.is_none(), "tried to skip cleanup when we have a next block!");
-            // Leak the locals, skip validation.
+            assert!(!unwinding, "tried to skip cleanup during unwinding");
+            // Leak the locals, skip validation, skip machine hook.
             return Ok(());
         }
@@ -656,13 +649,13 @@ pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx>
             self.deallocate_local(local.value)?;
         }

-        trace!(
-            "StackPopCleanup: {:?} StackPopInfo: {:?} cur_unwinding = {:?}",
-            frame.return_to_block,
-            stack_pop_info,
-            cur_unwinding
-        );
-        if cur_unwinding {
+        if M::stack_pop(self, frame.extra, unwinding)? == StackPopJump::NoJump {
+            // The hook already did everything.
+            // We want to skip the `info!` below, hence early return.
+            return Ok(());
+        }
+        // Normal return.
+        if unwinding {
             // Follow the unwind edge.
             let unwind = next_block.expect("Encountered StackPopCleanup::None when unwinding!");
             self.unwind_to_block(unwind);
@@ -697,7 +690,7 @@ pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx>
                 "CONTINUING({}) {} (unwinding = {})",
                 self.cur_frame(),
                 self.frame().instance,
-                cur_unwinding
+                unwinding
             );
         }
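
Condensed, the new `pop_stack_frame` flow is: resolve the jump target from the frame, possibly bail out and leak everything, deallocate locals, consult the machine hook, and only then jump. A simplified self-contained model of that ordering (all types are stand-ins for the real interpreter's, and the hook is stubbed out):

    #[derive(Debug, PartialEq)]
    enum StackPopJump { Normal, NoJump }

    enum StackPopCleanup {
        Goto { ret: usize, unwind: usize },
        None { cleanup: bool },
    }

    // Stand-in for `M::stack_pop`; a Miri-like machine would return `NoJump`
    // when it intercepts unwinding at a frame pushed by the `try` intrinsic.
    fn machine_stack_pop(_unwinding: bool) -> StackPopJump {
        StackPopJump::Normal
    }

    // Returns the block to jump to, or `None` when no jump should happen.
    fn pop_stack_frame(return_to_block: StackPopCleanup, unwinding: bool) -> Option<usize> {
        let (cleanup, next_block) = match return_to_block {
            StackPopCleanup::Goto { ret, unwind } => {
                (true, Some(if unwinding { unwind } else { ret }))
            }
            StackPopCleanup::None { cleanup } => (cleanup, None),
        };
        if !cleanup {
            assert!(!unwinding, "tried to skip cleanup during unwinding");
            return None; // leak the locals, skip validation, skip machine hook
        }
        // ... locals would be deallocated here ...
        if machine_stack_pop(unwinding) == StackPopJump::NoJump {
            return None; // the hook already did everything
        }
        next_block // follow the return edge, or the unwind edge when unwinding
    }

    fn main() {
        let goto = || StackPopCleanup::Goto { ret: 7, unwind: 9 };
        assert_eq!(pop_stack_frame(goto(), false), Some(7)); // normal return
        assert_eq!(pop_stack_frame(goto(), true), Some(9));  // follow unwind edge
    }
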
@@ -17,16 +17,16 @@
 /// Data returned by Machine::stack_pop,
 /// to provide further control over the popping of the stack frame
 #[derive(Eq, PartialEq, Debug, Copy, Clone)]
-pub enum StackPopInfo {
+pub enum StackPopJump {
     /// Indicates that no special handling should be
     /// done - we'll either return normally or unwind
     /// based on the terminator for the function
     /// we're leaving.
     Normal,

-    /// Indicates that we should stop unwinding,
-    /// as we've reached a catch frame
-    StopUnwinding,
+    /// Indicates that we should *not* jump to the return/unwind address, as the callback already
+    /// took care of everything.
+    NoJump,
 }

 /// Whether this kind of memory is allowed to leak
@@ -276,9 +276,9 @@ fn stack_pop(
         _ecx: &mut InterpCx<'mir, 'tcx, Self>,
         _extra: Self::FrameExtra,
         _unwinding: bool,
-    ) -> InterpResult<'tcx, StackPopInfo> {
+    ) -> InterpResult<'tcx, StackPopJump> {
         // By default, we do not support unwinding from panics
-        Ok(StackPopInfo::Normal)
+        Ok(StackPopJump::Normal)
     }

     fn int_to_ptr(
@@ -24,7 +24,7 @@
 pub use self::memory::{AllocCheck, FnVal, Memory, MemoryKind};
-pub use self::machine::{AllocMap, Machine, MayLeak, StackPopInfo};
+pub use self::machine::{AllocMap, Machine, MayLeak, StackPopJump};
 pub use self::operand::{ImmTy, Immediate, OpTy, Operand, ScalarMaybeUndef};
@@ -251,21 +251,20 @@ union Data<F, R> {
     //
     // We go through a transition where:
     //
-    // * First, we set the data to be the closure that we're going to call.
+    // * First, we set the data field `f` to be the argumentless closure that we're going to call.
     // * When we make the function call, the `do_call` function below, we take
-    //   ownership of the function pointer. At this point the `Data` union is
+    //   ownership of the function pointer. At this point the `data` union is
     //   entirely uninitialized.
     // * If the closure successfully returns, we write the return value into the
-    //   data's return slot. Note that `ptr::write` is used as it's overwriting
-    //   uninitialized data.
+    //   data's return slot (field `r`).
+    // * If the closure panics (`do_catch` below), we write the panic payload into field `p`.
     // * Finally, when we come back out of the `try` intrinsic we're
     //   in one of two states:
     //
     //      1. The closure didn't panic, in which case the return value was
-    //         filled in. We move it out of `data` and return it.
-    //      2. The closure panicked, in which case the return value wasn't
-    //         filled in. In this case the entire `data` union is invalid, so
-    //         there is no need to drop anything.
+    //         filled in. We move it out of `data.r` and return it.
+    //      2. The closure panicked, in which case the panic payload was
+    //         filled in. We move it out of `data.p` and return it.
     //
     // Once we stack all that together we should have the "most efficient"
     // method of calling a catch panic whilst juggling ownership.
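
The rewritten comment describes a three-field protocol: `f` goes in, then either `r` or `p` comes out, with `do_catch` now writing the payload into the union instead of leaving it invalid. A runnable miniature of that protocol, with `std::panic::catch_unwind` standing in for the `try` intrinsic and a `String` payload for simplicity (all names here are illustrative, not the real `std` internals):

    use std::mem::ManuallyDrop;
    use std::panic::{catch_unwind, AssertUnwindSafe};

    union Data<F, R> {
        f: ManuallyDrop<F>,      // the closure, before the call
        r: ManuallyDrop<R>,      // the return value, on success
        p: ManuallyDrop<String>, // the panic payload, on panic
    }

    fn mini_try<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
        let mut data = Data { f: ManuallyDrop::new(f) };
        unsafe {
            // `do_call`: take ownership of the closure out of the union, run it,
            // and on success overwrite the union with the return value.
            let call = |data: &mut Data<F, R>| {
                let f = ManuallyDrop::take(&mut data.f);
                data.r = ManuallyDrop::new(f());
            };
            match catch_unwind(AssertUnwindSafe(|| call(&mut data))) {
                // State 1: no panic, so the return slot `r` was filled in.
                Ok(()) => Ok(ManuallyDrop::take(&mut data.r)),
                // `do_catch`: on panic, fill in the payload slot `p` instead.
                Err(payload) => {
                    let msg = payload
                        .downcast::<&str>()
                        .map(|s| s.to_string())
                        .unwrap_or_else(|_| "non-string panic".to_string());
                    data.p = ManuallyDrop::new(msg);
                    // State 2: move the payload out of `data.p` and return it.
                    Err(ManuallyDrop::take(&mut data.p))
                }
            }
        }
    }

    fn main() {
        assert_eq!(mini_try(|| 2 + 2), Ok(4));
        assert!(mini_try::<_, i32>(|| panic!("boom")).is_err());
    }
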