Commit 9e46807c authored by Vytautas Astrauskas

Add function eval_maybe_thread_local_static_const that allows handling thread locals without touching debug info; address other PR comments.

Parent 2960a6cf
@@ -28,6 +28,8 @@
 pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
     /// Stores the `Machine` instance.
+    ///
+    /// Note: the stack is provided by the machine.
     pub machine: M,

     /// The results of the type checker, from rustc.
@@ -343,17 +345,19 @@ pub fn tag_global_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
     }

     #[inline(always)]
-    pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
+    pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
         M::stack(self)
     }

     #[inline(always)]
-    pub fn stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>> {
+    pub(crate) fn stack_mut(
+        &mut self,
+    ) -> &mut Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>> {
         M::stack_mut(self)
     }

     #[inline(always)]
-    pub fn cur_frame(&self) -> usize {
+    pub fn frame_idx(&self) -> usize {
         let stack = self.stack();
         assert!(!stack.is_empty());
         stack.len() - 1
@@ -598,7 +602,7 @@ pub fn push_stack_frame(
         return_to_block: StackPopCleanup,
     ) -> InterpResult<'tcx> {
         if !self.stack().is_empty() {
-            info!("PAUSING({}) {}", self.cur_frame(), self.frame().instance);
+            info!("PAUSING({}) {}", self.frame_idx(), self.frame().instance);
         }

         ::log_settings::settings().indentation += 1;
@@ -649,7 +653,7 @@ pub fn push_stack_frame(
         }

         M::after_stack_push(self)?;
-        info!("ENTERING({}) {}", self.cur_frame(), self.frame().instance);
+        info!("ENTERING({}) {}", self.frame_idx(), self.frame().instance);

         if self.stack().len() > *self.tcx.sess.recursion_limit.get() {
             throw_exhaust!(StackFrameLimitReached)
@@ -706,7 +710,7 @@ pub fn unwind_to_block(&mut self, target: Option<mir::BasicBlock>) {
     pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
         info!(
             "LEAVING({}) {} (unwinding = {})",
-            self.cur_frame(),
+            self.frame_idx(),
             self.frame().instance,
             unwinding
         );
@@ -789,7 +793,7 @@ pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx>
         if !self.stack().is_empty() {
             info!(
                 "CONTINUING({}) {} (unwinding = {})",
-                self.cur_frame(),
+                self.frame_idx(),
                 self.frame().instance,
                 unwinding
             );
@@ -897,8 +901,8 @@ pub fn dump_place(&self, place: Place<M::PointerTag>) {
             Place::Local { frame, local } => {
                 let mut allocs = Vec::new();
                 let mut msg = format!("{:?}", local);
-                if frame != self.cur_frame() {
-                    write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
+                if frame != self.frame_idx() {
+                    write!(msg, " ({} frames up)", self.frame_idx() - frame).unwrap();
                 }
                 write!(msg, ":").unwrap();
...
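The hunks above route `stack` and `stack_mut` through the `Machine` (`M::stack(self)` / `M::stack_mut(self)`) and rename `cur_frame` to `frame_idx`. The sketch below is a minimal standalone toy model of that shape, not rustc's real types: a hypothetical machine that owns one stack per thread hands the interpreter whichever stack is currently active, and `frame_idx` is simply the index of the topmost frame on that stack.

```rust
// Toy model (hypothetical types, not rustc's) of a machine-provided call stack.
struct Frame {
    instance: &'static str,
}

trait Machine {
    /// Borrow the current thread's stack.
    fn stack(&self) -> &[Frame];
    /// Mutably borrow the current thread's stack.
    fn stack_mut(&mut self) -> &mut Vec<Frame>;
}

struct InterpCx<M: Machine> {
    machine: M,
}

impl<M: Machine> InterpCx<M> {
    fn stack(&self) -> &[Frame] {
        self.machine.stack()
    }

    fn stack_mut(&mut self) -> &mut Vec<Frame> {
        self.machine.stack_mut()
    }

    /// Index of the current (topmost) frame, mirroring `frame_idx` in the hunk above.
    fn frame_idx(&self) -> usize {
        let stack = self.stack();
        assert!(!stack.is_empty());
        stack.len() - 1
    }
}

/// A machine that keeps one stack per "thread" and exposes the active one.
struct ThreadedMachine {
    stacks: Vec<Vec<Frame>>,
    active_thread: usize,
}

impl Machine for ThreadedMachine {
    fn stack(&self) -> &[Frame] {
        &self.stacks[self.active_thread]
    }
    fn stack_mut(&mut self) -> &mut Vec<Frame> {
        &mut self.stacks[self.active_thread]
    }
}

fn main() {
    let machine = ThreadedMachine {
        stacks: vec![vec![Frame { instance: "main" }, Frame { instance: "callee" }]],
        active_thread: 0,
    };
    let mut ecx = InterpCx { machine };
    assert_eq!(ecx.frame_idx(), 1); // two frames on the active stack -> index 1
    ecx.stack_mut().pop();
    assert_eq!(ecx.frame_idx(), 0);
}
```

With the stack behind `Machine::stack`, a machine is free to keep one stack per simulated thread, which is what the "current thread's stack" wording in the trait documentation later in this diff points at.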
@@ -120,16 +120,6 @@ pub trait Machine<'mir, 'tcx>: Sized {
     /// Whether memory accesses should be alignment-checked.
     fn enforce_alignment(memory_extra: &Self::MemoryExtra) -> bool;

-    /// Borrow the current thread's stack.
-    fn stack(
-        ecx: &'a InterpCx<'mir, 'tcx, Self>,
-    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>];
-
-    /// Mutably borrow the current thread's stack.
-    fn stack_mut(
-        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
-    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
-
     /// Whether to enforce the validity invariant
     fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
@@ -229,25 +219,10 @@ fn before_access_global(
         Ok(())
     }

-    /// Called for *every* memory access to determine the real ID of the given
-    /// allocation. This provides a way for the machine to "redirect" certain
-    /// allocations as it sees fit.
+    /// Called for *every* memory access to determine the real ID of the given allocation.
+    /// This provides a way for the machine to "redirect" certain allocations as it sees fit.
     ///
-    /// This is used by Miri for two purposes:
-    /// 1. Redirecting extern statics to real allocations.
-    /// 2. Creating unique allocation ids for thread locals.
-    ///
-    /// In Rust, one way for creating a thread local is by marking a static
-    /// with `#[thread_local]`. On supported platforms this gets translated
-    /// to a LLVM thread local. The problem with supporting these thread
-    /// locals in Miri is that in the internals of the compiler they look as
-    /// normal statics, except that they have the `thread_local` attribute.
-    /// However, in Miri we want to have a property that each allocation has
-    /// a unique id. Therefore, for these thread locals in
-    /// `canonical_alloc_id` we reserve fresh allocation ids for each
-    /// thread. Please note that `canonical_alloc_id` only reserves the
-    /// allocation ids, the actual allocation for the thread local statics
-    /// is done in the same way as for regular statics.
+    /// This is used by Miri to redirect extern statics to real allocations.
     ///
     /// This function must be idempotent.
     #[inline]
@@ -255,6 +230,32 @@ fn canonical_alloc_id(_mem: &Memory<'mir, 'tcx, Self>, id: AllocId) -> AllocId {
         id
     }

+    /// Called when converting a `ty::Const` to an operand.
+    ///
+    /// Miri uses this callback for creating unique allocation ids for thread
+    /// locals. In Rust, one way for creating a thread local is by marking a
+    /// static with `#[thread_local]`. On supported platforms this gets
+    /// translated to a LLVM thread local for which LLVM automatically ensures
+    /// that each thread gets its own copy. Since LLVM automatically handles
+    /// thread locals, the Rust compiler just treats thread local statics as
+    /// regular statics even though accessing a thread local static should be an
+    /// effectful computation that depends on the current thread. The long term
+    /// plan is to change MIR to make accesses to thread locals explicit
+    /// (https://github.com/rust-lang/rust/issues/70685). While the issue 70685
+    /// is not fixed, our current workaround in Miri is to use this function to
+    /// reserve fresh allocation ids for each thread. Please note that here we
+    /// only **reserve** the allocation ids; the actual allocation for the
+    /// thread local statics is done in `Memory::get_global_alloc`, which uses
+    /// `resolve_maybe_global_alloc` to retrieve information about the
+    /// allocation id we generated.
+    #[inline]
+    fn eval_maybe_thread_local_static_const(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        val: mir::interpret::ConstValue<'tcx>,
+    ) -> InterpResult<'tcx, mir::interpret::ConstValue<'tcx>> {
+        Ok(val)
+    }
+
     /// Called to obtain the `GlobalAlloc` associated with the allocation id.
     ///
     /// Miri uses this callback to resolve the information about the original
@@ -326,6 +327,16 @@ fn init_frame_extra(
         frame: Frame<'mir, 'tcx, Self::PointerTag>,
     ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;

+    /// Borrow the current thread's stack.
+    fn stack(
+        ecx: &'a InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>];
+
+    /// Mutably borrow the current thread's stack.
+    fn stack_mut(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
+
     /// Called immediately after a stack frame got pushed and its locals got initialized.
     fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
         Ok(())
...
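The doc comment on the new `eval_maybe_thread_local_static_const` hook above stresses that it only *reserves* allocation ids for thread-local statics; the allocation itself happens later in `Memory::get_global_alloc`. The following standalone sketch models just that bookkeeping with simplified stand-in types (`ThreadId`, `DefId`, `AllocId` here are hypothetical, not rustc's): one fresh allocation id per (thread, static) pair, handed out idempotently.

```rust
use std::collections::HashMap;

// Simplified stand-ins; not rustc's real types.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ThreadId(u32);
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DefId(u32);
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct AllocId(u64);

/// Per-machine bookkeeping: which allocation id stands for which thread's
/// copy of a `#[thread_local]` static.
struct ThreadLocalStatics {
    ids: HashMap<(ThreadId, DefId), AllocId>,
    next_id: u64,
}

impl ThreadLocalStatics {
    /// Reserve (or reuse) the allocation id for `thread`'s copy of the static.
    /// Only the id is reserved; the backing allocation would be created lazily
    /// by whatever code later resolves the id.
    fn alloc_id_for(&mut self, thread: ThreadId, def: DefId) -> AllocId {
        let next_id = &mut self.next_id;
        *self.ids.entry((thread, def)).or_insert_with(|| {
            let fresh = AllocId(*next_id);
            *next_id += 1;
            fresh
        })
    }
}

fn main() {
    let mut tls = ThreadLocalStatics { ids: HashMap::new(), next_id: 100 };
    let a = tls.alloc_id_for(ThreadId(0), DefId(1));
    let b = tls.alloc_id_for(ThreadId(1), DefId(1));
    assert_ne!(a, b); // each thread gets its own id for the same static
    assert_eq!(a, tls.alloc_id_for(ThreadId(0), DefId(1))); // stable per thread
}
```

The hook itself, as added in this commit, takes the constant's `ConstValue` and returns a possibly rewritten one, with the identity function as the default, so machines that do not care about thread locals need no changes.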
@@ -430,11 +430,12 @@ fn get_global_alloc(
         is_write: bool,
     ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
         // The call to `resolve_maybe_global_alloc` is needed to enable Miri to
-        // support thread local statics. In `M::canonical_alloc_id`, for a
-        // thread local static, Miri reserves a fresh allocation id, but the
-        // actual allocation is left to the code that handles statics which
-        // calls this function (`get_global_alloc`). Since the allocation id is
-        // fresh, it has no information about the original static. The call to
+        // support thread local statics. In
+        // `M::eval_maybe_thread_local_static_const`, for a thread local static,
+        // Miri reserves a fresh allocation id, but the actual allocation is
+        // left to the code that handles statics which calls this function
+        // (`get_global_alloc`). Since the allocation id is fresh, it has no
+        // information about the original static. The call to
         // `resolve_maybe_global_alloc` allows Miri to retrieve this information
         // for us.
         let (alloc, def_id) = match M::resolve_maybe_global_alloc(tcx, memory_extra, id) {
@@ -598,9 +599,10 @@ pub fn get_size_and_align(
             // # Statics
             // The call to `resolve_maybe_global_alloc` is needed here because Miri
-            // via the call to `canonical_alloc_id` above reserves fresh allocation
-            // ids for thread local statics. However, the actual allocation is done
-            // not in `canonical_alloc_id`, but in `get_raw` and `get_raw_mut`.
+            // via the callback to `eval_maybe_thread_local_static_const` in
+            // `eval_const_to_op` reserves fresh allocation ids for thread local
+            // statics. However, the actual allocation is done not in
+            // `resolve_maybe_global_alloc`, but in `get_raw` and `get_raw_mut`.
             // Since this function may get called before `get_raw`, we need to allow
             // Miri to retrieve the information about the static for us.
             match M::resolve_maybe_global_alloc(self.tcx, &self.extra, id) {
...
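As the comments in the hunks above explain, a freshly reserved id carries no information about the static it stands for, so the machine has to map it back before the interpreter can find or create the allocation. Below is a minimal standalone sketch of that reverse lookup, the counterpart of the reservation sketch earlier; the types are hypothetical stand-ins, not rustc's `GlobalAlloc` or memory types.

```rust
use std::collections::HashMap;

// Hypothetical stand-ins, not rustc's types.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct AllocId(u64);
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct DefId(u32);

/// Toy counterpart of "information about the original static".
#[derive(Clone, Copy, Debug, PartialEq)]
enum GlobalAllocInfo {
    Static(DefId),
}

struct MachineMemoryExtra {
    /// Reverse map: machine-reserved id -> the static it was reserved for.
    thread_local_origin: HashMap<AllocId, DefId>,
}

impl MachineMemoryExtra {
    /// `None` means "not a machine-reserved id; fall back to the global allocation map".
    fn resolve_maybe_global_alloc(&self, id: AllocId) -> Option<GlobalAllocInfo> {
        self.thread_local_origin.get(&id).copied().map(GlobalAllocInfo::Static)
    }
}

fn main() {
    let mut origin = HashMap::new();
    origin.insert(AllocId(100), DefId(1)); // id reserved for some thread's copy of static DefId(1)
    let extra = MachineMemoryExtra { thread_local_origin: origin };

    assert_eq!(
        extra.resolve_maybe_global_alloc(AllocId(100)),
        Some(GlobalAllocInfo::Static(DefId(1)))
    );
    assert_eq!(extra.resolve_maybe_global_alloc(AllocId(5)), None); // ordinary allocation
}
```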
@@ -537,6 +537,7 @@ pub(super) fn eval_operands(
             }
             ty::ConstKind::Value(val_val) => val_val,
         };
+        let val_val = M::eval_maybe_thread_local_static_const(self, val_val)?;
         // Other cases need layout.
         let layout = from_known_layout(self.tcx, layout, || self.layout_of(val.ty))?;
         let op = match val_val {
...
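The single added line above threads every constant's value through the machine hook before the interpreter builds an operand from it. The sketch below, again with hypothetical simplified types rather than rustc's `ConstValue`, illustrates the call-site pattern and why the default keeps existing machines unaffected: it is just the identity.

```rust
// Hypothetical simplified types; the real hook operates on rustc's `ConstValue`.
#[derive(Clone, Copy, Debug, PartialEq)]
struct ConstValue(u64);

type InterpResult<T> = Result<T, String>;

trait Machine {
    /// Default: pass the constant through unchanged.
    fn eval_maybe_thread_local_static_const(val: ConstValue) -> InterpResult<ConstValue> {
        Ok(val)
    }
}

/// A machine that does not override the hook sees no behavioural change.
struct PlainMachine;
impl Machine for PlainMachine {}

/// Mirrors the call-site pattern: apply the hook, then keep evaluating.
fn eval_const_to_op<M: Machine>(raw: ConstValue) -> InterpResult<ConstValue> {
    let val = M::eval_maybe_thread_local_static_const(raw)?;
    // ... layout computation and operand construction would follow here ...
    Ok(val)
}

fn main() {
    assert_eq!(eval_const_to_op::<PlainMachine>(ConstValue(3)), Ok(ConstValue(3)));
}
```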
@@ -662,7 +662,7 @@ pub fn eval_place(
             }
             local => PlaceTy {
                 // This works even for dead/uninitialized locals; we check further when writing
-                place: Place::Local { frame: self.cur_frame(), local },
+                place: Place::Local { frame: self.frame_idx(), local },
                 layout: self.layout_of_local(self.frame(), local, None)?,
             },
         };
...
@@ -60,10 +60,10 @@ pub fn step(&mut self) -> InterpResult<'tcx, bool> {
         let body = self.body();
         let basic_block = &body.basic_blocks()[block];

-        let old_frames = self.cur_frame();
+        let old_frames = self.frame_idx();

         if let Some(stmt) = basic_block.statements.get(stmt_id) {
-            assert_eq!(old_frames, self.cur_frame());
+            assert_eq!(old_frames, self.frame_idx());
             self.statement(stmt)?;
             return Ok(true);
         }
@@ -71,7 +71,7 @@ pub fn step(&mut self) -> InterpResult<'tcx, bool> {
         M::before_terminator(self)?;

         let terminator = basic_block.terminator();
-        assert_eq!(old_frames, self.cur_frame());
+        assert_eq!(old_frames, self.frame_idx());
         self.terminator(terminator)?;
         Ok(true)
     }
@@ -84,7 +84,7 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
         // Some statements (e.g., box) push new stack frames.
         // We have to record the stack frame number *before* executing the statement.
-        let frame_idx = self.cur_frame();
+        let frame_idx = self.frame_idx();

         match &stmt.kind {
             Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,
...
@@ -52,7 +52,7 @@ pub(super) fn eval_terminator(
             }

             Call { ref func, ref args, destination, ref cleanup, .. } => {
-                let old_stack = self.cur_frame();
+                let old_stack = self.frame_idx();
                 let old_bb = self.frame().block;
                 let func = self.eval_operand(func, None)?;
                 let (fn_val, abi) = match func.layout.ty.kind {
@@ -80,7 +80,7 @@ pub(super) fn eval_terminator(
                 self.eval_fn_call(fn_val, abi, &args[..], ret, *cleanup)?;
                 // Sanity-check that `eval_fn_call` either pushed a new frame or
                 // did a jump to another block.
-                if self.cur_frame() == old_stack && self.frame().block == old_bb {
+                if self.frame_idx() == old_stack && self.frame().block == old_bb {
                     span_bug!(terminator.source_info.span, "evaluating this call made no progress");
                 }
             }
...