Reflow and fixup comments

Parent 37db3db5
@@ -50,7 +50,8 @@
 pub enum Lock {
     NoLock,
     WriteLock(DynamicLifetime),
-    /// This should never be empty -- that would be a read lock held and nobody there to release it...
+    /// This should never be empty -- that would be a read lock held and nobody
+    /// there to release it...
     ReadLock(Vec<DynamicLifetime>),
 }
......
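The reflowed doc comment states an invariant: an empty `ReadLock` would mean a read lock with no holder left to release it. A minimal standalone sketch of how that invariant is kept (not the interpreter's actual code; `FrameId` is a stand-in for the real `DynamicLifetime`):

    #[derive(Debug, PartialEq)]
    enum Lock {
        NoLock,
        WriteLock(FrameId),
        ReadLock(Vec<FrameId>), // invariant: never empty
    }

    type FrameId = usize;

    impl Lock {
        fn release_read(&mut self, frame: FrameId) {
            if let Lock::ReadLock(holders) = self {
                holders.retain(|&f| f != frame);
                if !holders.is_empty() {
                    return;
                }
            } else {
                return;
            }
            // Uphold the invariant: an empty reader list becomes NoLock.
            *self = Lock::NoLock;
        }
    }

    fn main() {
        let mut lock = Lock::ReadLock(vec![0, 1]);
        lock.release_read(0);
        assert_eq!(lock, Lock::ReadLock(vec![1]));
        lock.release_read(1);
        assert_eq!(lock, Lock::NoLock); // never left as ReadLock(vec![])
        let _writer = Lock::WriteLock(7);
    }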
@@ -226,7 +226,8 @@ fn cast_from_int(
                 Ok(Scalar::Bits { bits: v, size: 4 })
             },
-            // No alignment check needed for raw pointers. But we have to truncate to target ptr size.
+            // No alignment check needed for raw pointers.
+            // But we have to truncate to target ptr size.
             RawPtr(_) => {
                 Ok(Scalar::Bits {
                     bits: self.memory.truncate_to_ptr(v).0 as u128,
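A quick sketch of the truncation this comment is about: an int-to-raw-pointer cast keeps only as many low bits as the target's pointer width. The `ptr_size_bytes` parameter is hypothetical; the interpreter gets this from the target data layout.

    fn truncate_to_ptr(v: u128, ptr_size_bytes: u64) -> u64 {
        let bits = ptr_size_bytes * 8;
        // Keep only the low `bits` bits of the 128-bit value.
        ((v << (128 - bits)) >> (128 - bits)) as u64
    }

    fn main() {
        // On a 32-bit target, the high bits of the value are dropped.
        assert_eq!(truncate_to_ptr(0x1_2345_6789, 4), 0x2345_6789);
        // On a 64-bit target, the value fits and is unchanged.
        assert_eq!(truncate_to_ptr(0x1_2345_6789, 8), 0x1_2345_6789);
    }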
@@ -302,7 +303,8 @@ fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalRe
     fn cast_from_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> {
         use rustc::ty::TyKind::*;
         match ty.sty {
-            // Casting to a reference or fn pointer is not permitted by rustc, no need to support it here.
+            // Casting to a reference or fn pointer is not permitted by rustc,
+            // no need to support it here.
             RawPtr(_) |
             Int(IntTy::Isize) |
             Uint(UintTy::Usize) => Ok(ptr.into()),
......
@@ -92,7 +92,8 @@ pub struct Frame<'mir, 'tcx: 'mir> {
     pub return_place: Place,

     /// The list of locals for this stack frame, stored in order as
-    /// `[return_ptr, arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s.
+    /// `[return_ptr, arguments..., variables..., temporaries...]`.
+    /// The locals are stored as `Option<Value>`s.
     /// `None` represents a local that is currently dead, while a live local
     /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
     pub locals: IndexVec<mir::Local, LocalValue>,
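A toy sketch of the dead/live distinction this doc comment describes. The names below are illustrative, not the interpreter's real types: a dead local holds no value at all, while a live local holds its (possibly still uninitialized) storage, and touching a dead local is an interpreter error.

    #[derive(Debug)]
    enum LocalValue {
        Dead,
        Live(Option<u64>), // stand-in for Scalar-or-Allocation storage
    }

    fn access(local: &LocalValue) -> Result<Option<u64>, &'static str> {
        match local {
            // Reading a dead local is an error (use after StorageDead).
            LocalValue::Dead => Err("accessed dead local"),
            LocalValue::Live(v) => Ok(*v),
        }
    }

    fn main() {
        let locals = vec![LocalValue::Live(Some(42)), LocalValue::Dead];
        assert_eq!(access(&locals[0]), Ok(Some(42)));
        assert!(access(&locals[1]).is_err());
    }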
@@ -624,7 +625,8 @@ pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
         match frame.return_to_block {
             StackPopCleanup::MarkStatic(mutable) => {
                 if let Place::Ptr(MemPlace { ptr, .. }) = frame.return_place {
-                    // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
+                    // FIXME: to_ptr()? might be too extreme here,
+                    // static ZSTs might reach this under certain conditions
                     self.memory.mark_static_initialized(
                         ptr.to_ptr()?.alloc_id,
                         mutable,
......
@@ -243,11 +243,12 @@ pub fn deallocate(
         let alloc_kind = self.alloc_kind.remove(&ptr.alloc_id).expect("alloc_map out of sync with alloc_kind");

-        // It is okay for us to still holds locks on deallocation -- for example, we could store data we own
-        // in a local, and the local could be deallocated (from StorageDead) before the function returns.
-        // However, we should check *something*. For now, we make sure that there is no conflicting write
-        // lock by another frame. We *have* to permit deallocation if we hold a read lock.
-        // TODO: Figure out the exact rules here.
+        // It is okay for us to still hold locks on deallocation -- for example, we could store
+        // data we own in a local, and the local could be deallocated (from StorageDead) before the
+        // function returns. However, we should check *something*. For now, we make sure that there
+        // is no conflicting write lock by another frame. We *have* to permit deallocation if we
+        // hold a read lock.
+        // FIXME: Figure out the exact rules here.
         M::free_lock(self, ptr.alloc_id, alloc.bytes.len() as u64)?;

         if alloc_kind != kind {
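A sketch of the deallocation rule the rewritten comment spells out: read locks (even other frames') must not block deallocation, but a write lock held by *another* frame must. Types and names are illustrative stand-ins, not the real `M::free_lock` implementation.

    type FrameId = usize;

    enum Lock {
        NoLock,
        WriteLock(FrameId),
        ReadLock(Vec<FrameId>),
    }

    fn check_dealloc(lock: &Lock, our_frame: FrameId) -> Result<(), &'static str> {
        match lock {
            // A write lock held by a different frame conflicts with freeing.
            Lock::WriteLock(owner) if *owner != our_frame => {
                Err("deallocation conflicts with another frame's write lock")
            }
            // Read locks (even our own) must permit deallocation.
            _ => Ok(()),
        }
    }

    fn main() {
        assert!(check_dealloc(&Lock::NoLock, 0).is_ok());
        assert!(check_dealloc(&Lock::ReadLock(vec![0, 1]), 0).is_ok());
        assert!(check_dealloc(&Lock::WriteLock(0), 0).is_ok());
        assert!(check_dealloc(&Lock::WriteLock(1), 0).is_err());
    }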
@@ -521,13 +522,15 @@ fn get_bytes_unchecked(
         size: Size,
         align: Align,
     ) -> EvalResult<'tcx, &[u8]> {
-        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
+        // Zero-sized accesses can use dangling pointers,
+        // but they still have to be aligned and non-NULL
         self.check_align(ptr.into(), align)?;
         if size.bytes() == 0 {
             return Ok(&[]);
         }
         M::check_locks(self, ptr, size, AccessKind::Read)?;
-        self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
+        // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
+        self.check_bounds(ptr.offset(size, self)?, true)?;
         let alloc = self.get(ptr.alloc_id)?;
         assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
         assert_eq!(size.bytes() as usize as u64, size.bytes());
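A sketch of the check order the reflowed comments describe: alignment and non-NULL-ness are validated even for zero-sized accesses, and checking bounds on `ptr + size` covers `ptr` itself because the offset computation already rejects overflow. All names and the flat-address model are illustrative assumptions.

    fn check_access(addr: u64, size: u64, align: u64, alloc_len: u64) -> Result<(), String> {
        // Zero-sized accesses may be dangling, but never null or misaligned.
        if addr == 0 {
            return Err("null pointer access".into());
        }
        if addr % align != 0 {
            return Err(format!("pointer {:#x} not aligned to {}", addr, align));
        }
        if size == 0 {
            return Ok(()); // dangling is fine for ZSTs past this point
        }
        // Checking `addr + size` is enough: if that offset is in bounds and
        // the addition did not overflow, then `addr` is in bounds too.
        let end = addr.checked_add(size).ok_or("pointer arithmetic overflow")?;
        if end > alloc_len {
            return Err("out-of-bounds access".into());
        }
        Ok(())
    }

    fn main() {
        assert!(check_access(8, 0, 4, 0).is_ok()); // aligned ZST, dangling is OK
        assert!(check_access(0, 0, 1, 0).is_err()); // null is never OK
        assert!(check_access(4, 4, 4, 8).is_ok());
        assert!(check_access(6, 4, 2, 8).is_err()); // 6 + 4 > 8
    }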
@@ -542,13 +545,15 @@ fn get_bytes_unchecked_mut(
         size: Size,
         align: Align,
     ) -> EvalResult<'tcx, &mut [u8]> {
-        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
+        // Zero-sized accesses can use dangling pointers,
+        // but they still have to be aligned and non-NULL
         self.check_align(ptr.into(), align)?;
         if size.bytes() == 0 {
             return Ok(&mut []);
         }
         M::check_locks(self, ptr, size, AccessKind::Write)?;
-        self.check_bounds(ptr.offset(size, &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
+        // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
+        self.check_bounds(ptr.offset(size, &*self)?, true)?;
         let alloc = self.get_mut(ptr.alloc_id)?;
         assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
         assert_eq!(size.bytes() as usize as u64, size.bytes());
@@ -774,14 +779,16 @@ pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<
     /// Read a *non-ZST* scalar
     pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, ScalarMaybeUndef> {
-        self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
+        // Make sure we don't read part of a pointer as a pointer
+        self.check_relocation_edges(ptr, size)?;
         let endianness = self.endianness();
         // get_bytes_unchecked tests alignment
         let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
         // Undef check happens *after* we established that the alignment is correct.
         // We must not return Ok() for unaligned pointers!
         if self.check_defined(ptr, size).is_err() {
-            // this inflates undefined bytes to the entire scalar, even if only a few bytes are undefined
+            // this inflates undefined bytes to the entire scalar,
+            // even if only a few bytes are undefined
            return Ok(ScalarMaybeUndef::Undef);
         }
         // Now we do the actual reading
......
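A sketch of the "inflate undef" behavior the comment notes: if any byte of the scalar is undefined, the whole read yields `Undef` rather than tracking exactly which bytes were defined. Everything here is an illustrative stand-in for the real types.

    #[derive(Debug, PartialEq)]
    enum ScalarMaybeUndef {
        Scalar(u128),
        Undef,
    }

    fn read_scalar(bytes: &[u8], defined: &[bool]) -> ScalarMaybeUndef {
        // One undefined byte poisons the entire scalar.
        if defined.iter().any(|&d| !d) {
            return ScalarMaybeUndef::Undef;
        }
        // Little-endian assembly of the fully defined bytes.
        let mut v: u128 = 0;
        for (i, &b) in bytes.iter().enumerate() {
            v |= (b as u128) << (8 * i);
        }
        ScalarMaybeUndef::Scalar(v)
    }

    fn main() {
        assert_eq!(
            read_scalar(&[0x34, 0x12], &[true, true]),
            ScalarMaybeUndef::Scalar(0x1234)
        );
        // Only one byte is undefined, but the whole scalar reads as Undef.
        assert_eq!(
            read_scalar(&[0x34, 0x12], &[true, false]),
            ScalarMaybeUndef::Undef
        );
    }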
@@ -119,8 +119,9 @@ pub fn to_scalar_ptr_align(self) -> (Scalar, Align) {
     /// Extract the ptr part of the mplace
     #[inline(always)]
     pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
-        // At this point, we forget about the alignment information -- the place has been turned into a reference,
-        // and no matter where it came from, it now must be aligned.
+        // At this point, we forget about the alignment information --
+        // the place has been turned into a reference, and no matter where it came from,
+        // it now must be aligned.
         self.to_scalar_ptr_align().0.to_ptr()
     }
@@ -582,9 +583,10 @@ fn write_value_to_mplace(
         dest: MPlaceTy<'tcx>,
     ) -> EvalResult<'tcx> {
         let (ptr, ptr_align) = dest.to_scalar_ptr_align();
-        // Note that it is really important that the type here is the right one, and matches the type things are read at.
-        // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only
-        // correct if we never look at this data with the wrong type.
+        // Note that it is really important that the type here is the right one, and matches the
+        // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
+        // to handle padding properly, which is only correct if we never look at this data with the
+        // wrong type.

         // Nothing to do for ZSTs, other than checking alignment
         if dest.layout.size.bytes() == 0 {
......
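A sketch of why a `ScalarPair` write is only correct if the data is always read back at the same type: the two components are written at their field offsets and the padding between them is left untouched, so reading the range as raw bytes would see stale padding. The layout values here (offsets 0 and 8, a 4-byte first field) are made up for the demo.

    fn write_scalar_pair(buf: &mut [u8], a: u32, b: u64) {
        // Component 1 at offset 0; bytes 4..8 are padding and are NOT written.
        buf[0..4].copy_from_slice(&a.to_le_bytes());
        // Component 2 at offset 8.
        buf[8..16].copy_from_slice(&b.to_le_bytes());
    }

    fn main() {
        let mut buf = [0xAAu8; 16]; // pretend 0xAA marks stale/undef bytes
        write_scalar_pair(&mut buf, 1, 2);
        // Reading at the right type only looks at bytes 0..4 and 8..16.
        assert_eq!(u32::from_le_bytes(buf[0..4].try_into().unwrap()), 1);
        assert_eq!(u64::from_le_bytes(buf[8..16].try_into().unwrap()), 2);
        // Reading "with the wrong type" (raw bytes) still sees stale padding.
        assert!(buf[4..8].iter().all(|&byte| byte == 0xAA));
    }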
@@ -108,8 +108,8 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
         use rustc::mir::StatementKind::*;

-        // Some statements (e.g. box) push new stack frames. We have to record the stack frame number
-        // *before* executing the statement.
+        // Some statements (e.g. box) push new stack frames.
+        // We have to record the stack frame number *before* executing the statement.
         let frame_idx = self.cur_frame();
         self.tcx.span = stmt.source_info.span;
         self.memory.tcx.span = stmt.source_info.span;
......
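A small sketch of the ordering point in this comment: a statement (like `box`) can push a frame, so the "current frame" must be sampled before execution, not after. The interpreter stand-in below is made up for illustration.

    struct Interp {
        stack: Vec<&'static str>,
    }

    impl Interp {
        fn cur_frame(&self) -> usize {
            self.stack.len() - 1
        }
        fn execute_box_statement(&mut self) {
            // Pretend this statement pushes a new frame, as `box` can.
            self.stack.push("box-alloc shim");
        }
    }

    fn main() {
        let mut interp = Interp { stack: vec!["main"] };
        let frame_idx = interp.cur_frame(); // record *before* executing
        interp.execute_box_statement();
        assert_eq!(frame_idx, 0); // still refers to the executing frame
        assert_eq!(interp.cur_frame(), 1); // sampling now would be wrong
    }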
@@ -24,9 +24,9 @@ pub(crate) fn drop_in_place(
         target: BasicBlock,
     ) -> EvalResult<'tcx> {
         trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
-        // We take the address of the object. This may well be unaligned, which is fine for us here.
-        // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
-        // by rustc.
+        // We take the address of the object. This may well be unaligned, which is fine for us
+        // here. However, unaligned accesses will probably make the actual drop implementation fail
+        // -- a problem shared by rustc.
         let place = self.force_allocation(place)?;
         let (instance, place) = match place.layout.ty.sty {
......
@@ -185,15 +185,18 @@ pub(super) fn eval_terminator(
             DropAndReplace { .. } => unimplemented!(),
             Resume => unimplemented!(),
             Abort => unimplemented!(),
-            FalseEdges { .. } => bug!("should have been eliminated by `simplify_branches` mir pass"),
-            FalseUnwind { .. } => bug!("should have been eliminated by `simplify_branches` mir pass"),
+            FalseEdges { .. } => bug!("should have been eliminated by \
+                `simplify_branches` mir pass"),
+            FalseUnwind { .. } => bug!("should have been eliminated by \
+                `simplify_branches` mir pass"),
             Unreachable => return err!(Unreachable),
         }

         Ok(())
     }

-    /// Decides whether it is okay to call the method with signature `real_sig` using signature `sig`.
+    /// Decides whether it is okay to call the method with signature `real_sig`
+    /// using signature `sig`.
     /// FIXME: This should take into account the platform-dependent ABI description.
     fn check_sig_compat(
         &mut self,
@@ -207,7 +210,7 @@ fn check_ty_compat<'tcx>(ty: Ty<'tcx>, real_ty: Ty<'tcx>) -> bool {
             return match (&ty.sty, &real_ty.sty) {
                 // Permit changing the pointer type of raw pointers and references as well as
                 // mutability of raw pointers.
-                // TODO: Should not be allowed when fat pointers are involved.
+                // FIXME: Should not be allowed when fat pointers are involved.
                 (&ty::RawPtr(_), &ty::RawPtr(_)) => true,
                 (&ty::Ref(_, _, _), &ty::Ref(_, _, _)) => {
                     ty.is_mutable_pointer() == real_ty.is_mutable_pointer()
......
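A sketch of the compatibility rule in `check_ty_compat`, over a toy type model: any two raw pointers are compatible (mutability may differ), references only if their mutability matches, everything else must be identical. As the FIXME notes, the real check should also reject mixing thin and fat pointers; this toy model ignores that.

    #[derive(Clone, Copy, PartialEq)]
    enum Ty {
        RawPtr { mutable: bool },
        Ref { mutable: bool },
        Int,
    }

    fn check_ty_compat(ty: Ty, real_ty: Ty) -> bool {
        match (ty, real_ty) {
            // Raw pointers: pointee and mutability changes are permitted.
            (Ty::RawPtr { .. }, Ty::RawPtr { .. }) => true,
            // References: only the pointee may differ, not the mutability.
            (Ty::Ref { mutable: a }, Ty::Ref { mutable: b }) => a == b,
            // Otherwise require identical types.
            (a, b) => a == b,
        }
    }

    fn main() {
        assert!(check_ty_compat(Ty::RawPtr { mutable: true }, Ty::RawPtr { mutable: false }));
        assert!(!check_ty_compat(Ty::Ref { mutable: true }, Ty::Ref { mutable: false }));
        assert!(check_ty_compat(Ty::Int, Ty::Int));
    }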
@@ -265,7 +265,8 @@ pub fn validate_mplace(
                 if value.layout.ty.builtin_deref(false).is_some() {
                     trace!("Recursing below ptr {:#?}", value);
                     let ptr_place = self.ref_to_mplace(value)?;
-                    // we have not encountered this pointer+layout combination before
+                    // we have not encountered this pointer+layout
+                    // combination before
                     if seen.insert(ptr_place) {
                         todo.push((ptr_place, path_clone_and_deref(path)));
                     }
......
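A sketch of the traversal pattern in `validate_mplace`: a worklist plus a `seen` set, where `HashSet::insert` returning true means "not encountered before", so each pointer+layout combination is visited once even through reference cycles. Plain node IDs stand in for the real (place, layout) pairs.

    use std::collections::HashSet;

    fn validate_all(edges: &[(usize, usize)], root: usize) -> Vec<usize> {
        let mut seen = HashSet::new();
        let mut todo = vec![root];
        let mut visited_order = Vec::new();
        seen.insert(root);
        while let Some(node) = todo.pop() {
            visited_order.push(node); // "validate" this place
            for &(from, to) in edges {
                // insert() returns true only the first time we meet `to`.
                if from == node && seen.insert(to) {
                    todo.push(to);
                }
            }
        }
        visited_order
    }

    fn main() {
        // A cycle: 0 -> 1 -> 2 -> 0. Each node is still validated exactly once.
        let order = validate_all(&[(0, 1), (1, 2), (2, 0)], 0);
        assert_eq!(order, vec![0, 1, 2]);
    }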