Commit 14cb8580 authored by Ralf Jung

always test alignment in memory.rs

Parent: f822ad5c
......@@ -194,7 +194,7 @@ pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointe
}
let ptr = self.allocate(bytes.len() as u64, 1, Kind::UninitializedStatic)?;
self.write_bytes(PrimVal::Ptr(ptr), bytes)?;
self.write_bytes(ptr.into(), bytes)?;
self.mark_static_initalized(ptr.alloc_id, Mutability::Immutable)?;
self.literal_alloc_cache.insert(bytes.to_vec(), ptr.alloc_id);
Ok(ptr)
......@@ -280,6 +280,7 @@ pub fn endianess(&self) -> layout::Endian {
self.layout.endian
}
/// Check that the pointer is aligned and non-NULL
pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> {
let offset = match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
......@@ -532,13 +533,13 @@ pub fn leak_report(&self) -> usize {
/// Byte accessors
impl<'a, 'tcx> Memory<'a, 'tcx> {
fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
if size == 0 {
return Ok(&[]);
}
// FIXME: check alignment for zst memory accesses?
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
if self.reads_are_aligned {
self.check_align(ptr.into(), align)?;
}
if size == 0 {
return Ok(&[]);
}
self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let alloc = self.get(ptr.alloc_id)?;
assert_eq!(ptr.offset as usize as u64, ptr.offset);
......@@ -548,13 +549,13 @@ fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> Eval
}
fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
if size == 0 {
return Ok(&mut []);
}
// FIXME: check alignment for zst memory accesses?
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
if self.writes_are_aligned {
self.check_align(ptr.into(), align)?;
}
if size == 0 {
return Ok(&mut []);
}
self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let alloc = self.get_mut(ptr.alloc_id)?;
assert_eq!(ptr.offset as usize as u64, ptr.offset);
......@@ -643,7 +644,13 @@ pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutabili
pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonoverlapping: bool) -> EvalResult<'tcx> {
if size == 0 {
// TODO: Should we check for alignment here? (Also see write_bytes intrinsic)
// Empty accesses don't need to be valid pointers, but they should still be aligned
if self.reads_are_aligned {
self.check_align(src, align)?;
}
if self.writes_are_aligned {
self.check_align(dest, align)?;
}
return Ok(());
}
let src = src.to_ptr()?;
......@@ -695,13 +702,21 @@ pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> {
pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
if size == 0 {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
if self.reads_are_aligned {
self.check_align(ptr, 1)?;
}
return Ok(&[]);
}
self.get_bytes(ptr.to_ptr()?, size, 1)
}
pub fn write_bytes(&mut self, ptr: PrimVal, src: &[u8]) -> EvalResult<'tcx> {
pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
if src.is_empty() {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
if self.writes_are_aligned {
self.check_align(ptr, 1)?;
}
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?;
......@@ -711,6 +726,10 @@ pub fn write_bytes(&mut self, ptr: PrimVal, src: &[u8]) -> EvalResult<'tcx> {
pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
if count == 0 {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
if self.writes_are_aligned {
self.check_align(ptr, 1)?;
}
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?;
......
......@@ -143,11 +143,13 @@ pub(super) fn call_intrinsic(
"copy_nonoverlapping" => {
let elem_ty = substs.type_at(0);
let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
if elem_size != 0 {
let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
if count * elem_size != 0 {
// TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
// Also see the write_bytes intrinsic.
let elem_align = self.type_align(elem_ty)?;
let src = arg_vals[0].into_ptr(&mut self.memory)?;
let dest = arg_vals[1].into_ptr(&mut self.memory)?;
let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?;
}
}
......@@ -465,7 +467,8 @@ pub(super) fn call_intrinsic(
let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
if count > 0 {
// TODO: Should we, at least, validate the alignment? (Also see memory::copy)
// HashMap relies on write_bytes on a NULL ptr with count == 0 to work
// TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
self.memory.check_align(ptr, ty_align)?;
self.memory.write_repeat(ptr, val_byte, size * count)?;
}
......
......@@ -813,8 +813,8 @@ fn call_c_abi(
if let Some((name, value)) = new {
// +1 for the null terminator
let value_copy = self.memory.allocate((value.len() + 1) as u64, 1, Kind::Env)?;
self.memory.write_bytes(PrimVal::Ptr(value_copy), &value)?;
self.memory.write_bytes(PrimVal::Ptr(value_copy.offset(value.len() as u64, self.memory.layout)?), &[0])?;
self.memory.write_bytes(value_copy.into(), &value)?;
self.memory.write_bytes(value_copy.offset(value.len() as u64, self.memory.layout)?.into(), &[0])?;
if let Some(var) = self.env_vars.insert(name.to_owned(), value_copy) {
self.memory.deallocate(var, None, Kind::Env)?;
}
......
// Compile-fail test: even a zero-sized load must use a pointer that satisfies
// the loaded type's alignment. A `&u16` is only guaranteed 2-byte alignment,
// while `[u32; 0]` requires 4-byte alignment, so the dereference below is UB
// and the interpreter must report it.
fn main() {
    let x = &2u16;
    // Cast the 2-aligned u16 reference to a pointer to a 4-aligned zero-sized type.
    let x = x as *const _ as *const [u32; 0];
    // This must fail because alignment is violated. Test specifically for loading ZST.
    let _x = unsafe { *x }; //~ ERROR: tried to access memory with alignment 2, but alignment 4 is required
}
......@@ -24,6 +24,13 @@ fn vec_into_iter() -> u8 {
.fold(0, |x, y| x + y)
}
/// Drives `vec::IntoIter` over zero-sized elements and sums their lengths.
///
/// Exercises iteration of a `Vec` of ZSTs (`[u64; 0]`); since every element
/// has length 0, the result is always 0.
fn vec_into_iter_zst() -> usize {
    let zsts = vec![[0u64; 0], [0u64; 0]];
    let mut total = 0;
    for arr in zsts {
        total += arr.len();
    }
    total
}
fn vec_reallocate() -> Vec<u8> {
let mut v = vec![1, 2];
v.push(3);
......@@ -35,6 +42,7 @@ fn vec_reallocate() -> Vec<u8> {
fn main() {
assert_eq!(vec_reallocate().len(), 5);
assert_eq!(vec_into_iter(), 30);
assert_eq!(vec_into_iter_zst(), 0);
assert_eq!(make_vec().capacity(), 4);
assert_eq!(make_vec_macro(), [1, 2]);
assert_eq!(make_vec_macro_repeat(), [42; 5]);
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to comment