diff --git a/src/memory.rs b/src/memory.rs
index 71f4c329b5be0ca58b183d9fc3f04682f374b218..481744b11c4905b37053bb60258e4161b60ef716 100644
--- a/src/memory.rs
+++ b/src/memory.rs
@@ -194,7 +194,7 @@ pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointe
         }
 
         let ptr = self.allocate(bytes.len() as u64, 1, Kind::UninitializedStatic)?;
-        self.write_bytes(PrimVal::Ptr(ptr), bytes)?;
+        self.write_bytes(ptr.into(), bytes)?;
         self.mark_static_initalized(ptr.alloc_id, Mutability::Immutable)?;
         self.literal_alloc_cache.insert(bytes.to_vec(), ptr.alloc_id);
         Ok(ptr)
@@ -280,6 +280,7 @@ pub fn endianess(&self) -> layout::Endian {
         self.layout.endian
     }
 
+    /// Check that the pointer is aligned and non-NULL
     pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> {
         let offset = match ptr.into_inner_primval() {
             PrimVal::Ptr(ptr) => {
@@ -532,13 +533,13 @@ pub fn leak_report(&self) -> usize {
 /// Byte accessors
 impl<'a, 'tcx> Memory<'a, 'tcx> {
     fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
-        if size == 0 {
-            return Ok(&[]);
-        }
-        // FIXME: check alignment for zst memory accesses?
+        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
         if self.reads_are_aligned {
             self.check_align(ptr.into(), align)?;
         }
+        if size == 0 {
+            return Ok(&[]);
+        }
         self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
         let alloc = self.get(ptr.alloc_id)?;
         assert_eq!(ptr.offset as usize as u64, ptr.offset);
@@ -548,13 +549,13 @@ fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> Eval
     }
 
     fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
-        if size == 0 {
-            return Ok(&mut []);
-        }
-        // FIXME: check alignment for zst memory accesses?
+        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
         if self.writes_are_aligned {
             self.check_align(ptr.into(), align)?;
         }
+        if size == 0 {
+            return Ok(&mut []);
+        }
         self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
         let alloc = self.get_mut(ptr.alloc_id)?;
         assert_eq!(ptr.offset as usize as u64, ptr.offset);
@@ -643,7 +644,13 @@ pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutabili
 
     pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonoverlapping: bool) -> EvalResult<'tcx> {
         if size == 0 {
-            // TODO: Should we check for alignment here? (Also see write_bytes intrinsic)
+            // Empty accesses don't need to be valid pointers, but they should still be aligned
+            if self.reads_are_aligned {
+                self.check_align(src, align)?;
+            }
+            if self.writes_are_aligned {
+                self.check_align(dest, align)?;
+            }
             return Ok(());
         }
         let src = src.to_ptr()?;
@@ -695,13 +702,21 @@ pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> {
 
     pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
         if size == 0 {
+            // Empty accesses don't need to be valid pointers, but they should still be non-NULL
+            if self.reads_are_aligned {
+                self.check_align(ptr, 1)?;
+            }
             return Ok(&[]);
         }
         self.get_bytes(ptr.to_ptr()?, size, 1)
     }
 
-    pub fn write_bytes(&mut self, ptr: PrimVal, src: &[u8]) -> EvalResult<'tcx> {
+    pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
         if src.is_empty() {
+            // Empty accesses don't need to be valid pointers, but they should still be non-NULL
+            if self.writes_are_aligned {
+                self.check_align(ptr, 1)?;
+            }
             return Ok(());
         }
         let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?;
@@ -711,6 +726,10 @@ pub fn write_bytes(&mut self, ptr: PrimVal, src: &[u8]) -> EvalResult<'tcx> {
 
     pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
         if count == 0 {
+            // Empty accesses don't need to be valid pointers, but they should still be non-NULL
+            if self.writes_are_aligned {
+                self.check_align(ptr, 1)?;
+            }
             return Ok(());
         }
         let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?;
diff --git a/src/terminator/intrinsic.rs b/src/terminator/intrinsic.rs
index da45d7b410a6095eddd4975ecf9ab92c288f73b4..2be5b7666f0517cb09b7fce65243b3b9c6ffbd57 100644
--- a/src/terminator/intrinsic.rs
+++ b/src/terminator/intrinsic.rs
@@ -143,11 +143,13 @@ pub(super) fn call_intrinsic(
             "copy_nonoverlapping" => {
                 let elem_ty = substs.type_at(0);
                 let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
-                if elem_size != 0 {
+                let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
+                if count * elem_size != 0 {
+                    // TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
+                    // Also see the write_bytes intrinsic.
                     let elem_align = self.type_align(elem_ty)?;
                     let src = arg_vals[0].into_ptr(&mut self.memory)?;
                     let dest = arg_vals[1].into_ptr(&mut self.memory)?;
-                    let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
                     self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?;
                 }
             }
@@ -465,7 +467,8 @@ pub(super) fn call_intrinsic(
                 let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
                 let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
                 if count > 0 {
-                    // TODO: Should we, at least, validate the alignment? (Also see memory::copy)
+                    // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
+                    // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
                     self.memory.check_align(ptr, ty_align)?;
                     self.memory.write_repeat(ptr, val_byte, size * count)?;
                 }
diff --git a/src/terminator/mod.rs b/src/terminator/mod.rs
index 54a988c566786d719c32fe043b441e9381965448..73511c3834115a7e9f8256dadf94b26865825cf6 100644
--- a/src/terminator/mod.rs
+++ b/src/terminator/mod.rs
@@ -814,8 +814,8 @@ fn call_c_abi(
                 if let Some((name, value)) = new {
                     // +1 for the null terminator
                     let value_copy = self.memory.allocate((value.len() + 1) as u64, 1, Kind::Env)?;
-                    self.memory.write_bytes(PrimVal::Ptr(value_copy), &value)?;
-                    self.memory.write_bytes(PrimVal::Ptr(value_copy.offset(value.len() as u64, self.memory.layout)?), &[0])?;
+                    self.memory.write_bytes(value_copy.into(), &value)?;
+                    self.memory.write_bytes(value_copy.offset(value.len() as u64, self.memory.layout)?.into(), &[0])?;
                     if let Some(var) = self.env_vars.insert(name.to_owned(), value_copy) {
                         self.memory.deallocate(var, None, Kind::Env)?;
                     }
diff --git a/tests/compile-fail/unaligned_ptr_cast_zst.rs b/tests/compile-fail/unaligned_ptr_cast_zst.rs
new file mode 100644
index 0000000000000000000000000000000000000000..fc603840684e8b0369274e0e37d197a0b1ac313d
--- /dev/null
+++ b/tests/compile-fail/unaligned_ptr_cast_zst.rs
@@ -0,0 +1,6 @@
+fn main() {
+    let x = &2u16;
+    let x = x as *const _ as *const [u32; 0];
+    // This must fail because alignment is violated. Test specifically for loading ZST.
+    let _x = unsafe { *x }; //~ ERROR: tried to access memory with alignment 2, but alignment 4 is required
+}
diff --git a/tests/run-pass-fullmir/vecs.rs b/tests/run-pass-fullmir/vecs.rs
index fd3a00031d61e0cbfa817058e76b945abaa68796..776791bbc9b9ee1394231bb2080f4aaadee14155 100644
--- a/tests/run-pass-fullmir/vecs.rs
+++ b/tests/run-pass-fullmir/vecs.rs
@@ -24,6 +24,13 @@ fn vec_into_iter() -> u8 {
         .fold(0, |x, y| x + y)
 }
 
+fn vec_into_iter_zst() -> usize {
+    vec![[0u64; 0], [0u64; 0]]
+        .into_iter()
+        .map(|x| x.len())
+        .sum()
+}
+
 fn vec_reallocate() -> Vec<u8> {
     let mut v = vec![1, 2];
     v.push(3);
@@ -35,6 +42,7 @@ fn vec_reallocate() -> Vec<u8> {
 fn main() {
     assert_eq!(vec_reallocate().len(), 5);
     assert_eq!(vec_into_iter(), 30);
+    assert_eq!(vec_into_iter_zst(), 0);
     assert_eq!(make_vec().capacity(), 4);
     assert_eq!(make_vec_macro(), [1, 2]);
     assert_eq!(make_vec_macro_repeat(), [42; 5]);
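
Note (not part of the patch): below is a minimal standalone sketch of the rule the new checks encode, namely that a zero-sized access may go through a dangling pointer as long as that pointer is non-NULL and aligned for the type. It uses std's NonNull API purely for illustration; the commented-out lines mirror the new tests/compile-fail/unaligned_ptr_cast_zst.rs test.

use std::ptr::NonNull;

fn main() {
    // Allowed under the new rules: a zero-sized read through a pointer that is
    // dangling but non-null and well-aligned for [u32; 0]. NonNull::dangling()
    // hands out exactly such an address.
    let ok: *const [u32; 0] = NonNull::<[u32; 0]>::dangling().as_ptr();
    let _zst: [u32; 0] = unsafe { *ok };

    // Rejected: the address of a u16 is only guaranteed 2-aligned, but [u32; 0]
    // requires 4-byte alignment, so the interpreter reports an alignment error
    // even though zero bytes are read (this is what the new compile-fail test
    // checks).
    //
    // let x = &2u16;
    // let x = x as *const _ as *const [u32; 0];
    // let _x = unsafe { *x };
}

The vec_into_iter_zst addition to the run-pass suite covers the zero-count side that the copy intrinsic's comment describes (vec::IntoIter::next on a ZST element type), which deliberately stays exempt from the alignment check.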