Commit 9cc5d927 authored by Oliver Schneider

Add constant for `Size::from_bytes(0)`

Parent a41dd6fc
......@@ -120,6 +120,10 @@ pub fn new(alloc_id: AllocId, offset: Size) -> Self {
MemoryPointer { alloc_id, offset }
}
/// Creates a pointer to the very start (offset zero) of the allocation
/// identified by `alloc_id`. Convenience wrapper over `MemoryPointer::new`
/// using the `Size::ZERO` constant introduced by this change.
pub fn zero(alloc_id: AllocId) -> Self {
MemoryPointer::new(alloc_id, Size::ZERO)
}
pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
MemoryPointer::new(
self.alloc_id,
......@@ -355,7 +359,7 @@ pub struct Allocation {
impl Allocation {
pub fn from_bytes(slice: &[u8], align: Align) -> Self {
let mut undef_mask = UndefMask::new(Size::from_bytes(0));
let mut undef_mask = UndefMask::new(Size::ZERO);
undef_mask.grow(Size::from_bytes(slice.len() as u64), true);
Self {
bytes: slice.to_owned(),
......@@ -467,7 +471,7 @@ impl UndefMask {
pub fn new(size: Size) -> Self {
let mut m = UndefMask {
blocks: vec![],
len: Size::from_bytes(0),
len: Size::ZERO,
};
m.grow(size, false);
m
......
......@@ -231,7 +231,7 @@ fn layout_raw_uncached(self, ty: Ty<'tcx>)
LayoutDetails {
variants: Variants::Single { index: 0 },
fields: FieldPlacement::Arbitrary {
offsets: vec![Size::from_bytes(0), b_offset],
offsets: vec![Size::ZERO, b_offset],
memory_index: vec![0, 1]
},
abi: Abi::ScalarPair(a, b),
......@@ -267,7 +267,7 @@ enum StructKind {
};
let mut sized = true;
let mut offsets = vec![Size::from_bytes(0); fields.len()];
let mut offsets = vec![Size::ZERO; fields.len()];
let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
let mut optimize = !repr.inhibit_struct_field_reordering_opt();
......@@ -307,7 +307,7 @@ enum StructKind {
// field 5 with offset 0 puts 0 in offsets[5].
// At the bottom of this function, we use inverse_memory_index to produce memory_index.
let mut offset = Size::from_bytes(0);
let mut offset = Size::ZERO;
if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
if packed {
......@@ -503,7 +503,7 @@ enum StructKind {
fields: FieldPlacement::Union(0),
abi: Abi::Uninhabited,
align: dl.i8_align,
size: Size::from_bytes(0)
size: Size::ZERO
})
}
......@@ -575,7 +575,7 @@ enum StructKind {
},
abi: Abi::Aggregate { sized: false },
align: element.align,
size: Size::from_bytes(0)
size: Size::ZERO
})
}
ty::TyStr => {
......@@ -587,7 +587,7 @@ enum StructKind {
},
abi: Abi::Aggregate { sized: false },
align: dl.i8_align,
size: Size::from_bytes(0)
size: Size::ZERO
})
}
......@@ -696,7 +696,7 @@ enum StructKind {
Align::from_bytes(repr_align, repr_align).unwrap());
}
let mut size = Size::from_bytes(0);
let mut size = Size::ZERO;
for field in &variants[0] {
assert!(!field.is_unsized());
......@@ -908,7 +908,7 @@ enum StructKind {
let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
let mut align = dl.aggregate_align;
let mut size = Size::from_bytes(0);
let mut size = Size::ZERO;
// We're interested in the smallest alignment, so start large.
let mut start_align = Align::from_bytes(256, 256).unwrap();
......@@ -1078,7 +1078,7 @@ enum StructKind {
}
_ => bug!()
};
if pair_offsets[0] == Size::from_bytes(0) &&
if pair_offsets[0] == Size::ZERO &&
pair_offsets[1] == *offset &&
align == pair.align &&
size == pair.size {
......@@ -1099,7 +1099,7 @@ enum StructKind {
variants: layout_variants,
},
fields: FieldPlacement::Arbitrary {
offsets: vec![Size::from_bytes(0)],
offsets: vec![Size::ZERO],
memory_index: vec![0]
},
abi,
......@@ -1182,7 +1182,7 @@ fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
let build_variant_info = |n: Option<ast::Name>,
flds: &[ast::Name],
layout: TyLayout<'tcx>| {
let mut min_size = Size::from_bytes(0);
let mut min_size = Size::ZERO;
let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
match layout.field(self, i) {
Err(err) => {
......@@ -1567,7 +1567,7 @@ fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'t
fields: FieldPlacement::Union(fields),
abi: Abi::Uninhabited,
align: tcx.data_layout.i8_align,
size: Size::from_bytes(0)
size: Size::ZERO
})
}
......@@ -1746,19 +1746,19 @@ fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError
match layout.abi {
Abi::Scalar(ref scalar) => {
return Ok(scalar_niche(scalar, Size::from_bytes(0)));
return Ok(scalar_niche(scalar, Size::ZERO));
}
Abi::ScalarPair(ref a, ref b) => {
// HACK(nox): We iter on `b` and then `a` because `max_by_key`
// returns the last maximum.
let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
.chain(iter::once((a, Size::from_bytes(0))))
.chain(iter::once((a, Size::ZERO)))
.filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
.max_by_key(|niche| niche.available);
return Ok(niche);
}
Abi::Vector { ref element, .. } => {
return Ok(scalar_niche(element, Size::from_bytes(0)));
return Ok(scalar_niche(element, Size::ZERO));
}
_ => {}
}
......
......@@ -454,7 +454,7 @@ fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
adjust_for_rust_scalar(&mut a_attrs,
a,
arg.layout,
Size::from_bytes(0),
Size::ZERO,
false);
adjust_for_rust_scalar(&mut b_attrs,
b,
......@@ -471,7 +471,7 @@ fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
adjust_for_rust_scalar(attrs,
scalar,
arg.layout,
Size::from_bytes(0),
Size::ZERO,
is_return);
}
}
......
......@@ -325,7 +325,7 @@ fn vec_slice_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
MemberDescription {
name: "data_ptr".to_string(),
type_metadata: data_ptr_metadata,
offset: Size::from_bytes(0),
offset: Size::ZERO,
size: pointer_size,
align: pointer_align,
flags: DIFlags::FlagZero,
......@@ -1074,7 +1074,7 @@ fn create_member_descriptions<'a>(&self, cx: &CodegenCx<'a, 'tcx>)
MemberDescription {
name: f.name.to_string(),
type_metadata: type_metadata(cx, field.ty, self.span),
offset: Size::from_bytes(0),
offset: Size::ZERO,
size,
align,
flags: DIFlags::FlagZero,
......@@ -1158,7 +1158,7 @@ fn create_member_descriptions<'a>(&self, cx: &CodegenCx<'a, 'tcx>)
MemberDescription {
name: "".to_string(),
type_metadata: variant_type_metadata,
offset: Size::from_bytes(0),
offset: Size::ZERO,
size: self.layout.size,
align: self.layout.align,
flags: DIFlags::FlagZero
......@@ -1187,7 +1187,7 @@ fn create_member_descriptions<'a>(&self, cx: &CodegenCx<'a, 'tcx>)
MemberDescription {
name: "".to_string(),
type_metadata: variant_type_metadata,
offset: Size::from_bytes(0),
offset: Size::ZERO,
size: variant.size,
align: variant.align,
flags: DIFlags::FlagZero
......@@ -1248,7 +1248,7 @@ fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
MemberDescription {
name,
type_metadata: variant_type_metadata,
offset: Size::from_bytes(0),
offset: Size::ZERO,
size: variant.size,
align: variant.align,
flags: DIFlags::FlagZero
......@@ -1747,7 +1747,7 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
name.as_ptr(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
Size::from_bytes(0).bits(),
Size::ZERO.bits(),
cx.tcx.data_layout.pointer_align.abi_bits() as u32,
DIFlags::FlagArtificial,
ptr::null_mut(),
......
......@@ -40,7 +40,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
if use_x86_mmx {
return Type::x86_mmx(cx)
} else {
let element = layout.scalar_llvm_type_at(cx, element, Size::from_bytes(0));
let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
return Type::vector(&element, count);
}
}
......@@ -120,7 +120,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
let field_count = layout.fields.count();
let mut packed = false;
let mut offset = Size::from_bytes(0);
let mut offset = Size::ZERO;
let mut prev_align = layout.align;
let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2);
for i in layout.fields.index_by_increasing_offset() {
......@@ -265,7 +265,7 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
);
FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to()
}
_ => self.scalar_llvm_type_at(cx, scalar, Size::from_bytes(0))
_ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO)
};
cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
return llty;
......@@ -372,7 +372,7 @@ fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
}
let offset = if index == 0 {
Size::from_bytes(0)
Size::ZERO
} else {
a.value.size(cx).abi_align(b.value.align(cx))
};
......
......@@ -21,7 +21,7 @@
use rustc::hir::map::blocks::FnLikeNode;
use rustc::middle::region;
use rustc::infer::InferCtxt;
use rustc::ty::layout::{IntegerExt, Size};
use rustc::ty::layout::IntegerExt;
use rustc::ty::subst::Subst;
use rustc::ty::{self, Ty, TyCtxt, layout};
use rustc::ty::subst::{Kind, Substs};
......@@ -182,7 +182,7 @@ pub fn const_eval_literal(
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = self.tcx.allocate_bytes(s.as_bytes());
let ptr = MemoryPointer::new(id, Size::from_bytes(0));
let ptr = MemoryPointer::zero(id);
ConstValue::ByValPair(
PrimVal::Ptr(ptr),
PrimVal::from_u128(s.len() as u128),
......@@ -190,7 +190,7 @@ pub fn const_eval_literal(
},
LitKind::ByteStr(ref data) => {
let id = self.tcx.allocate_bytes(data);
let ptr = MemoryPointer::new(id, Size::from_bytes(0));
let ptr = MemoryPointer::zero(id);
ConstValue::ByVal(PrimVal::Ptr(ptr))
},
LitKind::Byte(n) => ConstValue::ByVal(PrimVal::Bytes(n as u128)),
......
......@@ -22,7 +22,6 @@
use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability};
use rustc::mir::interpret::{PrimVal, GlobalId, ConstValue, Value};
use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region};
use rustc::ty::layout::Size;
use rustc::ty::subst::{Substs, Kind};
use rustc::hir::{self, PatKind, RangeEnd};
use rustc::hir::def::{Def, CtorKind};
......@@ -1123,7 +1122,7 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind,
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = tcx.allocate_bytes(s.as_bytes());
let ptr = MemoryPointer::new(id, Size::from_bytes(0));
let ptr = MemoryPointer::zero(id);
ConstValue::ByValPair(
PrimVal::Ptr(ptr),
PrimVal::from_u128(s.len() as u128),
......@@ -1131,7 +1130,7 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind,
},
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_bytes(data);
let ptr = MemoryPointer::new(id, Size::from_bytes(0));
let ptr = MemoryPointer::zero(id);
ConstValue::ByVal(PrimVal::Ptr(ptr))
},
LitKind::Byte(n) => ConstValue::ByVal(PrimVal::Bytes(n as u128)),
......
......@@ -1019,7 +1019,7 @@ pub fn read_global_as_value(&mut self, gid: GlobalId<'tcx>, ty: Ty<'tcx>) -> Eva
.lock()
.intern_static(gid.instance.def_id());
let layout = self.layout_of(ty)?;
let ptr = MemoryPointer::new(alloc_id, Size::from_bytes(0));
let ptr = MemoryPointer::zero(alloc_id);
return Ok(Value::ByRef(ptr.into(), layout.align))
}
let cv = self.const_eval(gid)?;
......
......@@ -73,12 +73,12 @@ pub fn allocations<'x>(
/// Registers a function allocation for `instance` in the global alloc map
/// and returns a pointer to its start (offset zero).
// NOTE(review): the diff rendering had left both the old call
// (`MemoryPointer::new(id, Size::from_bytes(0))`) and its replacement in
// place; only the replacement is kept here.
pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> MemoryPointer {
    let id = self.tcx.alloc_map.lock().create_fn_alloc(instance);
    MemoryPointer::zero(id)
}
/// Interns `bytes` as a new allocation via the `tcx` and returns a pointer
/// to its start (offset zero).
// NOTE(review): the diff rendering had left both the old call
// (`MemoryPointer::new(id, Size::from_bytes(0))`) and its replacement in
// place; only the replacement is kept here.
pub fn allocate_bytes(&mut self, bytes: &[u8]) -> MemoryPointer {
    let id = self.tcx.allocate_bytes(bytes);
    MemoryPointer::zero(id)
}
/// kind is `None` for statics
......@@ -110,7 +110,7 @@ pub fn allocate(
kind: Option<MemoryKind<M::MemoryKinds>>,
) -> EvalResult<'tcx, MemoryPointer> {
let id = self.allocate_value(Allocation::undef(size, align), kind)?;
Ok(MemoryPointer::new(id, Size::from_bytes(0)))
Ok(MemoryPointer::zero(id))
}
pub fn reallocate(
......@@ -448,7 +448,7 @@ pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
if !relocations.is_empty() {
msg.clear();
write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
let mut pos = Size::from_bytes(0);
let mut pos = Size::ZERO;
let relocation_width = (self.pointer_size().bytes() - 1) * 3;
for (i, target_id) in relocations {
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
......@@ -847,8 +847,8 @@ fn clear_relocations(&mut self, ptr: MemoryPointer, size: Size) -> EvalResult<'t
}
fn check_relocation_edges(&self, ptr: MemoryPointer, size: Size) -> EvalResult<'tcx> {
let overlapping_start = self.relocations(ptr, Size::from_bytes(0))?.len();
let overlapping_end = self.relocations(ptr.offset(size, self)?, Size::from_bytes(0))?.len();
let overlapping_start = self.relocations(ptr, Size::ZERO)?.len();
let overlapping_end = self.relocations(ptr.offset(size, self)?, Size::ZERO)?.len();
if overlapping_start + overlapping_end != 0 {
return err!(ReadPointerAsBytes);
}
......
use rustc::mir;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, Size};
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{GlobalId, Value, PrimVal, EvalResult, Pointer, MemoryPointer};
......@@ -210,7 +210,7 @@ pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, P
};
let alloc = Machine::init_static(self, cid)?;
Place::Ptr {
ptr: MemoryPointer::new(alloc, Size::from_bytes(0)).into(),
ptr: MemoryPointer::zero(alloc).into(),
align: layout.align,
extra: PlaceExtra::None,
}
......
......@@ -47,7 +47,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::from_bytes(0);
let mut offset = Size::ZERO;
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
}
......
......@@ -109,7 +109,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
abi::FieldPlacement::Arbitrary { .. } => {
// Structures are split up into a series of 64-bit integer chunks, but any aligned
// doubles not part of another aggregate are passed as floats.
let mut last_offset = Size::from_bytes(0);
let mut last_offset = Size::ZERO;
for i in 0..arg.layout.fields.count() {
let field = arg.layout.field(cx, i);
......
......@@ -83,7 +83,7 @@ impl ArgAttributes {
/// Returns a fresh set of argument attributes: no flags set, a zero
/// pointee size, and no pointee alignment requirement.
// NOTE(review): the diff rendering had left both the old field initializer
// (`pointee_size: Size::from_bytes(0),`) and its replacement in place,
// producing a duplicate struct field; only the replacement is kept here.
pub fn new() -> Self {
    ArgAttributes {
        regular: ArgAttribute::default(),
        pointee_size: Size::ZERO,
        pointee_align: None,
    }
}
......@@ -206,7 +206,7 @@ impl From<Uniform> for CastTarget {
/// Converts a `Uniform` into a `CastTarget` with an empty prefix
/// (all `None`, zero chunk size) and the uniform as the trailing `rest`.
// NOTE(review): the diff rendering had left both the old field initializer
// (`prefix_chunk: Size::from_bytes(0),`) and its replacement in place,
// producing a duplicate struct field; only the replacement is kept here.
fn from(uniform: Uniform) -> CastTarget {
    CastTarget {
        prefix: [None; 8],
        prefix_chunk: Size::ZERO,
        rest: uniform
    }
}
......@@ -274,7 +274,7 @@ fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
Abi::ScalarPair(..) |
Abi::Aggregate { .. } => {
let mut total = Size::from_bytes(0);
let mut total = Size::ZERO;
let mut result = None;
let is_union = match self.fields {
......
......@@ -47,7 +47,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::from_bytes(0);
let mut offset = Size::ZERO;
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
}
......
......@@ -47,7 +47,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::from_bytes(0);
let mut offset = Size::ZERO;
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
}
......
......@@ -101,7 +101,7 @@ fn classify<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>,
}
let mut cls = [None; MAX_EIGHTBYTES];
classify(cx, arg.layout, &mut cls, Size::from_bytes(0))?;
classify(cx, arg.layout, &mut cls, Size::ZERO)?;
if n > 2 {
if cls[0] != Some(Class::Sse) {
return Err(Memory);
......@@ -175,7 +175,7 @@ fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
target = CastTarget::pair(lo, hi);
}
}
assert_eq!(reg_component(cls, &mut i, Size::from_bytes(0)), None);
assert_eq!(reg_component(cls, &mut i, Size::ZERO), None);
target
}
......
......@@ -227,6 +227,8 @@ pub struct Size {
}
impl Size {
pub const ZERO: Size = Self::from_bytes(0);
pub fn from_bits(bits: u64) -> Size {
// Avoid potential overflow from `bits + 7`.
Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
......@@ -614,7 +616,7 @@ pub fn count(&self) -> usize {
pub fn offset(&self, i: usize) -> Size {
match *self {
FieldPlacement::Union(_) => Size::from_bytes(0),
FieldPlacement::Union(_) => Size::ZERO,
FieldPlacement::Array { stride, count } => {
let i = i as u64;
assert!(i < count);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment