Commit 5b4747de authored by Eduard-Mihai Burtescu

rustc_target: avoid using AbiAndPrefAlign where possible.

Parent 3ce8d444
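
For context, a minimal self-contained sketch of the distinction this commit relies on. The type and method names mirror rustc_target::abi (Align, AbiAndPrefAlign, Size::align_to), but the bodies below are simplified illustrations under that assumption, not the real compiler definitions. The point: AbiAndPrefAlign bundles the required ABI alignment with the (possibly larger) preferred alignment, and almost every call site in the diff only ever needs the ABI half, so plain Align is threaded through instead; AbiAndPrefAlign survives only where the preferred alignment genuinely matters (e.g. the pref_align_of intrinsic).

// Simplified sketch, not the actual rustc_target definitions.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Align { pow2: u8 }

impl Align {
    fn from_bytes(b: u64) -> Result<Align, String> {
        if !b.is_power_of_two() {
            return Err(format!("`{}` is not a power of 2", b));
        }
        Ok(Align { pow2: b.trailing_zeros() as u8 })
    }
    fn bytes(self) -> u64 { 1u64 << self.pow2 }
}

// Required ABI alignment paired with the preferred alignment.
#[derive(Copy, Clone, Debug)]
struct AbiAndPrefAlign { abi: Align, pref: Align }

#[derive(Copy, Clone, Debug)]
struct Size { raw: u64 }

impl Size {
    // The renamed helper: round a size up to a plain `Align`
    // (previously `abi_align`, which took a whole `AbiAndPrefAlign`).
    fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size { raw: (self.raw + mask) & !mask }
    }
}

fn main() {
    let field_align = AbiAndPrefAlign {
        abi: Align::from_bytes(4).unwrap(),
        pref: Align::from_bytes(8).unwrap(),
    };
    // After this change, layout code rounds offsets with the ABI half only:
    let offset = Size { raw: 5 }.align_to(field_align.abi);
    println!("offset rounded to {} (abi {} / pref {})",
             offset.raw, field_align.abi.bytes(), field_align.pref.bytes());
}
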
......@@ -12,7 +12,7 @@
use super::{Pointer, EvalResult, AllocId};
use ty::layout::{Size, Align, AbiAndPrefAlign};
use ty::layout::{Size, Align};
use syntax::ast::Mutability;
use std::iter;
use mir;
......@@ -40,7 +40,7 @@ pub struct Allocation<Tag=(),Extra=()> {
/// Denotes undefined memory. Reading from undefined memory is forbidden in miri
pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads.
pub align: AbiAndPrefAlign,
pub align: Align,
/// Whether the allocation is mutable.
/// Also used by codegen to determine if a static should be put into mutable memory,
/// which happens for `static mut` and `static` with interior mutability.
......@@ -90,7 +90,7 @@ impl AllocationExtra<()> for () {}
impl<Tag, Extra: Default> Allocation<Tag, Extra> {
/// Creates a read-only allocation initialized by the given bytes
pub fn from_bytes(slice: &[u8], align: AbiAndPrefAlign) -> Self {
pub fn from_bytes(slice: &[u8], align: Align) -> Self {
let mut undef_mask = UndefMask::new(Size::ZERO);
undef_mask.grow(Size::from_bytes(slice.len() as u64), true);
Self {
......@@ -104,10 +104,10 @@ pub fn from_bytes(slice: &[u8], align: AbiAndPrefAlign) -> Self {
}
pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self {
Allocation::from_bytes(slice, AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()))
Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
}
pub fn undef(size: Size, align: AbiAndPrefAlign) -> Self {
pub fn undef(size: Size, align: Align) -> Self {
assert_eq!(size.bytes() as usize as u64, size.bytes());
Allocation {
bytes: vec![0; size.bytes() as usize],
......
......@@ -13,7 +13,7 @@
use hir::map::definitions::DefPathData;
use mir;
use ty::{self, Ty, layout};
use ty::layout::{Size, AbiAndPrefAlign, LayoutError};
use ty::layout::{Size, Align, LayoutError};
use rustc_target::spec::abi::Abi;
use super::{RawConst, Pointer, InboundsCheck, ScalarMaybeUndef};
......@@ -301,8 +301,8 @@ pub enum EvalErrorKind<'tcx, O> {
TlsOutOfBounds,
AbiViolation(String),
AlignmentCheckFailed {
required: AbiAndPrefAlign,
has: AbiAndPrefAlign,
required: Align,
has: Align,
},
ValidationFailure(String),
CalledClosureAsFunction,
......@@ -315,7 +315,7 @@ pub enum EvalErrorKind<'tcx, O> {
DeallocatedWrongMemoryKind(String, String),
ReallocateNonBasePtr,
DeallocateNonBasePtr,
IncorrectAllocationInformation(Size, Size, AbiAndPrefAlign, AbiAndPrefAlign),
IncorrectAllocationInformation(Size, Size, Align, Align),
Layout(layout::LayoutError<'tcx>),
HeapAllocZeroBytes,
HeapAllocNonPowerOfTwoAlignment(u64),
......@@ -527,7 +527,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c),
AlignmentCheckFailed { required, has } =>
write!(f, "tried to access memory with alignment {}, but alignment {} is required",
has.abi.bytes(), required.abi.bytes()),
has.bytes(), required.bytes()),
TypeNotPrimitive(ty) =>
write!(f, "expected primitive type, got {}", ty),
Layout(ref err) =>
......@@ -539,7 +539,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
IncorrectAllocationInformation(size, size2, align, align2) =>
write!(f, "incorrect alloc info: expected size {} and align {}, \
got size {} and align {}",
size.bytes(), align.abi.bytes(), size2.bytes(), align2.abi.bytes()),
size.bytes(), align.bytes(), size2.bytes(), align2.bytes()),
Panic { ref msg, line, col, ref file } =>
write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col),
InvalidDiscriminant(val) =>
......
......@@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc_target::abi::{AbiAndPrefAlign, Size};
use rustc_target::abi::{Align, Size};
use rustc_data_structures::fx::{FxHashSet};
use std::cmp::{self, Ordering};
......@@ -63,7 +63,7 @@ impl CodeStats {
pub fn record_type_size<S: ToString>(&mut self,
kind: DataTypeKind,
type_desc: S,
align: AbiAndPrefAlign,
align: Align,
overall_size: Size,
packed: bool,
opt_discr_size: Option<Size>,
......@@ -71,7 +71,7 @@ pub fn record_type_size<S: ToString>(&mut self,
let info = TypeSizeInfo {
kind,
type_description: type_desc.to_string(),
align: align.abi.bytes(),
align: align.bytes(),
overall_size: overall_size.bytes(),
packed: packed,
opt_discr_size: opt_discr_size.map(|s| s.bytes()),
......
......@@ -226,9 +226,10 @@ fn layout_raw_uncached(&self, ty: Ty<'tcx>)
tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
};
let scalar_pair = |a: Scalar, b: Scalar| {
let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
let size = (b_offset + b.value.size(dl)).abi_align(align);
let b_align = b.value.align(dl);
let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
let b_offset = a.value.size(dl).align_to(b_align.abi);
let size = (b_offset + b.value.size(dl)).align_to(align.abi);
LayoutDetails {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldPlacement::Arbitrary {
......@@ -248,7 +249,7 @@ enum StructKind {
/// A univariant, the last field of which may be coerced to unsized.
MaybeUnsized,
/// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
Prefixed(Size, AbiAndPrefAlign),
Prefixed(Size, Align),
}
let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
......@@ -257,10 +258,7 @@ enum StructKind {
bug!("struct cannot be packed and aligned");
}
let pack = {
let pack = repr.pack as u64;
AbiAndPrefAlign::new(Align::from_bytes(pack).unwrap())
};
let pack = Align::from_bytes(repr.pack as u64).unwrap();
let mut align = if packed {
dl.i8_align
......@@ -274,7 +272,7 @@ enum StructKind {
let mut optimize = !repr.inhibit_struct_field_reordering_opt();
if let StructKind::Prefixed(_, align) = kind {
optimize &= align.abi.bytes() == 1;
optimize &= align.bytes() == 1;
}
if optimize {
......@@ -285,7 +283,7 @@ enum StructKind {
};
let optimizing = &mut inverse_memory_index[..end];
let field_align = |f: &TyLayout<'_>| {
if packed { f.align.min(pack).abi } else { f.align.abi }
if packed { f.align.abi.min(pack) } else { f.align.abi }
};
match kind {
StructKind::AlwaysSized |
......@@ -312,13 +310,13 @@ enum StructKind {
let mut offset = Size::ZERO;
if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
if packed {
let prefix_align = prefix_align.min(pack);
align = align.max(prefix_align);
let prefix_align = if packed {
prefix_align.min(pack)
} else {
align = align.max(prefix_align);
}
offset = prefix_size.abi_align(prefix_align);
prefix_align
};
align = align.max(AbiAndPrefAlign::new(prefix_align));
offset = prefix_size.align_to(prefix_align);
}
for &i in &inverse_memory_index {
......@@ -333,15 +331,13 @@ enum StructKind {
}
// Invariant: offset < dl.obj_size_bound() <= 1<<61
if packed {
let field_pack = field.align.min(pack);
offset = offset.abi_align(field_pack);
align = align.max(field_pack);
}
else {
offset = offset.abi_align(field.align);
align = align.max(field.align);
}
let field_align = if packed {
field.align.min(AbiAndPrefAlign::new(pack))
} else {
field.align
};
offset = offset.align_to(field_align.abi);
align = align.max(field_align);
debug!("univariant offset: {:?} field: {:#?}", offset, field);
offsets[i as usize] = offset;
......@@ -377,7 +373,7 @@ enum StructKind {
memory_index = inverse_memory_index;
}
let size = min_size.abi_align(align);
let size = min_size.align_to(align.abi);
let mut abi = Abi::Aggregate { sized };
// Unpack newtype ABIs and find scalar pairs.
......@@ -648,7 +644,7 @@ enum StructKind {
let size = element.size.checked_mul(count, dl)
.ok_or(LayoutError::SizeOverflow(ty))?;
let align = dl.vector_align(size);
let size = size.abi_align(align);
let size = size.align_to(align.abi);
tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: VariantIdx::new(0) },
......@@ -680,10 +676,7 @@ enum StructKind {
bug!("Union cannot be packed and aligned");
}
let pack = {
let pack = def.repr.pack as u64;
AbiAndPrefAlign::new(Align::from_bytes(pack).unwrap())
};
let pack = Align::from_bytes(def.repr.pack as u64).unwrap();
let mut align = if packed {
dl.i8_align
......@@ -704,12 +697,12 @@ enum StructKind {
for field in &variants[index] {
assert!(!field.is_unsized());
if packed {
let field_pack = field.align.min(pack);
align = align.max(field_pack);
let field_align = if packed {
field.align.min(AbiAndPrefAlign::new(pack))
} else {
align = align.max(field.align);
}
field.align
};
align = align.max(field_align);
// If all non-ZST fields have the same ABI, forward this ABI
if optimize && !field.is_zst() {
......@@ -749,7 +742,7 @@ enum StructKind {
fields: FieldPlacement::Union(variants[index].len()),
abi,
align,
size: size.abi_align(align)
size: size.align_to(align.abi)
}));
}
......@@ -964,19 +957,19 @@ enum StructKind {
let mut size = Size::ZERO;
// We're interested in the smallest alignment, so start large.
let mut start_align = AbiAndPrefAlign::new(Align::from_bytes(256).unwrap());
assert_eq!(Integer::for_abi_align(dl, start_align), None);
let mut start_align = Align::from_bytes(256).unwrap();
assert_eq!(Integer::for_align(dl, start_align), None);
// repr(C) on an enum tells us to make a (tag, union) layout,
// so we need to grow the prefix alignment to be at least
// the alignment of the union. (This value is used both for
// determining the alignment of the overall enum, and the
// determining the alignment of the payload after the tag.)
let mut prefix_align = min_ity.align(dl);
let mut prefix_align = min_ity.align(dl).abi;
if def.repr.c() {
for fields in &variants {
for field in fields {
prefix_align = prefix_align.max(field.align);
prefix_align = prefix_align.max(field.align.abi);
}
}
}
......@@ -990,7 +983,7 @@ enum StructKind {
// to make room for a larger discriminant.
for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
if !field.is_zst() || field.align.abi.bytes() != 1 {
start_align = start_align.min(field.align);
start_align = start_align.min(field.align.abi);
break;
}
}
......@@ -1000,7 +993,7 @@ enum StructKind {
}).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
// Align the maximum variant size to the largest alignment.
size = size.abi_align(align);
size = size.align_to(align.abi);
if size.bytes() >= dl.obj_size_bound() {
return Err(LayoutError::SizeOverflow(ty));
......@@ -1036,7 +1029,7 @@ enum StructKind {
let mut ity = if def.repr.c() || def.repr.int.is_some() {
min_ity
} else {
Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
Integer::for_align(dl, start_align).unwrap_or(min_ity)
};
// If the alignment is not larger than the chosen discriminant size,
......@@ -1204,7 +1197,7 @@ fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
let type_desc = format!("{:?}", layout.ty);
self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
type_desc,
layout.align,
layout.align.abi,
layout.size,
packed,
opt_discr_size,
......@@ -1823,7 +1816,9 @@ fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutErro
Abi::ScalarPair(ref a, ref b) => {
// HACK(nox): We iter on `b` and then `a` because `max_by_key`
// returns the last maximum.
let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
let niche = iter::once(
(b, a.value.size(self).align_to(b.value.align(self).abi))
)
.chain(iter::once((a, Size::ZERO)))
.filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
.max_by_key(|niche| niche.available);
......
......@@ -73,7 +73,7 @@ fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentAttr(llfn,
idx.as_uint(),
align.abi.bytes() as u32);
align.bytes() as u32);
}
regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
}
......@@ -98,7 +98,7 @@ fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
idx.as_uint(),
align.abi.bytes() as u32);
align.bytes() as u32);
}
regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
}
......@@ -204,7 +204,7 @@ fn store(
return;
}
if self.is_sized_indirect() {
OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
} else if self.is_unsized_indirect() {
bug!("unsized ArgType must be handled through store_fn_arg");
} else if let PassMode::Cast(cast) = self.mode {
......@@ -214,7 +214,7 @@ fn store(
if can_store_through_cast_ptr {
let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx()));
let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
bx.store(val, cast_dst, self.layout.align);
bx.store(val, cast_dst, self.layout.align.abi);
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
......@@ -242,7 +242,7 @@ fn store(
// ...and then memcpy it to the intended destination.
bx.memcpy(
dst.llval,
self.layout.align,
self.layout.align.abi,
llscratch,
scratch_align,
bx.cx().const_usize(self.layout.size.bytes()),
......@@ -273,7 +273,7 @@ fn store_fn_arg(
OperandValue::Pair(next(), next()).store(bx, dst);
}
PassMode::Indirect(_, Some(_)) => {
OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst);
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
}
PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
self.store(bx, next(), dst);
......@@ -545,7 +545,7 @@ fn new_internal(
adjust_for_rust_scalar(&mut b_attrs,
b,
arg.layout,
a.value.size(cx).abi_align(b.value.align(cx)),
a.value.size(cx).align_to(b.value.align(cx).abi),
false);
arg.mode = PassMode::Pair(a_attrs, b_attrs);
return arg;
......
......@@ -19,7 +19,7 @@
use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, AbiAndPrefAlign, Size, TyLayout};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::session::config;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
......@@ -457,7 +457,7 @@ fn not(&mut self, v: &'ll Value) -> &'ll Value {
}
}
fn alloca(&mut self, ty: &'ll Type, name: &str, align: AbiAndPrefAlign) -> &'ll Value {
fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn())
......@@ -465,7 +465,7 @@ fn alloca(&mut self, ty: &'ll Type, name: &str, align: AbiAndPrefAlign) -> &'ll
bx.dynamic_alloca(ty, name, align)
}
fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: AbiAndPrefAlign) -> &'ll Value {
fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
self.count_insn("alloca");
unsafe {
let alloca = if name.is_empty() {
......@@ -475,7 +475,7 @@ fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: AbiAndPrefAlign)
llvm::LLVMBuildAlloca(self.llbuilder, ty,
name.as_ptr())
};
llvm::LLVMSetAlignment(alloca, align.abi.bytes() as c_uint);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
}
......@@ -484,7 +484,7 @@ fn array_alloca(&mut self,
ty: &'ll Type,
len: &'ll Value,
name: &str,
align: AbiAndPrefAlign) -> &'ll Value {
align: Align) -> &'ll Value {
self.count_insn("alloca");
unsafe {
let alloca = if name.is_empty() {
......@@ -494,16 +494,16 @@ fn array_alloca(&mut self,
llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
name.as_ptr())
};
llvm::LLVMSetAlignment(alloca, align.abi.bytes() as c_uint);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
}
fn load(&mut self, ptr: &'ll Value, align: AbiAndPrefAlign) -> &'ll Value {
fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
self.count_insn("load");
unsafe {
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
llvm::LLVMSetAlignment(load, align.abi.bytes() as c_uint);
llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
load
}
}
......@@ -639,7 +639,7 @@ fn nonnull_metadata(&mut self, load: &'ll Value) {
}
}
fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: AbiAndPrefAlign) -> &'ll Value {
fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
self.store_with_flags(val, ptr, align, MemFlags::empty())
}
......@@ -647,7 +647,7 @@ fn store_with_flags(
&mut self,
val: &'ll Value,
ptr: &'ll Value,
align: AbiAndPrefAlign,
align: Align,
flags: MemFlags,
) -> &'ll Value {
debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
......@@ -658,7 +658,7 @@ fn store_with_flags(
let align = if flags.contains(MemFlags::UNALIGNED) {
1
} else {
align.abi.bytes() as c_uint
align.bytes() as c_uint
};
llvm::LLVMSetAlignment(store, align);
if flags.contains(MemFlags::VOLATILE) {
......@@ -878,8 +878,8 @@ fn inline_asm_call(&mut self, asm: &CStr, cons: &CStr,
}
}
fn memcpy(&mut self, dst: &'ll Value, dst_align: AbiAndPrefAlign,
src: &'ll Value, src_align: AbiAndPrefAlign,
fn memcpy(&mut self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
......@@ -893,13 +893,13 @@ fn memcpy(&mut self, dst: &'ll Value, dst_align: AbiAndPrefAlign,
let dst = self.pointercast(dst, self.cx().type_i8p());
let src = self.pointercast(src, self.cx().type_i8p());
unsafe {
llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi.bytes() as c_uint,
src, src_align.abi.bytes() as c_uint, size, is_volatile);
llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
src, src_align.bytes() as c_uint, size, is_volatile);
}
}
fn memmove(&mut self, dst: &'ll Value, dst_align: AbiAndPrefAlign,
src: &'ll Value, src_align: AbiAndPrefAlign,
fn memmove(&mut self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memmove.
......@@ -913,8 +913,8 @@ fn memmove(&mut self, dst: &'ll Value, dst_align: AbiAndPrefAlign,
let dst = self.pointercast(dst, self.cx().type_i8p());
let src = self.pointercast(src, self.cx().type_i8p());
unsafe {
llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi.bytes() as c_uint,
src, src_align.abi.bytes() as c_uint, size, is_volatile);
llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
src, src_align.bytes() as c_uint, size, is_volatile);
}
}
......@@ -923,14 +923,14 @@ fn memset(
ptr: &'ll Value,
fill_byte: &'ll Value,
size: &'ll Value,
align: AbiAndPrefAlign,
align: Align,
flags: MemFlags,
) {
let ptr_width = &self.cx().sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
let ptr = self.pointercast(ptr, self.cx().type_i8p());
let align = self.cx().const_u32(align.abi.bytes() as u32);
let align = self.cx().const_u32(align.bytes() as u32);
let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
}
......
......@@ -357,7 +357,7 @@ fn from_const_alloc(
offset: Size,
) -> PlaceRef<'tcx, &'ll Value> {
let init = const_alloc_to_llvm(self, alloc);
let base_addr = self.static_addr_of(init, layout.align, None);
let base_addr = self.static_addr_of(init, layout.align.abi, None);
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
self.static_bitcast(base_addr, self.type_i8p()),
......
......@@ -28,7 +28,7 @@
use rustc::ty::{self, Ty};
use rustc_codegen_ssa::traits::*;
use rustc::ty::layout::{self, Size, Align, AbiAndPrefAlign, LayoutOf};
use rustc::ty::layout::{self, Size, Align, LayoutOf};
use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
......@@ -89,20 +89,20 @@ pub fn codegen_static_initializer(
fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
gv: &'ll Value,
mut align: AbiAndPrefAlign) {
mut align: Align) {
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
// which can force it to be smaller. Rust doesn't support this yet.
if let Some(min) = cx.sess().target.target.options.min_global_align {
match Align::from_bits(min) {
Ok(min) => align = align.max(AbiAndPrefAlign::new(min)),
Ok(min) => align = align.max(min),
Err(err) => {
cx.sess().err(&format!("invalid minimum global alignment: {}", err));
}
}
}
unsafe {
llvm::LLVMSetAlignment(gv, align.abi.bytes() as u32);
llvm::LLVMSetAlignment(gv, align.bytes() as u32);
}
}
......@@ -186,7 +186,7 @@ fn static_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
fn static_addr_of_mut(
&self,
cv: &'ll Value,
align: AbiAndPrefAlign,
align: Align,
kind: Option<&str>,
) -> &'ll Value {
unsafe {
......@@ -212,14 +212,14 @@ fn static_addr_of_mut(
fn static_addr_of(
&self,
cv: &'ll Value,
align: AbiAndPrefAlign,
align: Align,
kind: Option<&str>,
) -> &'ll Value {
if let Some(&gv) = self.const_globals.borrow().get(&cv) {
unsafe {
// Upgrade the alignment in cases where the same constant is used with different
// alignment requirements
let llalign = align.abi.bytes() as u32;
let llalign = align.bytes() as u32;
if llalign > llvm::LLVMGetAlignment(gv) {
llvm::LLVMSetAlignment(gv, llalign);
}
......
......@@ -35,7 +35,7 @@
use rustc::ty::Instance;
use common::CodegenCx;
use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt};
use rustc::ty::layout::{self, AbiAndPrefAlign, HasDataLayout, Integer, IntegerExt, LayoutOf,
use rustc::ty::layout::{self, Align, HasDataLayout, Integer, IntegerExt, LayoutOf,
PrimitiveExt, Size, TyLayout};
use rustc::session::config;
use rustc::util::nodemap::FxHashMap;
......@@ -323,7 +323,7 @@ fn fixed_vec_metadata(
llvm::LLVMRustDIBuilderCreateArrayType(
DIB(cx),
size.bits(),
align.abi.bits() as u32,
align.bits() as u32,
element_type_metadata,
subscripts)
};
......@@ -465,7 +465,7 @@ fn trait_pointer_metadata(
syntax_pos::DUMMY_SP),
offset: layout.fields.offset(0),
size: data_ptr_field.size,
align: data_ptr_field.align,
align: data_ptr_field.align.abi,
flags: DIFlags::FlagArtificial,
discriminant: None,
},
......@@ -474,7 +474,7 @@ fn trait_pointer_metadata(
type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP),
offset: layout.fields.offset(1),
size: vtable_field.size,
align: vtable_field.align,
align: vtable_field.align.abi,
flags: DIFlags::FlagArtificial,
discriminant: None,
},
......@@ -787,7 +787,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
DIB(cx),
name.as_ptr(),
size.bits(),
align.abi.bits() as u32,
align.bits() as u32,
encoding)
};
......@@ -818,7 +818,7 @@ fn pointer_type_metadata(
DIB(cx),
pointee_type_metadata,
pointer_size.bits(),
pointer_align.abi.bits() as u32,
pointer_align.bits() as u32,
name.as_ptr())
}
}
......@@ -923,7 +923,7 @@ struct MemberDescription<'ll> {
type_metadata: &'ll DIType,
offset: Size,
size: Size,
align: AbiAndPrefAlign,
align: Align,
flags: DIFlags,
discriminant: Option<u64>,
}
......@@ -990,7 +990,7 @@ fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
type_metadata: type_metadata(cx, field.ty, self.span),
offset: layout.fields.offset(i),
size: field.size,
align: field.align,
align: field.align.abi,
flags: DIFlags::FlagZero,
discriminant: None,
}
......@@ -1113,7 +1113,7 @@ fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
type_metadata: type_metadata(cx, field.ty, self.span),
offset: Size::ZERO,
size: field.size,
align: field.align,
align: field.align.abi,
flags: DIFlags::FlagZero,
discriminant: None,
}
......@@ -1226,7 +1226,7 @@ fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: self.layout.size,
align: self.layout.align,
align: self.layout.align.abi,
flags: DIFlags::FlagZero,
discriminant: None,
}
......@@ -1265,7 +1265,7 @@ fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: self.layout.size,
align: self.layout.align,
align: self.layout.align.abi,
flags: DIFlags::FlagZero,
discriminant: Some(self.layout.ty.ty_adt_def().unwrap()
.discriminant_for_variant(cx.tcx, i)
......@@ -1334,7 +1334,7 @@ fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: variant.size,
align: variant.align,
align: variant.align.abi,
flags: DIFlags::FlagZero,
discriminant: None,
}
......@@ -1372,7 +1372,7 @@ fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: self.layout.size,
align: self.layout.align,
align: self.layout.align.abi,
flags: DIFlags::FlagZero,
discriminant: niche_value,
}
......@@ -1675,7 +1675,7 @@ fn prepare_enum_metadata(
file_metadata,
UNKNOWN_LINE_NUMBER,
size.bits(),
align.abi.bits() as u32,
align.bits() as u32,
layout.fields.offset(0).bits(),
DIFlags::FlagArtificial,
discr_metadata))
......@@ -1803,7 +1803,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>,
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
member_description.size.bits(),
member_description.align.abi.bits() as u32,
member_description.align.bits() as u32,
member_description.offset.bits(),
match member_description.discriminant {
None => None,
......@@ -1851,7 +1851,7 @@ fn create_struct_stub(
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
struct_size.bits(),
struct_align.abi.bits() as u32,
struct_align.bits() as u32,
DIFlags::FlagZero,
None,
empty_array,
......@@ -1889,7 +1889,7 @@ fn create_union_stub(
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
union_size.bits(),
union_align.abi.bits() as u32,
union_align.bits() as u32,
DIFlags::FlagZero,
Some(empty_array),
0, // RuntimeLang
......@@ -1958,7 +1958,7 @@ pub fn create_global_var_metadata(
is_local_to_unit,
global,
None,
global_align.abi.bytes() as u32,
global_align.bytes() as u32,
);
}
}
......
......@@ -201,7 +201,7 @@ fn declare_local(
cx.sess().opts.optimize != config::OptLevel::No,
DIFlags::FlagZero,
argument_index,
align.abi.bytes() as u32,
align.bytes() as u32,
)
};
source_loc::set_debug_location(self,
......
......@@ -110,7 +110,7 @@ fn codegen_intrinsic_call(
let name = &*tcx.item_name(def_id).as_str();
let llret_ty = self.cx().layout_of(ret_ty).llvm_type(self.cx());
let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);
let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi);
let simple = get_simple_intrinsic(self.cx(), name);
let llval = match name {
......@@ -158,7 +158,7 @@ fn codegen_intrinsic_call(
}
"min_align_of" => {
let tp_ty = substs.type_at(0);
self.cx().const_usize(self.cx().align_of(tp_ty).abi.bytes())
self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
}
"min_align_of_val" => {
let tp_ty = substs.type_at(0);
......@@ -167,12 +167,12 @@ fn codegen_intrinsic_call(
glue::size_and_align_of_dst(self, tp_ty, Some(meta));
llalign
} else {
self.cx().const_usize(self.cx().align_of(tp_ty).abi.bytes())
self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
}
}
"pref_align_of" => {
let tp_ty = substs.type_at(0);
self.cx().const_usize(self.cx().align_of(tp_ty).pref.bytes())
self.cx().const_usize(self.cx().layout_of(tp_ty).align.pref.bytes())
}
"type_name" => {
let tp_ty = substs.type_at(0);
......@@ -261,7 +261,7 @@ fn codegen_intrinsic_call(
let align = if name == "unaligned_volatile_load" {
1
} else {
self.cx().align_of(tp_ty).abi.bytes() as u32
self.cx().align_of(tp_ty).bytes() as u32
};
unsafe {
llvm::LLVMSetAlignment(load, align);
......@@ -815,7 +815,7 @@ fn try_intrinsic(
) {
if bx.cx().sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
bx.store(bx.cx().const_null(bx.cx().type_i8p()), dest, ptr_align);
} else if wants_msvc_seh(bx.cx().sess()) {
codegen_msvc_try(bx, func, data, local_ptr, dest);
......@@ -890,7 +890,7 @@ fn codegen_msvc_try(
//
// More information can be found in libstd's seh.rs implementation.
let i64p = bx.cx().type_ptr_to(bx.cx().type_i64());
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(i64p, "slot", ptr_align);
bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
......@@ -906,7 +906,7 @@ fn codegen_msvc_try(
let funclet = catchpad.catch_pad(cs, &[tydesc, bx.cx().const_i32(0), slot]);
let addr = catchpad.load(slot, ptr_align);
let i64_align = bx.tcx().data_layout.i64_align;
let i64_align = bx.tcx().data_layout.i64_align.abi;
let arg1 = catchpad.load(addr, i64_align);
let val1 = bx.cx().const_i32(1);
let gep1 = catchpad.inbounds_gep(addr, &[val1]);
......@@ -923,7 +923,7 @@ fn codegen_msvc_try(
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[func, data, local_ptr], None);
let i32_align = bx.tcx().data_layout.i32_align;
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
......@@ -982,7 +982,7 @@ fn codegen_gnu_try(
let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
catch.add_clause(vals, bx.cx().const_null(bx.cx().type_i8p()));
let ptr = catch.extract_value(vals, 0);
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let bitcast = catch.bitcast(local_ptr, bx.cx().type_ptr_to(bx.cx().type_i8p()));
catch.store(ptr, bitcast, ptr_align);
catch.ret(bx.cx().const_i32(1));
......@@ -991,7 +991,7 @@ fn codegen_gnu_try(
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[func, data, local_ptr], None);
let i32_align = bx.tcx().data_layout.i32_align;
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
......@@ -1436,7 +1436,7 @@ fn non_ptr(t: ty::Ty) -> ty::Ty {
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.cx().type_i32();
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi.bytes() as i32);
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
......@@ -1536,7 +1536,7 @@ fn non_ptr(t: ty::Ty) -> ty::Ty {
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.cx().type_i32();
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi.bytes() as i32);
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
......
......@@ -12,7 +12,7 @@
use common::*;
use rustc::hir;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, AbiAndPrefAlign, LayoutOf, Size, TyLayout};
use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
use rustc_target::abi::FloatTy;
use rustc_mir::monomorphize::item::DefPathBasedNames;
use type_::Type;
......@@ -80,7 +80,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
match layout.fields {
layout::FieldPlacement::Union(_) => {
let fill = cx.type_padding_filler(layout.size, layout.align);
let fill = cx.type_padding_filler(layout.size, layout.align.abi);
let packed = false;
match name {
None => {
......@@ -120,23 +120,23 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
let mut packed = false;
let mut offset = Size::ZERO;
let mut prev_effective_align = layout.align;
let mut prev_effective_align = layout.align.abi;
let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
for i in layout.fields.index_by_increasing_offset() {
let target_offset = layout.fields.offset(i as usize);
let field = layout.field(cx, i);
let effective_field_align = AbiAndPrefAlign::new(layout.align.abi
let effective_field_align = layout.align.abi
.min(field.align.abi)
.restrict_for_offset(target_offset));
packed |= effective_field_align.abi < field.align.abi;
.restrict_for_offset(target_offset);
packed |= effective_field_align < field.align.abi;
debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
effective_field_align: {}",
i, field, offset, target_offset, effective_field_align.abi.bytes());
i, field, offset, target_offset, effective_field_align.bytes());
assert!(target_offset >= offset);
let padding = target_offset - offset;
let padding_align = prev_effective_align.min(effective_field_align);
assert_eq!(offset.abi_align(padding_align) + padding, target_offset);
assert_eq!(offset.align_to(padding_align) + padding, target_offset);
result.push(cx.type_padding_filler(padding, padding_align));
debug!(" padding before: {:?}", padding);
......@@ -151,7 +151,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
let padding = layout.size - offset;
let padding_align = prev_effective_align;
assert_eq!(offset.abi_align(padding_align) + padding, layout.size);
assert_eq!(offset.align_to(padding_align) + padding, layout.size);
debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
padding, offset, layout.size);
result.push(cx.type_padding_filler(padding, padding_align));
......@@ -165,17 +165,17 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
pub fn align_of(&self, ty: Ty<'tcx>) -> AbiAndPrefAlign {
self.layout_of(ty).align
pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
self.layout_of(ty).align.abi
}
pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
self.layout_of(ty).size
}
pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, AbiAndPrefAlign) {
pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
let layout = self.layout_of(ty);
(layout.size, layout.align)
(layout.size, layout.align.abi)
}
}
......@@ -197,7 +197,7 @@ pub enum PointerKind {
#[derive(Copy, Clone)]
pub struct PointeeInfo {
pub size: Size,
pub align: AbiAndPrefAlign,
pub align: Align,
pub safe: Option<PointerKind>,
}
......@@ -333,7 +333,7 @@ fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
layout::Pointer => {
// If we know the alignment, pick something better than i8.
let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
cx.type_pointee_for_abi_align(pointee.align)
cx.type_pointee_for_align(pointee.align)
} else {
cx.type_i8()
};
......@@ -377,7 +377,7 @@ fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
let offset = if index == 0 {
Size::ZERO
} else {
a.value.size(cx).abi_align(b.value.align(cx))
a.value.size(cx).align_to(b.value.align(cx).abi)
};
self.scalar_llvm_type_at(cx, scalar, offset)
}
......
......@@ -31,7 +31,7 @@
use rustc::middle::weak_lang_items;
use rustc::mir::mono::{Stats, CodegenUnitNameBuilder};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, AbiAndPrefAlign, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::query::Providers;
use rustc::middle::cstore::{self, LinkagePreference};
use rustc::util::common::{time, print_time_passes_entry};
......@@ -410,9 +410,9 @@ pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
dst: Bx::Value,
dst_align: AbiAndPrefAlign,
dst_align: Align,
src: Bx::Value,
src_align: AbiAndPrefAlign,
src_align: Align,
layout: TyLayout<'tcx>,
flags: MemFlags,
) {
......
......@@ -41,7 +41,7 @@ pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
llvtable,
bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty))
);
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
let ptr = bx.load(gep, ptr_align);
bx.nonnull_metadata(ptr);
......@@ -59,7 +59,7 @@ pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
debug!("get_int({:?}, {:?})", llvtable, self);
let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
let usize_align = bx.tcx().data_layout.pointer_align;
let usize_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
let ptr = bx.load(gep, usize_align);
// Vtable loads are invariant
......@@ -112,7 +112,7 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
].iter().cloned().chain(methods).collect();
let vtable_const = cx.const_struct(&components, false);
let align = cx.data_layout().pointer_align;
let align = cx.data_layout().pointer_align.abi;
let vtable = cx.static_addr_of(vtable_const, align, Some("vtable"));
cx.create_vtable_metadata(ty, vtable);
......
......@@ -280,7 +280,7 @@ fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
scratch.llval
}
Ref(llval, _, align) => {
assert_eq!(align.abi, op.layout.align.abi,
assert_eq!(align, op.layout.align.abi,
"return place is unaligned!");
llval
}
......@@ -288,7 +288,7 @@ fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let addr = bx.pointercast(llslot, bx.cx().type_ptr_to(
bx.cx().cast_backend_type(&cast_ty)
));
bx.load(addr, self.fn_ty.ret.layout.align)
bx.load(addr, self.fn_ty.ret.layout.align.abi)
}
};
bx.ret(llval);
......@@ -386,9 +386,9 @@ fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let filename = bx.cx().const_str_slice(filename);
let line = bx.cx().const_u32(loc.line as u32);
let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
let align = tcx.data_layout.aggregate_align
.max(tcx.data_layout.i32_align)
.max(tcx.data_layout.pointer_align);
let align = tcx.data_layout.aggregate_align.abi
.max(tcx.data_layout.i32_align.abi)
.max(tcx.data_layout.pointer_align.abi);
// Put together the arguments to the panic entry point.
let (lang_item, args) = match *msg {
......@@ -522,9 +522,9 @@ fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let filename = bx.cx().const_str_slice(filename);
let line = bx.cx().const_u32(loc.line as u32);
let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
let align = tcx.data_layout.aggregate_align
.max(tcx.data_layout.i32_align)
.max(tcx.data_layout.pointer_align);
let align = tcx.data_layout.aggregate_align.abi
.max(tcx.data_layout.i32_align.abi)
.max(tcx.data_layout.pointer_align.abi);
let str = format!(
"Attempted to instantiate uninhabited type {} using mem::{}",
......@@ -800,12 +800,12 @@ fn codegen_argument(
(scratch.llval, scratch.align, true)
}
_ => {
(op.immediate_or_packed_pair(bx), arg.layout.align, false)
(op.immediate_or_packed_pair(bx), arg.layout.align.abi, false)
}
}
}
Ref(llval, _, align) => {
if arg.is_indirect() && align.abi < arg.layout.align.abi {
if arg.is_indirect() && align < arg.layout.align.abi {
// `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
......@@ -826,7 +826,7 @@ fn codegen_argument(
let addr = bx.pointercast(llval, bx.cx().type_ptr_to(
bx.cx().cast_backend_type(&ty))
);
llval = bx.load(addr, align.min(arg.layout.align));
llval = bx.load(addr, align.min(arg.layout.align.abi));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
......@@ -1006,7 +1006,7 @@ fn make_return_dest(
self.codegen_place(bx, dest)
};
if fn_ret.is_indirect() {
if dest.align.abi < dest.layout.align.abi {
if dest.align < dest.layout.align.abi {
// Currently, MIR code generation does not create calls
// that store directly to fields of packed structs (in
// fact, the calls it creates write only to temps),
......@@ -1062,7 +1062,7 @@ fn codegen_transmute_into(
let src = self.codegen_operand(bx, src);
let llty = bx.cx().backend_type(src.layout);
let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty));
let align = src.layout.align.min(dst.layout.align);
let align = src.layout.align.abi.min(dst.align);
src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
}
......
......@@ -304,7 +304,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = fx.cx.get_param(llfn, 0);
LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align))
LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align.abi))
} else if memory_locals.contains(local) {
debug!("alloc: {:?} -> place", local);
if layout.is_unsized() {
......@@ -555,7 +555,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(llarg, &name);
llarg_idx += 1;
PlaceRef::new_sized(llarg, arg.layout, arg.layout.align)
PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi)
} else if arg.is_unsized_indirect() {
// As the storage for the indirect argument lives during
// the whole function call, we just copy the fat pointer.
......
......@@ -11,7 +11,7 @@
use rustc::mir::interpret::{ConstValue, ErrorHandled};
use rustc::mir;
use rustc::ty;
use rustc::ty::layout::{self, Align, AbiAndPrefAlign, LayoutOf, TyLayout};
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use base;
use MemFlags;
......@@ -33,7 +33,7 @@ pub enum OperandValue<V> {
/// to be valid for the operand's lifetime.
/// The second value, if any, is the extra data (vtable or length)
/// which indicates that it refers to an unsized rvalue.
Ref(V, Option<V>, AbiAndPrefAlign),
Ref(V, Option<V>, Align),
/// A single LLVM value.
Immediate(V),
/// A pair of immediate LLVM values. Used by fat pointers too.
......@@ -152,7 +152,7 @@ pub fn deref<Cx: CodegenMethods<'tcx, Value = V>>(
llval: llptr,
llextra,
layout,
align: layout.align,
align: layout.align.abi,
}
}
......@@ -228,7 +228,7 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
OperandValue::Immediate(a_llval)
} else {
assert_eq!(offset, a.value.size(bx.cx())
.abi_align(b.value.align(bx.cx())));
.align_to(b.value.align(bx.cx()).abi));
assert_eq!(field.size, b.value.size(bx.cx()));
OperandValue::Immediate(b_llval)
}
......@@ -348,8 +348,8 @@ pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
};
// FIXME: choose an appropriate alignment, or use dynamic align somehow
let max_align = AbiAndPrefAlign::new(Align::from_bits(128).unwrap());
let min_align = AbiAndPrefAlign::new(Align::from_bits(8).unwrap());
let max_align = Align::from_bits(128).unwrap();
let min_align = Align::from_bits(8).unwrap();
// Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
......@@ -470,7 +470,7 @@ pub fn codegen_operand(
bx.load_operand(PlaceRef::new_sized(
bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))),
layout,
layout.align,
layout.align.abi,
))
})
}
......
......@@ -9,7 +9,7 @@
// except according to those terms.
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, AbiAndPrefAlign, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use MemFlags;
......@@ -33,14 +33,14 @@ pub struct PlaceRef<'tcx, V> {
pub layout: TyLayout<'tcx>,
/// What alignment we know for this place
pub align: AbiAndPrefAlign,
pub align: Align,
}
impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn new_sized(
llval: V,
layout: TyLayout<'tcx>,
align: AbiAndPrefAlign,
align: Align,
) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
PlaceRef {
......@@ -58,8 +58,8 @@ pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
) -> Self {
debug!("alloca({:?}: {:?})", name, layout);
assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align);
Self::new_sized(tmp, layout, layout.align)
let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
Self::new_sized(tmp, layout, layout.align.abi)
}
/// Returns a place for an indirect reference to an unsized place.
......@@ -101,7 +101,7 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
) -> Self {
let field = self.layout.field(bx.cx(), ix);
let offset = self.layout.fields.offset(ix);
let effective_field_align = self.align.abi.restrict_for_offset(offset);
let effective_field_align = self.align.restrict_for_offset(offset);
let mut simple = || {
// Unions and newtypes only use an offset of 0.
......@@ -109,7 +109,7 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self.llval
} else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
// Offsets have to match either first or second field.
assert_eq!(offset, a.value.size(bx.cx()).abi_align(b.value.align(bx.cx())));
assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
bx.struct_gep(self.llval, 1)
} else {
bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
......@@ -123,7 +123,7 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
None
},
layout: field,
align: AbiAndPrefAlign::new(effective_field_align),
align: effective_field_align,
}
};
......@@ -197,7 +197,7 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
llextra: self.llextra,
layout: field,
align: AbiAndPrefAlign::new(effective_field_align),
align: effective_field_align,
}
}
......@@ -418,13 +418,13 @@ pub fn codegen_place(
let llval = bx.cx().const_undef(
bx.cx().type_ptr_to(bx.cx().backend_type(layout))
);
PlaceRef::new_sized(llval, layout, layout.align)
PlaceRef::new_sized(llval, layout, layout.align.abi)
}
}
}
mir::Place::Static(box mir::Static { def_id, ty }) => {
let layout = cx.layout_of(self.monomorphize(&ty));
PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align)
PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align.abi)
},
mir::Place::Projection(box mir::Projection {
ref base,
......
......@@ -17,7 +17,7 @@
use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
use mir::operand::OperandRef;
use mir::place::PlaceRef;
use rustc::ty::layout::{AbiAndPrefAlign, Size};
use rustc::ty::layout::{Align, Size};
use std::ffi::CStr;
use MemFlags;
......@@ -97,18 +97,17 @@ fn invoke(
fn fneg(&mut self, v: Self::Value) -> Self::Value;
fn not(&mut self, v: Self::Value) -> Self::Value;
fn alloca(&mut self, ty: Self::Type, name: &str, align: AbiAndPrefAlign) -> Self::Value;
fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: AbiAndPrefAlign)
-> Self::Value;
fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
fn array_alloca(
&mut self,
ty: Self::Type,
len: Self::Value,
name: &str,
align: AbiAndPrefAlign,
align: Align,
) -> Self::Value;
fn load(&mut self, ptr: Self::Value, align: AbiAndPrefAlign) -> Self::Value;
fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
......@@ -117,12 +116,12 @@ fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
fn range_metadata(&mut self, load: Self::Value, range: Range<u128>);
fn nonnull_metadata(&mut self, load: Self::Value);
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: AbiAndPrefAlign) -> Self::Value;
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
fn store_with_flags(
&mut self,
val: Self::Value,
ptr: Self::Value,
align: AbiAndPrefAlign,
align: Align,
flags: MemFlags,
) -> Self::Value;
fn atomic_store(
......@@ -175,18 +174,18 @@ fn inline_asm_call(
fn memcpy(
&mut self,
dst: Self::Value,
dst_align: AbiAndPrefAlign,
dst_align: Align,
src: Self::Value,
src_align: AbiAndPrefAlign,
src_align: Align,
size: Self::Value,
flags: MemFlags,
);
fn memmove(
&mut self,
dst: Self::Value,
dst_align: AbiAndPrefAlign,
dst_align: Align,
src: Self::Value,
src_align: AbiAndPrefAlign,
src_align: Align,
size: Self::Value,
flags: MemFlags,
);
......@@ -195,7 +194,7 @@ fn memset(
ptr: Self::Value,
fill_byte: Self::Value,
size: Self::Value,
align: AbiAndPrefAlign,
align: Align,
flags: MemFlags,
);
......
......@@ -10,23 +10,13 @@
use super::Backend;
use rustc::hir::def_id::DefId;
use rustc::ty::layout::AbiAndPrefAlign;
use rustc::ty::layout::Align;
pub trait StaticMethods<'tcx>: Backend<'tcx> {
fn static_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn static_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn static_addr_of_mut(
&self,
cv: Self::Value,
align: AbiAndPrefAlign,
kind: Option<&str>,
) -> Self::Value;
fn static_addr_of(
&self,
cv: Self::Value,
align: AbiAndPrefAlign,
kind: Option<&str>,
) -> Self::Value;
fn static_addr_of_mut(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
fn static_addr_of(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
fn get_static(&self, def_id: DefId) -> Self::Value;
fn codegen_static(&self, def_id: DefId, is_mutable: bool);
unsafe fn static_replace_all_uses(&self, old_g: Self::Value, new_g: Self::Value);
......
......@@ -13,7 +13,7 @@
use super::HasCodegen;
use common::{self, TypeKind};
use mir::place::PlaceRef;
use rustc::ty::layout::{self, AbiAndPrefAlign, Size, TyLayout};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::ty::{self, Ty};
use rustc::util::nodemap::FxHashMap;
use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
......@@ -120,16 +120,16 @@ fn type_from_integer(&self, i: layout::Integer) -> Self::Type {
}
}
fn type_pointee_for_abi_align(&self, align: AbiAndPrefAlign) -> Self::Type {
fn type_pointee_for_align(&self, align: Align) -> Self::Type {
// FIXME(eddyb) We could find a better approximation if ity.align < align.
let ity = layout::Integer::approximate_abi_align(self, align);
let ity = layout::Integer::approximate_align(self, align);
self.type_from_integer(ity)
}
/// Return a LLVM type that has at most the required alignment,
/// and exactly the required size, as a best-effort padding array.
fn type_padding_filler(&self, size: Size, align: AbiAndPrefAlign) -> Self::Type {
let unit = layout::Integer::approximate_abi_align(self, align);
fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type {
let unit = layout::Integer::approximate_align(self, align);
let size = size.bytes();
let unit_size = unit.size().bytes();
assert_eq!(size % unit_size, 0);
......
......@@ -129,7 +129,7 @@ pub fn op_to_const<'tcx>(
assert!(meta.is_none());
let ptr = ptr.to_ptr()?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi >= align.abi);
assert!(alloc.align >= align);
assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes());
let mut alloc = alloc.clone();
alloc.align = align;
......
......@@ -16,7 +16,7 @@
use rustc::hir::def::Def;
use rustc::mir;
use rustc::ty::layout::{
self, Size, AbiAndPrefAlign, HasDataLayout, LayoutOf, TyLayout
self, Size, Align, HasDataLayout, LayoutOf, TyLayout
};
use rustc::ty::subst::{Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
......@@ -314,9 +314,9 @@ pub(super) fn size_and_align_of(
&self,
metadata: Option<Scalar<M::PointerTag>>,
layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Size, AbiAndPrefAlign)>> {
) -> EvalResult<'tcx, Option<(Size, Align)>> {
if !layout.is_unsized() {
return Ok(Some((layout.size, layout.align)));
return Ok(Some((layout.size, layout.align.abi)));
}
match layout.ty.sty {
ty::Adt(..) | ty::Tuple(..) => {
......@@ -328,7 +328,7 @@ pub(super) fn size_and_align_of(
trace!("DST layout: {:?}", layout);
let sized_size = layout.fields.offset(layout.fields.count() - 1);
let sized_align = layout.align;
let sized_align = layout.align.abi;
trace!(
"DST {} statically sized prefix size: {:?} align: {:?}",
layout.ty,
......@@ -381,7 +381,7 @@ pub(super) fn size_and_align_of(
//
// `(size + (align-1)) & -align`
Ok(Some((size.abi_align(align), align)))
Ok(Some((size.align_to(align), align)))
}
ty::Dynamic(..) => {
let vtable = metadata.expect("dyn trait fat ptr must have vtable").to_ptr()?;
......@@ -392,7 +392,7 @@ pub(super) fn size_and_align_of(
ty::Slice(_) | ty::Str => {
let len = metadata.expect("slice fat ptr must have vtable").to_usize(self)?;
let elem = layout.field(self, 0)?;
Ok(Some((elem.size * len, elem.align)))
Ok(Some((elem.size * len, elem.align.abi)))
}
ty::Foreign(_) => {
......@@ -406,7 +406,7 @@ pub(super) fn size_and_align_of(
pub fn size_and_align_of_mplace(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, Option<(Size, AbiAndPrefAlign)>> {
) -> EvalResult<'tcx, Option<(Size, Align)>> {
self.size_and_align_of(mplace.meta, mplace.layout)
}
......@@ -636,7 +636,7 @@ pub fn dump_place(&self, place: Place<M::PointerTag>) {
let (ptr, align) = mplace.to_scalar_ptr_align();
match ptr {
Scalar::Ptr(ptr) => {
write!(msg, " by align({}) ref:", align.abi.bytes()).unwrap();
write!(msg, " by align({}) ref:", align.bytes()).unwrap();
allocs.push(ptr.alloc_id);
}
ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
......@@ -665,7 +665,7 @@ pub fn dump_place(&self, place: Place<M::PointerTag>) {
Place::Ptr(mplace) => {
match mplace.ptr {
Scalar::Ptr(ptr) => {
trace!("by align({}) ref:", mplace.align.abi.bytes());
trace!("by align({}) ref:", mplace.align.bytes());
self.memory.dump_alloc(ptr.alloc_id);
}
ptr => trace!(" integral by ref: {:?}", ptr),
......
......@@ -21,7 +21,7 @@
use std::borrow::Cow;
use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{self, Align, AbiAndPrefAlign, TargetDataLayout, Size, HasDataLayout};
use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout};
pub use rustc::mir::interpret::{truncate, write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
......@@ -71,7 +71,7 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> {
/// To be able to compare pointers with NULL, and to check alignment for accesses
/// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
/// that do not exist any more.
dead_alloc_map: FxHashMap<AllocId, (Size, AbiAndPrefAlign)>,
dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
/// Lets us implement `HasDataLayout`, which is awfully convenient.
pub(super) tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
......@@ -130,7 +130,7 @@ pub fn allocate_with(
pub fn allocate(
&mut self,
size: Size,
align: AbiAndPrefAlign,
align: Align,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, Pointer> {
Ok(Pointer::from(self.allocate_with(Allocation::undef(size, align), kind)?))
......@@ -140,9 +140,9 @@ pub fn reallocate(
&mut self,
ptr: Pointer<M::PointerTag>,
old_size: Size,
old_align: AbiAndPrefAlign,
old_align: Align,
new_size: Size,
new_align: AbiAndPrefAlign,
new_align: Align,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, Pointer> {
if ptr.offset.bytes() != 0 {
......@@ -179,7 +179,7 @@ pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> EvalResult<'t
pub fn deallocate(
&mut self,
ptr: Pointer<M::PointerTag>,
size_and_align: Option<(Size, AbiAndPrefAlign)>,
size_and_align: Option<(Size, Align)>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx> {
trace!("deallocating: {}", ptr.alloc_id);
......@@ -244,7 +244,7 @@ pub fn deallocate(
pub fn check_align(
&self,
ptr: Scalar<M::PointerTag>,
required_align: AbiAndPrefAlign
required_align: Align
) -> EvalResult<'tcx> {
// Check non-NULL/Undef, extract offset
let (offset, alloc_align) = match ptr {
......@@ -268,18 +268,18 @@ pub fn check_align(
}
};
// Check alignment
if alloc_align.abi < required_align.abi {
if alloc_align.bytes() < required_align.bytes() {
return err!(AlignmentCheckFailed {
has: alloc_align,
required: required_align,
});
}
if offset % required_align.abi.bytes() == 0 {
if offset % required_align.bytes() == 0 {
Ok(())
} else {
let has = offset % required_align.abi.bytes();
let has = offset % required_align.bytes();
err!(AlignmentCheckFailed {
has: AbiAndPrefAlign::new(Align::from_bytes(has).unwrap()),
has: Align::from_bytes(has).unwrap(),
required: required_align,
})
}
......@@ -443,22 +443,20 @@ pub fn get_mut(
}
}
pub fn get_size_and_align(&self, id: AllocId) -> (Size, AbiAndPrefAlign) {
pub fn get_size_and_align(&self, id: AllocId) -> (Size, Align) {
if let Ok(alloc) = self.get(id) {
return (Size::from_bytes(alloc.bytes.len() as u64), alloc.align);
}
// Could also be a fn ptr or extern static
match self.tcx.alloc_map.lock().get(id) {
Some(AllocType::Function(..)) => {
(Size::ZERO, AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()))
}
Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1).unwrap()),
Some(AllocType::Static(did)) => {
// The only way `get` couldn't have worked here is if this is an extern static
assert!(self.tcx.is_foreign_item(did));
// Use size and align of the type
let ty = self.tcx.type_of(did);
let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
(layout.size, layout.align)
(layout.size, layout.align.abi)
}
_ => {
// Must be a deallocated pointer
......@@ -523,7 +521,7 @@ fn dump_alloc_helper<Tag, Extra>(
"{}({} bytes, alignment {}){}",
msg,
alloc.bytes.len(),
alloc.align.abi.bytes(),
alloc.align.bytes(),
extra
);
......@@ -624,7 +622,7 @@ fn get_bytes_internal(
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: AbiAndPrefAlign,
align: Align,
check_defined_and_ptr: bool,
) -> EvalResult<'tcx, &[u8]> {
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
......@@ -653,7 +651,7 @@ fn get_bytes(
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: AbiAndPrefAlign
align: Align
) -> EvalResult<'tcx, &[u8]> {
self.get_bytes_internal(ptr, size, align, true)
}
......@@ -665,7 +663,7 @@ fn get_bytes_with_undef_and_ptr(
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: AbiAndPrefAlign
align: Align
) -> EvalResult<'tcx, &[u8]> {
self.get_bytes_internal(ptr, size, align, false)
}
......@@ -676,7 +674,7 @@ fn get_bytes_mut(
&mut self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: AbiAndPrefAlign,
align: Align,
) -> EvalResult<'tcx, &mut [u8]> {
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
self.check_align(ptr.into(), align)?;
......@@ -749,9 +747,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn copy(
&mut self,
src: Scalar<M::PointerTag>,
src_align: AbiAndPrefAlign,
src_align: Align,
dest: Scalar<M::PointerTag>,
dest_align: AbiAndPrefAlign,
dest_align: Align,
size: Size,
nonoverlapping: bool,
) -> EvalResult<'tcx> {
......@@ -761,9 +759,9 @@ pub fn copy(
pub fn copy_repeatedly(
&mut self,
src: Scalar<M::PointerTag>,
src_align: AbiAndPrefAlign,
src_align: Align,
dest: Scalar<M::PointerTag>,
dest_align: AbiAndPrefAlign,
dest_align: Align,
size: Size,
length: u64,
nonoverlapping: bool,
......@@ -865,7 +863,7 @@ pub fn check_bytes(
allow_ptr_and_undef: bool,
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = AbiAndPrefAlign::new(Align::from_bytes(1).unwrap());
let align = Align::from_bytes(1).unwrap();
if size.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(());
......@@ -883,7 +881,7 @@ pub fn check_bytes(
pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = AbiAndPrefAlign::new(Align::from_bytes(1).unwrap());
let align = Align::from_bytes(1).unwrap();
if size.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(&[]);
......@@ -893,7 +891,7 @@ pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'
pub fn write_bytes(&mut self, ptr: Scalar<M::PointerTag>, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = AbiAndPrefAlign::new(Align::from_bytes(1).unwrap());
let align = Align::from_bytes(1).unwrap();
if src.is_empty() {
self.check_align(ptr, align)?;
return Ok(());
......@@ -910,7 +908,7 @@ pub fn write_repeat(
count: Size
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = AbiAndPrefAlign::new(Align::from_bytes(1).unwrap());
let align = Align::from_bytes(1).unwrap();
if count.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(());
......@@ -926,7 +924,7 @@ pub fn write_repeat(
pub fn read_scalar(
&self,
ptr: Pointer<M::PointerTag>,
ptr_align: AbiAndPrefAlign,
ptr_align: Align,
size: Size
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
// get_bytes_unchecked tests alignment and relocation edges
......@@ -963,7 +961,7 @@ pub fn read_scalar(
pub fn read_ptr_sized(
&self,
ptr: Pointer<M::PointerTag>,
ptr_align: AbiAndPrefAlign
ptr_align: Align
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
self.read_scalar(ptr, ptr_align, self.pointer_size())
}
......@@ -972,7 +970,7 @@ pub fn read_ptr_sized(
pub fn write_scalar(
&mut self,
ptr: Pointer<M::PointerTag>,
ptr_align: AbiAndPrefAlign,
ptr_align: Align,
val: ScalarMaybeUndef<M::PointerTag>,
type_size: Size,
) -> EvalResult<'tcx> {
......@@ -1019,14 +1017,14 @@ pub fn write_scalar(
pub fn write_ptr_sized(
&mut self,
ptr: Pointer<M::PointerTag>,
ptr_align: AbiAndPrefAlign,
ptr_align: Align,
val: ScalarMaybeUndef<M::PointerTag>
) -> EvalResult<'tcx> {
let ptr_size = self.pointer_size();
self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
}
fn int_align(&self, size: Size) -> AbiAndPrefAlign {
fn int_align(&self, size: Size) -> Align {
// We assume pointer-sized integers have the same alignment as pointers.
// We also assume signed and unsigned integers of the same size have the same alignment.
let ity = match size.bytes() {
......@@ -1037,7 +1035,7 @@ fn int_align(&self, size: Size) -> AbiAndPrefAlign {
16 => layout::I128,
_ => bug!("bad integer size: {}", size.bytes()),
};
ity.align(self)
ity.align(self).abi
}
}
......
......@@ -285,7 +285,7 @@ pub(super) fn try_read_immediate_from_mplace(
let (a, b) = (&a.value, &b.value);
let (a_size, b_size) = (a.size(self), b.size(self));
let a_ptr = ptr;
let b_offset = a_size.abi_align(b.align(self));
let b_offset = a_size.align_to(b.align(self).abi);
assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use
let b_ptr = ptr.offset(b_offset, self)?.into();
let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
......
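
In the hunk above, the second scalar of a pair no longer starts at `a_size` directly; its offset is `a_size` rounded up to `b`'s ABI alignment. A worked example of that rounding (editor's sketch, not commit code), for a pair laid out like `(u8, u32)`:

    fn main() {
        let a_size: u64 = 1;  // first scalar occupies 1 byte
        let b_align: u64 = 4; // second scalar's ABI alignment
        // equivalent of `a_size.align_to(b_align)`
        let b_offset = (a_size + b_align - 1) & !(b_align - 1);
        assert_eq!(b_offset, 4); // the u32 starts at byte 4, not byte 1
    }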
......@@ -18,8 +18,7 @@
use rustc::hir;
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Size, Align,
AbiAndPrefAlign, LayoutOf, TyLayout, HasDataLayout, VariantIdx};
use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout, VariantIdx};
use super::{
GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic,
......@@ -33,7 +32,7 @@ pub struct MemPlace<Tag=(), Id=AllocId> {
/// be turned back into a reference before ever being dereferenced.
/// However, it may never be undef.
pub ptr: Scalar<Tag, Id>,
pub align: AbiAndPrefAlign,
pub align: Align,
/// Metadata for unsized places. Interpretation is up to the type.
/// Must not be present for sized types, but can be missing for unsized types
/// (e.g. `extern type`).
......@@ -117,7 +116,7 @@ pub fn erase_tag(self) -> MemPlace
}
#[inline(always)]
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: AbiAndPrefAlign) -> Self {
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
MemPlace {
ptr,
align,
......@@ -128,17 +127,16 @@ pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: AbiAndPrefAlign) -> Self {
/// Produces a Place that will error if attempted to be read from or written to
#[inline(always)]
pub fn null(cx: &impl HasDataLayout) -> Self {
Self::from_scalar_ptr(Scalar::ptr_null(cx),
AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()))
Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1).unwrap())
}
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Tag>, align: AbiAndPrefAlign) -> Self {
pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
Self::from_scalar_ptr(ptr.into(), align)
}
#[inline(always)]
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, AbiAndPrefAlign) {
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, Align) {
assert!(self.meta.is_none());
(self.ptr, self.align)
}
......@@ -170,7 +168,7 @@ pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
MPlaceTy {
mplace: MemPlace::from_scalar_ptr(
Scalar::from_uint(layout.align.abi.bytes(), cx.pointer_size()),
layout.align
layout.align.abi
),
layout
}
......@@ -178,7 +176,7 @@ pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
#[inline]
fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyLayout<'tcx>) -> Self {
MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout }
MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
}
#[inline]
......@@ -232,12 +230,12 @@ pub fn null(cx: &impl HasDataLayout) -> Self {
}
#[inline(always)]
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: AbiAndPrefAlign) -> Self {
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
Place::Ptr(MemPlace::from_scalar_ptr(ptr, align))
}
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Tag>, align: AbiAndPrefAlign) -> Self {
pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
Place::Ptr(MemPlace::from_ptr(ptr, align))
}
......@@ -251,7 +249,7 @@ pub fn to_mem_place(self) -> MemPlace<Tag> {
}
#[inline]
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, AbiAndPrefAlign) {
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, Align) {
self.to_mem_place().to_scalar_ptr_align()
}
......@@ -289,7 +287,7 @@ pub fn ref_to_mplace(
let mplace = MemPlace {
ptr: val.to_scalar_ptr()?,
align: layout.align,
align: layout.align.abi,
meta: val.to_meta()?,
};
Ok(MPlaceTy { mplace, layout })
......@@ -358,11 +356,11 @@ pub fn mplace_field(
// FIXME: Once we have made decisions for how to handle size and alignment
// of `extern type`, this should be adapted. It is just a temporary hack
// to get some code to work that probably ought to work.
field_layout.align,
field_layout.align.abi,
None =>
bug!("Cannot compute offset for extern type field at non-0 offset"),
};
(base.meta, offset.abi_align(align))
(base.meta, offset.align_to(align))
} else {
// base.meta could be present; we might be accessing a sized field of an unsized
// struct.
......@@ -370,10 +368,10 @@ pub fn mplace_field(
};
let ptr = base.ptr.ptr_offset(offset, self)?;
let align = AbiAndPrefAlign::new(base.align.abi
let align = base.align
// We do not look at `base.layout.align` nor `field_layout.align`, unlike
// codegen -- mostly to see if we can get away with that
.restrict_for_offset(offset)); // must be last thing that happens
.restrict_for_offset(offset); // must be last thing that happens
Ok(MPlaceTy { mplace: MemPlace { ptr, align, meta }, layout: field_layout })
}
......@@ -732,7 +730,7 @@ fn write_immediate_to_mplace_no_validate(
}
self.memory.write_scalar(
ptr, ptr_align.min(dest.layout.align), scalar, dest.layout.size
ptr, ptr_align.min(dest.layout.align.abi), scalar, dest.layout.size
)
}
Immediate::ScalarPair(a_val, b_val) => {
......@@ -742,8 +740,8 @@ fn write_immediate_to_mplace_no_validate(
dest.layout)
};
let (a_size, b_size) = (a.size(self), b.size(self));
let (a_align, b_align) = (a.align(self), b.align(self));
let b_offset = a_size.abi_align(b_align);
let (a_align, b_align) = (a.align(self).abi, b.align(self).abi);
let b_offset = a_size.align_to(b_align);
let b_ptr = ptr.offset(b_offset, self)?.into();
// It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
......@@ -901,7 +899,7 @@ pub fn allocate(
// FIXME: What should we do here? We should definitely also tag!
Ok(MPlaceTy::dangling(layout, self))
} else {
let ptr = self.memory.allocate(layout.size, layout.align, kind)?;
let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?;
let ptr = M::tag_new_allocation(self, ptr, kind)?;
Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
}
......@@ -1001,7 +999,7 @@ pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx, M::PointerTag>)
let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
assert_eq!(size, layout.size);
// only ABI alignment is preserved
assert_eq!(align.abi, layout.align.abi);
assert_eq!(align, layout.align.abi);
}
let mplace = MPlaceTy {
......
......@@ -16,7 +16,7 @@
};
use rustc::ty::{self, TyCtxt};
use rustc::ty::layout::AbiAndPrefAlign;
use rustc::ty::layout::Align;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
......@@ -276,7 +276,7 @@ struct AllocationSnapshot<'a> {
bytes: &'a [u8],
relocations: Relocations<(), AllocIdSnapshot<'a>>,
undef_mask: &'a UndefMask,
align: &'a AbiAndPrefAlign,
align: &'a Align,
mutability: &'a Mutability,
}
......
......@@ -401,7 +401,7 @@ fn eval_fn_call(
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let ptr_align = self.tcx.data_layout.pointer_align.abi;
let ptr = self.deref_operand(args[0])?;
let vtable = ptr.vtable()?;
let fn_ptr = self.memory.read_ptr_sized(
......
......@@ -9,7 +9,7 @@
// except according to those terms.
use rustc::ty::{self, Ty};
use rustc::ty::layout::{Size, Align, AbiAndPrefAlign, LayoutOf};
use rustc::ty::layout::{Size, Align, LayoutOf};
use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
use super::{EvalContext, Machine, MemoryKind};
......@@ -45,7 +45,7 @@ pub fn get_vtable(
let align = layout.align.abi.bytes();
let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let ptr_align = self.tcx.data_layout.pointer_align.abi;
// /////////////////////////////////////////////////////////////////////////////////////////
// If you touch this code, be sure to also make the corresponding changes to
// `get_vtable` in rust_codegen_llvm/meth.rs
......@@ -87,7 +87,7 @@ pub fn read_drop_type_from_vtable(
vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> {
// we don't care about the pointee type, we just want a pointer
let pointer_align = self.tcx.data_layout.pointer_align;
let pointer_align = self.tcx.data_layout.pointer_align.abi;
let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?;
let drop_instance = self.memory.get_fn(drop_fn)?;
trace!("Found drop fn: {:?}", drop_instance);
......@@ -101,15 +101,15 @@ pub fn read_drop_type_from_vtable(
pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (Size, AbiAndPrefAlign)> {
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
let pointer_align = self.tcx.data_layout.pointer_align.abi;
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)?
.to_bits(pointer_size)? as u64;
let align = self.memory.read_ptr_sized(
vtable.offset(pointer_size * 2, self)?,
pointer_align
)?.to_bits(pointer_size)? as u64;
Ok((Size::from_bytes(size), AbiAndPrefAlign::new(Align::from_bytes(align).unwrap())))
Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))
}
}
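
The reads above assume the vtable layout that `get_vtable` emits: one pointer-sized slot each for the drop function, the size, and the ABI alignment. A small sketch of the offsets involved (editor's illustration; an 8-byte pointer size is assumed for concreteness):

    fn main() {
        let pointer_size: u64 = 8;           // assumed 64-bit target
        let drop_fn_offset = 0;              // word 0: drop-in-place fn pointer
        let size_offset = pointer_size;      // word 1: size of the concrete type, in bytes
        let align_offset = pointer_size * 2; // word 2: ABI alignment of the concrete type, in bytes
        assert_eq!((drop_fn_offset, size_offset, align_offset), (0, 8, 16));
    }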
......@@ -13,7 +13,7 @@
use std::ops::RangeInclusive;
use syntax_pos::symbol::Symbol;
use rustc::ty::layout::{self, Size, Align, AbiAndPrefAlign, TyLayout, LayoutOf, VariantIdx};
use rustc::ty::layout::{self, Size, Align, TyLayout, LayoutOf, VariantIdx};
use rustc::ty;
use rustc_data_structures::fx::FxHashSet;
use rustc::mir::interpret::{
......@@ -355,7 +355,7 @@ fn visit_primitive(&mut self, value: OpTy<'tcx, M::PointerTag>) -> EvalResult<'t
// for the purpose of validity, consider foreign types to have
// alignment and size determined by the layout (size will be 0,
// alignment should take attributes into account).
.unwrap_or_else(|| (layout.size, layout.align));
.unwrap_or_else(|| (layout.size, layout.align.abi));
match self.ecx.memory.check_align(ptr, align) {
Ok(_) => {},
Err(err) => {
......@@ -463,7 +463,7 @@ fn visit_scalar(
// for function pointers.
let non_null =
self.ecx.memory.check_align(
Scalar::Ptr(ptr), AbiAndPrefAlign::new(Align::from_bytes(1).unwrap())
Scalar::Ptr(ptr), Align::from_bytes(1).unwrap()
).is_ok() ||
self.ecx.memory.get_fn(ptr).is_ok();
if !non_null {
......
......@@ -27,21 +27,21 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
{
let dl = cx.data_layout();
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() {
arg.cast_to(Uniform {
unit: Reg::i32(),
total: size
});
if !offset.is_abi_aligned(align) {
if !offset.is_aligned(align) {
arg.pad_with(Reg::i32());
}
} else {
arg.extend_integer_width_to(32);
}
*offset = offset.abi_align(align) + size.abi_align(align);
*offset = offset.align_to(align) + size.align_to(align);
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
......
......@@ -118,9 +118,9 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
// We only care about aligned doubles
if let abi::Abi::Scalar(ref scalar) = field.abi {
if let abi::Float(abi::FloatTy::F64) = scalar.value {
if offset.is_abi_aligned(dl.f64_align) {
if offset.is_aligned(dl.f64_align.abi) {
// Insert enough integers to cover [last_offset, offset)
assert!(last_offset.is_abi_aligned(dl.f64_align));
assert!(last_offset.is_aligned(dl.f64_align.abi));
for _ in 0..((offset - last_offset).bits() / 64)
.min((prefix.len() - prefix_index) as u64) {
......
......@@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{self, Abi, AbiAndPrefAlign, FieldPlacement, Size};
use abi::{self, Abi, Align, FieldPlacement, Size};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
......@@ -80,7 +80,7 @@ pub struct ArgAttribute: u16 {
pub struct ArgAttributes {
pub regular: ArgAttribute,
pub pointee_size: Size,
pub pointee_align: Option<AbiAndPrefAlign>
pub pointee_align: Option<Align>
}
impl ArgAttributes {
......@@ -137,28 +137,28 @@ impl Reg {
}
impl Reg {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> AbiAndPrefAlign {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
let dl = cx.data_layout();
match self.kind {
RegKind::Integer => {
match self.size.bits() {
1 => dl.i1_align,
2..=8 => dl.i8_align,
9..=16 => dl.i16_align,
17..=32 => dl.i32_align,
33..=64 => dl.i64_align,
65..=128 => dl.i128_align,
1 => dl.i1_align.abi,
2..=8 => dl.i8_align.abi,
9..=16 => dl.i16_align.abi,
17..=32 => dl.i32_align.abi,
33..=64 => dl.i64_align.abi,
65..=128 => dl.i128_align.abi,
_ => panic!("unsupported integer: {:?}", self)
}
}
RegKind::Float => {
match self.size.bits() {
32 => dl.f32_align,
64 => dl.f64_align,
32 => dl.f32_align.abi,
64 => dl.f64_align.abi,
_ => panic!("unsupported float: {:?}", self)
}
}
RegKind::Vector => dl.vector_align(self.size)
RegKind::Vector => dl.vector_align(self.size).abi,
}
}
}
......@@ -188,7 +188,7 @@ fn from(unit: Reg) -> Uniform {
}
impl Uniform {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> AbiAndPrefAlign {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
self.unit.align(cx)
}
}
......@@ -227,13 +227,13 @@ pub fn pair(a: Reg, b: Reg) -> CastTarget {
pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
(self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64)
.abi_align(self.rest.align(cx)) + self.rest.total
.align_to(self.rest.align(cx)) + self.rest.total
}
pub fn align<C: HasDataLayout>(&self, cx: &C) -> AbiAndPrefAlign {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
self.prefix.iter()
.filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx)))
.fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)),
.fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)),
|acc, align| acc.max(align))
}
}
......@@ -369,7 +369,7 @@ pub fn make_indirect(&mut self) {
attrs.pointee_size = self.layout.size;
// FIXME(eddyb) We should be doing this, but at least on
// i686-pc-windows-msvc, it results in wrong stack offsets.
// attrs.pointee_align = Some(self.layout.align);
// attrs.pointee_align = Some(self.layout.align.abi);
let extra_attrs = if self.layout.is_unsized() {
Some(ArgAttributes::new())
......
......@@ -27,21 +27,21 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
{
let dl = cx.data_layout();
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() {
arg.cast_to(Uniform {
unit: Reg::i32(),
total: size
});
if !offset.is_abi_aligned(align) {
if !offset.is_aligned(align) {
arg.pad_with(Reg::i32());
}
} else {
arg.extend_integer_width_to(32);
}
*offset = offset.abi_align(align) + size.abi_align(align);
*offset = offset.align_to(align) + size.align_to(align);
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
......
......@@ -121,7 +121,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
// Aggregates larger than a doubleword should be padded
// at the tail to fill out a whole number of doublewords.
let reg_i64 = Reg::i64();
(reg_i64, size.abi_align(reg_i64.align(cx)))
(reg_i64, size.align_to(reg_i64.align(cx)))
};
arg.cast_to(Uniform {
......
......@@ -27,21 +27,21 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
{
let dl = cx.data_layout();
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() {
arg.cast_to(Uniform {
unit: Reg::i32(),
total: size
});
if !offset.is_abi_aligned(align) {
if !offset.is_aligned(align) {
arg.pad_with(Reg::i32());
}
} else {
arg.extend_integer_width_to(32);
}
*offset = offset.abi_align(align) + size.abi_align(align);
*offset = offset.align_to(align) + size.align_to(align);
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
......
......@@ -41,7 +41,7 @@ fn classify<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>,
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !off.is_abi_aligned(layout.align) {
if !off.is_aligned(layout.align.abi) {
if !layout.is_zst() {
return Err(Memory);
}
......
......@@ -277,14 +277,14 @@ pub fn bits(self) -> u64 {
}
#[inline]
pub fn abi_align(self, align: AbiAndPrefAlign) -> Size {
let mask = align.abi.bytes() - 1;
pub fn align_to(self, align: Align) -> Size {
let mask = align.bytes() - 1;
Size::from_bytes((self.bytes() + mask) & !mask)
}
#[inline]
pub fn is_abi_aligned(self, align: AbiAndPrefAlign) -> bool {
let mask = align.abi.bytes() - 1;
pub fn is_aligned(self, align: Align) -> bool {
let mask = align.bytes() - 1;
self.bytes() & mask == 0
}
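
Both helpers rely on `Align` holding a power of two, so `align.bytes() - 1` is a contiguous low-bit mask. A standalone sketch of the two operations with a worked case (editor's illustration, not commit code):

    fn align_to(size: u64, align: u64) -> u64 {
        let mask = align - 1;
        (size + mask) & !mask
    }

    fn is_aligned(size: u64, align: u64) -> bool {
        size & (align - 1) == 0
    }

    fn main() {
        // rounding 10 bytes up to an 8-byte boundary: (10 + 7) & !7 == 16
        assert_eq!(align_to(10, 8), 16);
        assert!(is_aligned(16, 8));
        assert!(!is_aligned(10, 8));
    }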
......@@ -425,7 +425,6 @@ pub fn restrict_for_offset(self, offset: Size) -> Align {
/// A pair of aligments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(PartialOrd, Ord)] // FIXME(eddyb) remove (error prone/incorrect)
pub struct AbiAndPrefAlign {
pub abi: Align,
pub pref: Align,
......@@ -510,10 +509,9 @@ pub fn fit_unsigned(x: u128) -> Integer {
}
/// Find the smallest integer with the given alignment.
pub fn for_abi_align<C: HasDataLayout>(cx: &C, align: AbiAndPrefAlign) -> Option<Integer> {
pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
let dl = cx.data_layout();
let wanted = align.abi;
for &candidate in &[I8, I16, I32, I64, I128] {
if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
return Some(candidate);
......@@ -523,10 +521,9 @@ pub fn for_abi_align<C: HasDataLayout>(cx: &C, align: AbiAndPrefAlign) -> Option
}
/// Find the largest integer with the given alignment or less.
pub fn approximate_abi_align<C: HasDataLayout>(cx: &C, align: AbiAndPrefAlign) -> Integer {
pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
let dl = cx.data_layout();
let wanted = align.abi;
// FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
for &candidate in &[I64, I32, I16] {
if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
......
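
For a sense of what the two lookups return, here is a table-driven sketch under the common assumption that each integer's ABI alignment equals its size (editor's illustration only; real targets take these values from the data layout):

    fn main() {
        // (size in bytes, ABI alignment in bytes) for I8..I128 on a typical 64-bit target
        let ints = [(1u64, 1u64), (2, 2), (4, 4), (8, 8), (16, 16)];
        let wanted = 4u64;
        // for_align: smallest integer whose alignment and size both equal `wanted`
        let exact = ints.iter().find(|&&(size, align)| align == wanted && size == wanted);
        assert_eq!(exact, Some(&(4, 4)));
        // approximate_align: largest of I64/I32/I16 whose alignment and size are <= `wanted`
        let approx = [(8u64, 8u64), (4, 4), (2, 2)]
            .iter()
            .find(|&&(size, align)| wanted >= align && wanted >= size)
            .copied();
        assert_eq!(approx, Some((4, 4)));
    }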