Commit 5b2f757d authored by Andreas Liljeqvist

Make `abi::Abi` `Copy` and remove a *lot* of refs

fix

fix

Remove more refs and clones

fix

more

fix
Parent 86ff6aeb
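The whole commit follows one mechanical pattern: once `Scalar` and `abi::Abi` are `Copy`, pattern matches can bind the scalars by value, and the `ref` patterns, `&`-matches and `.clone()` calls seen in the hunks below simply disappear. A minimal standalone sketch of that pattern, using simplified stand-ins rather than the real rustc definitions:

```rust
// Simplified stand-ins for the rustc types touched by this commit (illustration only).
#[derive(Copy, Clone, Debug, PartialEq)]
enum Primitive { Int, Pointer, F32, F64 }

#[derive(Copy, Clone, Debug)]
struct Scalar { value: Primitive }

#[derive(Copy, Clone, Debug)]
enum Abi {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector { element: Scalar, count: u64 },
}

// Before: callers matched on `&layout.abi` and cloned the scalars out:
//     match &layout.abi {
//         Abi::Scalar(scalar) => use_scalar(scalar.clone()),
//         ...
//     }
// After: with `Copy`, the same match binds by value and the clones go away.
fn describe(abi: Abi) -> &'static str {
    match abi {
        Abi::Scalar(s) if s.value == Primitive::Pointer => "pointer-sized scalar",
        Abi::Scalar(_) => "scalar",
        Abi::ScalarPair(_, _) => "scalar pair",
        Abi::Vector { .. } => "vector",
    }
}

fn main() {
    let abi = Abi::ScalarPair(Scalar { value: Primitive::Int }, Scalar { value: Primitive::Pointer });
    println!("{}", describe(abi)); // the Abi is copied into `describe`; no borrow, no clone
}
```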
...@@ -92,9 +92,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { ...@@ -92,9 +92,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> { fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
match self.mode { match self.mode {
PassMode::Ignore => smallvec![], PassMode::Ignore => smallvec![],
PassMode::Direct(attrs) => match &self.layout.abi { PassMode::Direct(attrs) => match self.layout.abi {
Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param( Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())), AbiParam::new(scalar_to_clif_type(tcx, scalar)),
attrs attrs
)], )],
Abi::Vector { .. } => { Abi::Vector { .. } => {
...@@ -103,10 +103,10 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { ...@@ -103,10 +103,10 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
} }
_ => unreachable!("{:?}", self.layout.abi), _ => unreachable!("{:?}", self.layout.abi),
}, },
PassMode::Pair(attrs_a, attrs_b) => match &self.layout.abi { PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
Abi::ScalarPair(a, b) => { Abi::ScalarPair(a, b) => {
let a = scalar_to_clif_type(tcx, a.clone()); let a = scalar_to_clif_type(tcx, a);
let b = scalar_to_clif_type(tcx, b.clone()); let b = scalar_to_clif_type(tcx, b);
smallvec![ smallvec![
apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a), apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b), apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
...@@ -139,9 +139,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { ...@@ -139,9 +139,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) { fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
match self.mode { match self.mode {
PassMode::Ignore => (None, vec![]), PassMode::Ignore => (None, vec![]),
PassMode::Direct(_) => match &self.layout.abi { PassMode::Direct(_) => match self.layout.abi {
Abi::Scalar(scalar) => { Abi::Scalar(scalar) => {
(None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))]) (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
} }
Abi::Vector { .. } => { Abi::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap(); let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
...@@ -149,10 +149,10 @@ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) ...@@ -149,10 +149,10 @@ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>)
} }
_ => unreachable!("{:?}", self.layout.abi), _ => unreachable!("{:?}", self.layout.abi),
}, },
PassMode::Pair(_, _) => match &self.layout.abi { PassMode::Pair(_, _) => match self.layout.abi {
Abi::ScalarPair(a, b) => { Abi::ScalarPair(a, b) => {
let a = scalar_to_clif_type(tcx, a.clone()); let a = scalar_to_clif_type(tcx, a);
let b = scalar_to_clif_type(tcx, b.clone()); let b = scalar_to_clif_type(tcx, b);
(None, vec![AbiParam::new(a), AbiParam::new(b)]) (None, vec![AbiParam::new(a), AbiParam::new(b)])
} }
_ => unreachable!("{:?}", self.layout.abi), _ => unreachable!("{:?}", self.layout.abi),
......
...@@ -143,8 +143,8 @@ ...@@ -143,8 +143,8 @@
} }
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> { pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
let (element, count) = match &layout.abi { let (element, count) = match layout.abi {
Abi::Vector { element, count } => (element.clone(), *count), Abi::Vector { element, count } => (element, count),
_ => unreachable!(), _ => unreachable!(),
}; };
......
...@@ -49,11 +49,7 @@ fn codegen_field<'tcx>( ...@@ -49,11 +49,7 @@ fn codegen_field<'tcx>(
} }
} }
fn scalar_pair_calculate_b_offset( fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
tcx: TyCtxt<'_>,
a_scalar: &Scalar,
b_scalar: &Scalar,
) -> Offset32 {
let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi); let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
Offset32::new(b_offset.bytes().try_into().unwrap()) Offset32::new(b_offset.bytes().try_into().unwrap())
} }
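For orientation on the hunk above: `scalar_pair_calculate_b_offset` now takes both `Scalar`s by value, and the quantity it computes is simply the first scalar's size rounded up to the second scalar's alignment. A self-contained sketch with plain integers standing in for rustc's `Size` and `Align`:

```rust
// Round `size_bytes` up to the next multiple of `align_bytes` (align_bytes must be non-zero).
fn align_to(size_bytes: u64, align_bytes: u64) -> u64 {
    (size_bytes + align_bytes - 1) / align_bytes * align_bytes
}

// Offset of the second half of a ScalarPair: right after the first scalar,
// padded so the second scalar's own alignment is satisfied.
fn b_offset(a_size: u64, b_align: u64) -> u64 {
    align_to(a_size, b_align)
}

fn main() {
    assert_eq!(b_offset(1, 4), 4); // e.g. (i8, i32): the i32 starts at offset 4
    assert_eq!(b_offset(8, 4), 8); // e.g. (i64, i32): no padding needed
    println!("ok");
}
```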
...@@ -124,12 +120,10 @@ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value { ...@@ -124,12 +120,10 @@ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
match self.0 { match self.0 {
CValueInner::ByRef(ptr, None) => { CValueInner::ByRef(ptr, None) => {
let clif_ty = match layout.abi { let clif_ty = match layout.abi {
Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()), Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
Abi::Vector { ref element, count } => { Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
scalar_to_clif_type(fx.tcx, element.clone()) .by(u16::try_from(count).unwrap())
.by(u16::try_from(count).unwrap()) .unwrap(),
.unwrap()
}
_ => unreachable!("{:?}", layout.ty), _ => unreachable!("{:?}", layout.ty),
}; };
let mut flags = MemFlags::new(); let mut flags = MemFlags::new();
...@@ -147,13 +141,13 @@ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Valu ...@@ -147,13 +141,13 @@ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Valu
let layout = self.1; let layout = self.1;
match self.0 { match self.0 {
CValueInner::ByRef(ptr, None) => { CValueInner::ByRef(ptr, None) => {
let (a_scalar, b_scalar) = match &layout.abi { let (a_scalar, b_scalar) = match layout.abi {
Abi::ScalarPair(a, b) => (a, b), Abi::ScalarPair(a, b) => (a, b),
_ => unreachable!("load_scalar_pair({:?})", self), _ => unreachable!("load_scalar_pair({:?})", self),
}; };
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar); let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone()); let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone()); let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
let mut flags = MemFlags::new(); let mut flags = MemFlags::new();
flags.set_notrap(); flags.set_notrap();
let val1 = ptr.load(fx, clif_ty1, flags); let val1 = ptr.load(fx, clif_ty1, flags);
...@@ -564,7 +558,7 @@ fn transmute_value<'tcx>( ...@@ -564,7 +558,7 @@ fn transmute_value<'tcx>(
to_ptr.store(fx, val, flags); to_ptr.store(fx, val, flags);
return; return;
} }
Abi::ScalarPair(ref a_scalar, ref b_scalar) => { Abi::ScalarPair(a_scalar, b_scalar) => {
let (value, extra) = from.load_scalar_pair(fx); let (value, extra) = from.load_scalar_pair(fx);
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar); let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
to_ptr.store(fx, value, flags); to_ptr.store(fx, value, flags);
......
...@@ -536,13 +536,13 @@ fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll V ...@@ -536,13 +536,13 @@ fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll V
} }
_ => {} _ => {}
} }
if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi { if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
// If the value is a boolean, the range is 0..2 and that ultimately // If the value is a boolean, the range is 0..2 and that ultimately
// become 0..0 when the type becomes i1, which would be rejected // become 0..0 when the type becomes i1, which would be rejected
// by the LLVM verifier. // by the LLVM verifier.
if let Int(..) = scalar.value { if let Int(..) = scalar.value {
if !scalar.is_bool() && !scalar.is_always_valid_for(bx) { if !scalar.is_bool() && !scalar.is_always_valid_for(bx) {
bx.range_metadata(callsite, &scalar.valid_range); bx.range_metadata(callsite, scalar.valid_range);
} }
} }
} }
......
...@@ -792,7 +792,7 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll ...@@ -792,7 +792,7 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
/// Helper function to get the LLVM type for a Scalar. Pointers are returned as /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type. /// the equivalent integer type.
fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type { fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: Scalar) -> &'ll Type {
match scalar.value { match scalar.value {
Primitive::Int(Integer::I8, _) => cx.type_i8(), Primitive::Int(Integer::I8, _) => cx.type_i8(),
Primitive::Int(Integer::I16, _) => cx.type_i16(), Primitive::Int(Integer::I16, _) => cx.type_i16(),
...@@ -812,7 +812,7 @@ fn llvm_fixup_input( ...@@ -812,7 +812,7 @@ fn llvm_fixup_input(
reg: InlineAsmRegClass, reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>, layout: &TyAndLayout<'tcx>,
) -> &'ll Value { ) -> &'ll Value {
match (reg, &layout.abi) { match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value { if let Primitive::Int(Integer::I8, _) = s.value {
let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8); let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
...@@ -835,7 +835,7 @@ fn llvm_fixup_input( ...@@ -835,7 +835,7 @@ fn llvm_fixup_input(
Abi::Vector { element, count }, Abi::Vector { element, count },
) if layout.size.bytes() == 8 => { ) if layout.size.bytes() == 8 => {
let elem_ty = llvm_asm_scalar_type(bx.cx, element); let elem_ty = llvm_asm_scalar_type(bx.cx, element);
let vec_ty = bx.cx.type_vector(elem_ty, *count); let vec_ty = bx.cx.type_vector(elem_ty, count);
let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect(); let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
} }
...@@ -890,7 +890,7 @@ fn llvm_fixup_output( ...@@ -890,7 +890,7 @@ fn llvm_fixup_output(
reg: InlineAsmRegClass, reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>, layout: &TyAndLayout<'tcx>,
) -> &'ll Value { ) -> &'ll Value {
match (reg, &layout.abi) { match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value { if let Primitive::Int(Integer::I8, _) = s.value {
bx.extract_element(value, bx.const_i32(0)) bx.extract_element(value, bx.const_i32(0))
...@@ -910,8 +910,8 @@ fn llvm_fixup_output( ...@@ -910,8 +910,8 @@ fn llvm_fixup_output(
Abi::Vector { element, count }, Abi::Vector { element, count },
) if layout.size.bytes() == 8 => { ) if layout.size.bytes() == 8 => {
let elem_ty = llvm_asm_scalar_type(bx.cx, element); let elem_ty = llvm_asm_scalar_type(bx.cx, element);
let vec_ty = bx.cx.type_vector(elem_ty, *count * 2); let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect(); let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
} }
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
...@@ -965,7 +965,7 @@ fn llvm_fixup_output_type( ...@@ -965,7 +965,7 @@ fn llvm_fixup_output_type(
reg: InlineAsmRegClass, reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>, layout: &TyAndLayout<'tcx>,
) -> &'ll Type { ) -> &'ll Type {
match (reg, &layout.abi) { match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value { if let Primitive::Int(Integer::I8, _) = s.value {
cx.type_vector(cx.type_i8(), 8) cx.type_vector(cx.type_i8(), 8)
......
...@@ -382,7 +382,7 @@ fn from_immediate(&mut self, val: Self::Value) -> Self::Value { ...@@ -382,7 +382,7 @@ fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
val val
} }
} }
fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value { fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
if scalar.is_bool() { if scalar.is_bool() {
return self.trunc(val, self.cx().type_i1()); return self.trunc(val, self.cx().type_i1());
} }
...@@ -460,12 +460,12 @@ fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx ...@@ -460,12 +460,12 @@ fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx
fn scalar_load_metadata<'a, 'll, 'tcx>( fn scalar_load_metadata<'a, 'll, 'tcx>(
bx: &mut Builder<'a, 'll, 'tcx>, bx: &mut Builder<'a, 'll, 'tcx>,
load: &'ll Value, load: &'ll Value,
scalar: &abi::Scalar, scalar: abi::Scalar,
) { ) {
match scalar.value { match scalar.value {
abi::Int(..) => { abi::Int(..) => {
if !scalar.is_always_valid_for(bx) { if !scalar.is_always_valid_for(bx) {
bx.range_metadata(load, &scalar.valid_range); bx.range_metadata(load, scalar.valid_range);
} }
} }
abi::Pointer if !scalar.valid_range.contains(0) => { abi::Pointer if !scalar.valid_range.contains(0) => {
...@@ -488,17 +488,17 @@ fn scalar_load_metadata<'a, 'll, 'tcx>( ...@@ -488,17 +488,17 @@ fn scalar_load_metadata<'a, 'll, 'tcx>(
} }
let llval = const_llval.unwrap_or_else(|| { let llval = const_llval.unwrap_or_else(|| {
let load = self.load(place.layout.llvm_type(self), place.llval, place.align); let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
if let abi::Abi::Scalar(ref scalar) = place.layout.abi { if let abi::Abi::Scalar(scalar) = place.layout.abi {
scalar_load_metadata(self, load, scalar); scalar_load_metadata(self, load, scalar);
} }
load load
}); });
OperandValue::Immediate(self.to_immediate(llval, place.layout)) OperandValue::Immediate(self.to_immediate(llval, place.layout))
} else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
let b_offset = a.value.size(self).align_to(b.value.align(self).abi); let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
let pair_ty = place.layout.llvm_type(self); let pair_ty = place.layout.llvm_type(self);
let mut load = |i, scalar: &abi::Scalar, align| { let mut load = |i, scalar: abi::Scalar, align| {
let llptr = self.struct_gep(pair_ty, place.llval, i as u64); let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
let llty = place.layout.scalar_pair_element_llvm_type(self, i, false); let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
let load = self.load(llty, llptr, align); let load = self.load(llty, llptr, align);
...@@ -554,7 +554,7 @@ fn write_operand_repeatedly( ...@@ -554,7 +554,7 @@ fn write_operand_repeatedly(
next_bx next_bx
} }
fn range_metadata(&mut self, load: &'ll Value, range: &WrappingRange) { fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
if self.sess().target.arch == "amdgpu" { if self.sess().target.arch == "amdgpu" {
// amdgpu/LLVM does something weird and thinks an i64 value is // amdgpu/LLVM does something weird and thinks an i64 value is
// split into a v2i32, halving the bitwidth LLVM expects, // split into a v2i32, halving the bitwidth LLVM expects,
......
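`WrappingRange` is another small `Copy` type, which is why `range_metadata` and `scalar_load_metadata` above now take it by value instead of by reference. A simplified stand-in showing the wrap-around containment it models (the real rustc type also truncates values to the scalar's size, which is ignored here):

```rust
#[derive(Copy, Clone, Debug)]
struct WrappingRange {
    start: u128,
    end: u128,
}

impl WrappingRange {
    // If start <= end this is the ordinary range start..=end;
    // otherwise the valid values wrap around the top of the domain.
    fn contains(self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            v >= self.start || v <= self.end
        }
    }
}

fn main() {
    // `bool` is stored with valid range 0..=1.
    let boolean = WrappingRange { start: 0, end: 1 };
    assert!(boolean.contains(1) && !boolean.contains(2));

    // A wrapped range: everything except 3..=9 is considered valid.
    let wrapped = WrappingRange { start: 10, end: 2 };
    assert!(wrapped.contains(0) && wrapped.contains(10) && !wrapped.contains(5));
    println!("ok");
}
```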
...@@ -228,7 +228,7 @@ fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> { ...@@ -228,7 +228,7 @@ fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
}) })
} }
fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value { fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() }; let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
match cv { match cv {
Scalar::Int(ScalarInt::ZST) => { Scalar::Int(ScalarInt::ZST) => {
......
...@@ -111,7 +111,7 @@ fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>( ...@@ -111,7 +111,7 @@ fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)), Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
&cx.tcx, &cx.tcx,
), ),
&Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } }, Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
cx.type_i8p_ext(address_space), cx.type_i8p_ext(address_space),
)); ));
next_offset = offset + pointer_size; next_offset = offset + pointer_size;
......
...@@ -1656,7 +1656,7 @@ fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDes ...@@ -1656,7 +1656,7 @@ fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDes
Variants::Multiple { Variants::Multiple {
tag_encoding: tag_encoding:
TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant }, TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
ref tag, tag,
ref variants, ref variants,
tag_field, tag_field,
} => { } => {
...@@ -2082,10 +2082,8 @@ fn prepare_enum_metadata( ...@@ -2082,10 +2082,8 @@ fn prepare_enum_metadata(
let layout = cx.layout_of(enum_type); let layout = cx.layout_of(enum_type);
if let ( if let (Abi::Scalar(_), Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. }) =
&Abi::Scalar(_), (layout.abi, &layout.variants)
&Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. },
) = (&layout.abi, &layout.variants)
{ {
return FinalMetadata(discriminant_type_metadata(tag.value)); return FinalMetadata(discriminant_type_metadata(tag.value));
} }
...@@ -2093,8 +2091,8 @@ fn prepare_enum_metadata( ...@@ -2093,8 +2091,8 @@ fn prepare_enum_metadata(
if use_enum_fallback(cx) { if use_enum_fallback(cx) {
let discriminant_type_metadata = match layout.variants { let discriminant_type_metadata = match layout.variants {
Variants::Single { .. } => None, Variants::Single { .. } => None,
Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, ref tag, .. } Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. }
| Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. } => { | Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
Some(discriminant_type_metadata(tag.value)) Some(discriminant_type_metadata(tag.value))
} }
}; };
...@@ -2146,9 +2144,7 @@ fn prepare_enum_metadata( ...@@ -2146,9 +2144,7 @@ fn prepare_enum_metadata(
// A single-variant enum has no discriminant. // A single-variant enum has no discriminant.
Variants::Single { .. } => None, Variants::Single { .. } => None,
Variants::Multiple { Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, tag_field, .. } => {
tag_encoding: TagEncoding::Niche { .. }, ref tag, tag_field, ..
} => {
// Find the integer type of the correct size. // Find the integer type of the correct size.
let size = tag.value.size(cx); let size = tag.value.size(cx);
let align = tag.value.align(cx); let align = tag.value.align(cx);
...@@ -2179,7 +2175,7 @@ fn prepare_enum_metadata( ...@@ -2179,7 +2175,7 @@ fn prepare_enum_metadata(
} }
} }
Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, tag_field, .. } => { Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, tag_field, .. } => {
let discr_type = tag.value.to_ty(cx.tcx); let discr_type = tag.value.to_ty(cx.tcx);
let (size, align) = cx.size_and_align_of(discr_type); let (size, align) = cx.size_and_align_of(discr_type);
......
...@@ -133,7 +133,7 @@ fn codegen_intrinsic_call( ...@@ -133,7 +133,7 @@ fn codegen_intrinsic_call(
} }
sym::va_arg => { sym::va_arg => {
match fn_abi.ret.layout.abi { match fn_abi.ret.layout.abi {
abi::Abi::Scalar(ref scalar) => { abi::Abi::Scalar(scalar) => {
match scalar.value { match scalar.value {
Primitive::Int(..) => { Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 { if self.cx().size_of(ret_ty).bytes() < 4 {
......
...@@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>( ...@@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(
) -> &'a Type { ) -> &'a Type {
match layout.abi { match layout.abi {
Abi::Scalar(_) => bug!("handled elsewhere"), Abi::Scalar(_) => bug!("handled elsewhere"),
Abi::Vector { ref element, count } => { Abi::Vector { element, count } => {
let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
return cx.type_vector(element, count); return cx.type_vector(element, count);
} }
...@@ -177,7 +177,7 @@ pub trait LayoutLlvmExt<'tcx> { ...@@ -177,7 +177,7 @@ pub trait LayoutLlvmExt<'tcx> {
fn scalar_llvm_type_at<'a>( fn scalar_llvm_type_at<'a>(
&self, &self,
cx: &CodegenCx<'a, 'tcx>, cx: &CodegenCx<'a, 'tcx>,
scalar: &Scalar, scalar: Scalar,
offset: Size, offset: Size,
) -> &'a Type; ) -> &'a Type;
fn scalar_pair_element_llvm_type<'a>( fn scalar_pair_element_llvm_type<'a>(
...@@ -218,7 +218,7 @@ fn is_llvm_scalar_pair(&self) -> bool { ...@@ -218,7 +218,7 @@ fn is_llvm_scalar_pair(&self) -> bool {
/// of that field's type - this is useful for taking the address of /// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment. /// that field and ensuring the struct has the right alignment.
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
if let Abi::Scalar(ref scalar) = self.abi { if let Abi::Scalar(scalar) = self.abi {
// Use a different cache for scalars because pointers to DSTs // Use a different cache for scalars because pointers to DSTs
// can be either fat or thin (data pointers of fat pointers). // can be either fat or thin (data pointers of fat pointers).
if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) { if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
...@@ -286,7 +286,7 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { ...@@ -286,7 +286,7 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
} }
fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
if let Abi::Scalar(ref scalar) = self.abi { if let Abi::Scalar(scalar) = self.abi {
if scalar.is_bool() { if scalar.is_bool() {
return cx.type_i1(); return cx.type_i1();
} }
...@@ -297,7 +297,7 @@ fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { ...@@ -297,7 +297,7 @@ fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
fn scalar_llvm_type_at<'a>( fn scalar_llvm_type_at<'a>(
&self, &self,
cx: &CodegenCx<'a, 'tcx>, cx: &CodegenCx<'a, 'tcx>,
scalar: &Scalar, scalar: Scalar,
offset: Size, offset: Size,
) -> &'a Type { ) -> &'a Type {
match scalar.value { match scalar.value {
...@@ -337,7 +337,7 @@ fn scalar_pair_element_llvm_type<'a>( ...@@ -337,7 +337,7 @@ fn scalar_pair_element_llvm_type<'a>(
} }
let (a, b) = match self.abi { let (a, b) = match self.abi {
Abi::ScalarPair(ref a, ref b) => (a, b), Abi::ScalarPair(a, b) => (a, b),
_ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self), _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
}; };
let scalar = [a, b][index]; let scalar = [a, b][index];
......
...@@ -404,7 +404,7 @@ fn msvc_enum_fallback( ...@@ -404,7 +404,7 @@ fn msvc_enum_fallback(
// calculate the range of values for the dataful variant // calculate the range of values for the dataful variant
let dataful_discriminant_range = let dataful_discriminant_range =
&dataful_variant_layout.largest_niche.as_ref().unwrap().scalar.valid_range; dataful_variant_layout.largest_niche.unwrap().scalar.valid_range;
let min = dataful_discriminant_range.start; let min = dataful_discriminant_range.start;
let min = tag.value.size(&tcx).truncate(min); let min = tag.value.size(&tcx).truncate(min);
......
...@@ -1102,9 +1102,9 @@ fn codegen_argument( ...@@ -1102,9 +1102,9 @@ fn codegen_argument(
// the load would just produce `OperandValue::Ref` instead // the load would just produce `OperandValue::Ref` instead
// of the `OperandValue::Immediate` we need for the call. // of the `OperandValue::Immediate` we need for the call.
llval = bx.load(bx.backend_type(arg.layout), llval, align); llval = bx.load(bx.backend_type(arg.layout), llval, align);
if let abi::Abi::Scalar(ref scalar) = arg.layout.abi { if let abi::Abi::Scalar(scalar) = arg.layout.abi {
if scalar.is_bool() { if scalar.is_bool() {
bx.range_metadata(llval, &WrappingRange { start: 0, end: 1 }); bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
} }
} }
// We store bools as `i8` so we need to truncate to `i1`. // We store bools as `i8` so we need to truncate to `i1`.
...@@ -1424,7 +1424,7 @@ fn codegen_transmute_into( ...@@ -1424,7 +1424,7 @@ fn codegen_transmute_into(
let src = self.codegen_operand(bx, src); let src = self.codegen_operand(bx, src);
// Special-case transmutes between scalars as simple bitcasts. // Special-case transmutes between scalars as simple bitcasts.
match (&src.layout.abi, &dst.layout.abi) { match (src.layout.abi, dst.layout.abi) {
(abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => { (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
// HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers. // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
if (src_scalar.value == abi::Pointer) == (dst_scalar.value == abi::Pointer) { if (src_scalar.value == abi::Pointer) == (dst_scalar.value == abi::Pointer) {
......
...@@ -68,7 +68,7 @@ pub fn simd_shuffle_indices( ...@@ -68,7 +68,7 @@ pub fn simd_shuffle_indices(
if let Some(prim) = field.val.try_to_scalar() { if let Some(prim) = field.val.try_to_scalar() {
let layout = bx.layout_of(field_ty); let layout = bx.layout_of(field_ty);
let scalar = match layout.abi { let scalar = match layout.abi {
Abi::Scalar(ref x) => x, Abi::Scalar(x) => x,
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout), _ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
}; };
bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout)) bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
......
...@@ -79,7 +79,7 @@ pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>( ...@@ -79,7 +79,7 @@ pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
let val = match val { let val = match val {
ConstValue::Scalar(x) => { ConstValue::Scalar(x) => {
let scalar = match layout.abi { let scalar = match layout.abi {
Abi::Scalar(ref x) => x, Abi::Scalar(x) => x,
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout), _ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
}; };
let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout)); let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
...@@ -87,7 +87,7 @@ pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>( ...@@ -87,7 +87,7 @@ pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
} }
ConstValue::Slice { data, start, end } => { ConstValue::Slice { data, start, end } => {
let a_scalar = match layout.abi { let a_scalar = match layout.abi {
Abi::ScalarPair(ref a, _) => a, Abi::ScalarPair(a, _) => a,
_ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout), _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout),
}; };
let a = Scalar::from_pointer( let a = Scalar::from_pointer(
...@@ -162,7 +162,7 @@ pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>( ...@@ -162,7 +162,7 @@ pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
llval: V, llval: V,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
) -> Self { ) -> Self {
let val = if let Abi::ScalarPair(ref a, ref b) = layout.abi { let val = if let Abi::ScalarPair(a, b) = layout.abi {
debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout); debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
// Deconstruct the immediate aggregate. // Deconstruct the immediate aggregate.
...@@ -185,7 +185,7 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>( ...@@ -185,7 +185,7 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
let field = self.layout.field(bx.cx(), i); let field = self.layout.field(bx.cx(), i);
let offset = self.layout.fields.offset(i); let offset = self.layout.fields.offset(i);
let mut val = match (self.val, &self.layout.abi) { let mut val = match (self.val, self.layout.abi) {
// If the field is ZST, it has no data. // If the field is ZST, it has no data.
_ if field.is_zst() => { _ if field.is_zst() => {
return OperandRef::new_zst(bx, field); return OperandRef::new_zst(bx, field);
...@@ -200,7 +200,7 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>( ...@@ -200,7 +200,7 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
} }
// Extract a scalar component from a pair. // Extract a scalar component from a pair.
(OperandValue::Pair(a_llval, b_llval), &Abi::ScalarPair(ref a, ref b)) => { (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
if offset.bytes() == 0 { if offset.bytes() == 0 {
assert_eq!(field.size, a.value.size(bx.cx())); assert_eq!(field.size, a.value.size(bx.cx()));
OperandValue::Immediate(a_llval) OperandValue::Immediate(a_llval)
...@@ -212,14 +212,14 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>( ...@@ -212,14 +212,14 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
} }
// `#[repr(simd)]` types are also immediate. // `#[repr(simd)]` types are also immediate.
(OperandValue::Immediate(llval), &Abi::Vector { .. }) => { (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64))) OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
} }
_ => bug!("OperandRef::extract_field({:?}): not applicable", self), _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
}; };
match (&mut val, &field.abi) { match (&mut val, field.abi) {
(OperandValue::Immediate(llval), _) => { (OperandValue::Immediate(llval), _) => {
// Bools in union fields needs to be truncated. // Bools in union fields needs to be truncated.
*llval = bx.to_immediate(*llval, field); *llval = bx.to_immediate(*llval, field);
...@@ -308,7 +308,7 @@ fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>( ...@@ -308,7 +308,7 @@ fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
} }
OperandValue::Pair(a, b) => { OperandValue::Pair(a, b) => {
let (a_scalar, b_scalar) = match dest.layout.abi { let (a_scalar, b_scalar) = match dest.layout.abi {
Abi::ScalarPair(ref a, ref b) => (a, b), Abi::ScalarPair(a, b) => (a, b),
_ => bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout), _ => bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout),
}; };
let ty = bx.backend_type(dest.layout); let ty = bx.backend_type(dest.layout);
......
...@@ -99,7 +99,7 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>( ...@@ -99,7 +99,7 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
// Also handles the first field of Scalar, ScalarPair, and Vector layouts. // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
self.llval self.llval
} }
Abi::ScalarPair(ref a, ref b) Abi::ScalarPair(a, b)
if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) => if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
{ {
// Offset matches second field. // Offset matches second field.
...@@ -222,7 +222,7 @@ pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>( ...@@ -222,7 +222,7 @@ pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
.map_or(index.as_u32() as u128, |discr| discr.val); .map_or(index.as_u32() as u128, |discr| discr.val);
return bx.cx().const_uint_big(cast_to, discr_val); return bx.cx().const_uint_big(cast_to, discr_val);
} }
Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => { Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
(tag, tag_encoding, tag_field) (tag, tag_encoding, tag_field)
} }
}; };
......
...@@ -124,13 +124,13 @@ fn checked_binop( ...@@ -124,13 +124,13 @@ fn checked_binop(
fn from_immediate(&mut self, val: Self::Value) -> Self::Value; fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value { fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
if let Abi::Scalar(ref scalar) = layout.abi { if let Abi::Scalar(scalar) = layout.abi {
self.to_immediate_scalar(val, scalar) self.to_immediate_scalar(val, scalar)
} else { } else {
val val
} }
} }
fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &Scalar) -> Self::Value; fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value; fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value; fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
...@@ -156,7 +156,7 @@ fn write_operand_repeatedly( ...@@ -156,7 +156,7 @@ fn write_operand_repeatedly(
dest: PlaceRef<'tcx, Self::Value>, dest: PlaceRef<'tcx, Self::Value>,
) -> Self; ) -> Self;
fn range_metadata(&mut self, load: Self::Value, range: &WrappingRange); fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
fn nonnull_metadata(&mut self, load: Self::Value); fn nonnull_metadata(&mut self, load: Self::Value);
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
......
...@@ -28,7 +28,7 @@ pub trait ConstMethods<'tcx>: BackendTypes { ...@@ -28,7 +28,7 @@ pub trait ConstMethods<'tcx>: BackendTypes {
fn const_data_from_alloc(&self, alloc: &Allocation) -> Self::Value; fn const_data_from_alloc(&self, alloc: &Allocation) -> Self::Value;
fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: Self::Type) -> Self::Value; fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
fn from_const_alloc( fn from_const_alloc(
&self, &self,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
......
...@@ -194,7 +194,7 @@ pub fn emulate_intrinsic( ...@@ -194,7 +194,7 @@ pub fn emulate_intrinsic(
let val = self.read_scalar(&args[0])?.check_init()?; let val = self.read_scalar(&args[0])?.check_init()?;
let bits = val.to_bits(layout_of.size)?; let bits = val.to_bits(layout_of.size)?;
let kind = match layout_of.abi { let kind = match layout_of.abi {
Abi::Scalar(ref scalar) => scalar.value, Abi::Scalar(scalar) => scalar.value,
_ => span_bug!( _ => span_bug!(
self.cur_span(), self.cur_span(),
"{} called on invalid type {:?}", "{} called on invalid type {:?}",
......
...@@ -274,11 +274,11 @@ fn try_read_immediate_from_mplace( ...@@ -274,11 +274,11 @@ fn try_read_immediate_from_mplace(
let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?; let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout })) Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
} }
Abi::ScalarPair(ref a, ref b) => { Abi::ScalarPair(a, b) => {
// We checked `ptr_align` above, so all fields will have the alignment they need. // We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`, // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy. // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = (&a.value, &b.value); let (a, b) = (a.value, b.value);
let (a_size, b_size) = (a.size(self), b.size(self)); let (a_size, b_size) = (a.size(self), b.size(self));
let b_offset = a_size.align_to(b.align(self).abi); let b_offset = a_size.align_to(b.align(self).abi);
assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
...@@ -648,7 +648,7 @@ pub fn read_discriminant( ...@@ -648,7 +648,7 @@ pub fn read_discriminant(
}; };
return Ok((discr, index)); return Ok((discr, index));
} }
Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => { Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
(tag, tag_encoding, tag_field) (tag, tag_encoding, tag_field)
} }
}; };
......
...@@ -752,7 +752,7 @@ fn write_immediate_to_mplace_no_validate( ...@@ -752,7 +752,7 @@ fn write_immediate_to_mplace_no_validate(
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`, // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy. // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = match dest.layout.abi { let (a, b) = match dest.layout.abi {
Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value), Abi::ScalarPair(a, b) => (a.value, b.value),
_ => span_bug!( _ => span_bug!(
self.cur_span(), self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}", "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
...@@ -1000,7 +1000,7 @@ pub fn write_discriminant( ...@@ -1000,7 +1000,7 @@ pub fn write_discriminant(
} }
Variants::Multiple { Variants::Multiple {
tag_encoding: TagEncoding::Direct, tag_encoding: TagEncoding::Direct,
tag: ref tag_layout, tag: tag_layout,
tag_field, tag_field,
.. ..
} => { } => {
...@@ -1022,7 +1022,7 @@ pub fn write_discriminant( ...@@ -1022,7 +1022,7 @@ pub fn write_discriminant(
Variants::Multiple { Variants::Multiple {
tag_encoding: tag_encoding:
TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start }, TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
tag: ref tag_layout, tag: tag_layout,
tag_field, tag_field,
.. ..
} => { } => {
......
...@@ -187,17 +187,14 @@ fn check_argument_compat( ...@@ -187,17 +187,14 @@ fn check_argument_compat(
return false; return false;
} }
// Compare layout // Compare layout
match (&caller.abi, &callee.abi) { match (caller.abi, callee.abi) {
// Different valid ranges are okay (once we enforce validity, // Different valid ranges are okay (once we enforce validity,
// that will take care to make it UB to leave the range, just // that will take care to make it UB to leave the range, just
// like for transmute). // like for transmute).
(abi::Abi::Scalar(ref caller), abi::Abi::Scalar(ref callee)) => { (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => caller.value == callee.value,
caller.value == callee.value (abi::Abi::ScalarPair(caller1, caller2), abi::Abi::ScalarPair(callee1, callee2)) => {
caller1.value == callee1.value && caller2.value == callee2.value
} }
(
abi::Abi::ScalarPair(ref caller1, ref caller2),
abi::Abi::ScalarPair(ref callee1, ref callee2),
) => caller1.value == callee1.value && caller2.value == callee2.value,
// Be conservative // Be conservative
_ => false, _ => false,
} }
......
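The `check_argument_compat` hunk above reads more easily with the by-value matching in place: two layouts are considered compatible when their scalars agree on the primitive kind, while differing valid ranges are tolerated. A standalone restatement using the same simplified stand-in types as the sketch near the top of the commit:

```rust
#[derive(Copy, Clone, PartialEq)]
enum Primitive { Int, Pointer, F32, F64 }

#[derive(Copy, Clone)]
struct Scalar { value: Primitive }

#[derive(Copy, Clone)]
enum Abi {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Aggregate,
}

// Only the primitive kind of each scalar is compared; valid ranges may differ,
// and anything that is not a scalar or scalar pair is conservatively rejected.
fn layouts_compatible(caller: Abi, callee: Abi) -> bool {
    match (caller, callee) {
        (Abi::Scalar(a), Abi::Scalar(b)) => a.value == b.value,
        (Abi::ScalarPair(a1, a2), Abi::ScalarPair(b1, b2)) => {
            a1.value == b1.value && a2.value == b2.value
        }
        _ => false,
    }
}

fn main() {
    let int = Scalar { value: Primitive::Int };
    let ptr = Scalar { value: Primitive::Pointer };
    assert!(layouts_compatible(Abi::Scalar(int), Abi::Scalar(int)));
    assert!(!layouts_compatible(Abi::ScalarPair(int, ptr), Abi::ScalarPair(ptr, int)));
    assert!(!layouts_compatible(Abi::Aggregate, Abi::Aggregate));
    println!("ok");
}
```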
...@@ -618,7 +618,7 @@ fn try_visit_primitive( ...@@ -618,7 +618,7 @@ fn try_visit_primitive(
fn visit_scalar( fn visit_scalar(
&mut self, &mut self,
op: &OpTy<'tcx, M::PointerTag>, op: &OpTy<'tcx, M::PointerTag>,
scalar_layout: &ScalarAbi, scalar_layout: ScalarAbi,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
if scalar_layout.valid_range.is_full_for(op.layout.size) { if scalar_layout.valid_range.is_full_for(op.layout.size) {
// Nothing to check // Nothing to check
...@@ -784,7 +784,7 @@ fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> ...@@ -784,7 +784,7 @@ fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx>
{ "a value of uninhabited type {:?}", op.layout.ty } { "a value of uninhabited type {:?}", op.layout.ty }
); );
} }
Abi::Scalar(ref scalar_layout) => { Abi::Scalar(scalar_layout) => {
self.visit_scalar(op, scalar_layout)?; self.visit_scalar(op, scalar_layout)?;
} }
Abi::ScalarPair { .. } | Abi::Vector { .. } => { Abi::ScalarPair { .. } | Abi::Vector { .. } => {
......
...@@ -1327,10 +1327,7 @@ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) { ...@@ -1327,10 +1327,7 @@ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
}; };
let (variants, tag) = match layout.variants { let (variants, tag) = match layout.variants {
Variants::Multiple { Variants::Multiple {
tag_encoding: TagEncoding::Direct, tag_encoding: TagEncoding::Direct, tag, ref variants, ..
ref tag,
ref variants,
..
} => (variants, tag), } => (variants, tag),
_ => return, _ => return,
}; };
......
...@@ -290,9 +290,9 @@ fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout { ...@@ -290,9 +290,9 @@ fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
// HACK(nox): We iter on `b` and then `a` because `max_by_key` // HACK(nox): We iter on `b` and then `a` because `max_by_key`
// returns the last maximum. // returns the last maximum.
let largest_niche = Niche::from_scalar(dl, b_offset, b.clone()) let largest_niche = Niche::from_scalar(dl, b_offset, b)
.into_iter() .into_iter()
.chain(Niche::from_scalar(dl, Size::ZERO, a.clone())) .chain(Niche::from_scalar(dl, Size::ZERO, a))
.max_by_key(|niche| niche.available(dl)); .max_by_key(|niche| niche.available(dl));
Layout { Layout {
...@@ -401,7 +401,7 @@ fn univariant_uninterned( ...@@ -401,7 +401,7 @@ fn univariant_uninterned(
offsets[i as usize] = offset; offsets[i as usize] = offset;
if !repr.hide_niche() { if !repr.hide_niche() {
if let Some(mut niche) = field.largest_niche.clone() { if let Some(mut niche) = field.largest_niche {
let available = niche.available(dl); let available = niche.available(dl);
if available > largest_niche_available { if available > largest_niche_available {
largest_niche_available = available; largest_niche_available = available;
...@@ -449,12 +449,12 @@ fn univariant_uninterned( ...@@ -449,12 +449,12 @@ fn univariant_uninterned(
// For plain scalars, or vectors of them, we can't unpack // For plain scalars, or vectors of them, we can't unpack
// newtypes for `#[repr(C)]`, as that affects C ABIs. // newtypes for `#[repr(C)]`, as that affects C ABIs.
Abi::Scalar(_) | Abi::Vector { .. } if optimize => { Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
abi = field.abi.clone(); abi = field.abi;
} }
// But scalar pairs are Rust-specific and get // But scalar pairs are Rust-specific and get
// treated as aggregates by C ABIs anyway. // treated as aggregates by C ABIs anyway.
Abi::ScalarPair(..) => { Abi::ScalarPair(..) => {
abi = field.abi.clone(); abi = field.abi;
} }
_ => {} _ => {}
} }
...@@ -463,14 +463,14 @@ fn univariant_uninterned( ...@@ -463,14 +463,14 @@ fn univariant_uninterned(
// Two non-ZST fields, and they're both scalars. // Two non-ZST fields, and they're both scalars.
( (
Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })), Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(a), .. }, .. })),
Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })), Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(b), .. }, .. })),
None, None,
) => { ) => {
// Order by the memory placement, not source order. // Order by the memory placement, not source order.
let ((i, a), (j, b)) = let ((i, a), (j, b)) =
if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) }; if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
let pair = self.scalar_pair(a.clone(), b.clone()); let pair = self.scalar_pair(a, b);
let pair_offsets = match pair.fields { let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => { FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index, &[0, 1]); assert_eq!(memory_index, &[0, 1]);
...@@ -609,7 +609,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -609,7 +609,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
Abi::Aggregate { sized: true } Abi::Aggregate { sized: true }
}; };
let largest_niche = if count != 0 { element.largest_niche.clone() } else { None }; let largest_niche = if count != 0 { element.largest_niche } else { None };
tcx.intern_layout(Layout { tcx.intern_layout(Layout {
variants: Variants::Single { index: VariantIdx::new(0) }, variants: Variants::Single { index: VariantIdx::new(0) },
...@@ -768,8 +768,8 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -768,8 +768,8 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
// Compute the ABI of the element type: // Compute the ABI of the element type:
let e_ly = self.layout_of(e_ty)?; let e_ly = self.layout_of(e_ty)?;
let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi { let e_abi = if let Abi::Scalar(scalar) = e_ly.abi {
scalar.clone() scalar
} else { } else {
// This error isn't caught in typeck, e.g., if // This error isn't caught in typeck, e.g., if
// the element type of the vector is generic. // the element type of the vector is generic.
...@@ -796,7 +796,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -796,7 +796,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
variants: Variants::Single { index: VariantIdx::new(0) }, variants: Variants::Single { index: VariantIdx::new(0) },
fields, fields,
abi: Abi::Vector { element: e_abi, count: e_len }, abi: Abi::Vector { element: e_abi, count: e_len },
largest_niche: e_ly.largest_niche.clone(), largest_niche: e_ly.largest_niche,
size, size,
align, align,
}) })
...@@ -843,13 +843,13 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -843,13 +843,13 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
// If all non-ZST fields have the same ABI, forward this ABI // If all non-ZST fields have the same ABI, forward this ABI
if optimize && !field.is_zst() { if optimize && !field.is_zst() {
// Normalize scalar_unit to the maximal valid range // Normalize scalar_unit to the maximal valid range
let field_abi = match &field.abi { let field_abi = match field.abi {
Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)), Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
Abi::ScalarPair(x, y) => { Abi::ScalarPair(x, y) => {
Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value)) Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
} }
Abi::Vector { element: x, count } => { Abi::Vector { element: x, count } => {
Abi::Vector { element: scalar_unit(x.value), count: *count } Abi::Vector { element: scalar_unit(x.value), count }
} }
Abi::Uninhabited | Abi::Aggregate { .. } => { Abi::Uninhabited | Abi::Aggregate { .. } => {
Abi::Aggregate { sized: true } Abi::Aggregate { sized: true }
...@@ -970,7 +970,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -970,7 +970,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
Niche::from_scalar(dl, Size::ZERO, scalar.clone()) Niche::from_scalar(dl, Size::ZERO, scalar.clone())
}; };
if let Some(niche) = niche { if let Some(niche) = niche {
match &st.largest_niche { match st.largest_niche {
Some(largest_niche) => { Some(largest_niche) => {
// Replace the existing niche even if they're equal, // Replace the existing niche even if they're equal,
// because this one is at a lower offset. // because this one is at a lower offset.
...@@ -1045,7 +1045,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -1045,7 +1045,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
let niche_candidate = variants[i] let niche_candidate = variants[i]
.iter() .iter()
.enumerate() .enumerate()
.filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?))) .filter_map(|(j, field)| Some((j, field.largest_niche?)))
.max_by_key(|(_, niche)| niche.available(dl)); .max_by_key(|(_, niche)| niche.available(dl));
if let Some((field_index, niche, (niche_start, niche_scalar))) = if let Some((field_index, niche, (niche_start, niche_scalar))) =
...@@ -1078,31 +1078,24 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -1078,31 +1078,24 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
Abi::Uninhabited Abi::Uninhabited
} else { } else {
match st[i].abi { match st[i].abi {
Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()), Abi::Scalar(_) => Abi::Scalar(niche_scalar),
Abi::ScalarPair(ref first, ref second) => { Abi::ScalarPair(first, second) => {
// We need to use scalar_unit to reset the // We need to use scalar_unit to reset the
// valid range to the maximal one for that // valid range to the maximal one for that
// primitive, because only the niche is // primitive, because only the niche is
// guaranteed to be initialised, not the // guaranteed to be initialised, not the
// other primitive. // other primitive.
if offset.bytes() == 0 { if offset.bytes() == 0 {
Abi::ScalarPair( Abi::ScalarPair(niche_scalar, scalar_unit(second.value))
niche_scalar.clone(),
scalar_unit(second.value),
)
} else { } else {
Abi::ScalarPair( Abi::ScalarPair(scalar_unit(first.value), niche_scalar)
scalar_unit(first.value),
niche_scalar.clone(),
)
} }
} }
_ => Abi::Aggregate { sized: true }, _ => Abi::Aggregate { sized: true },
} }
}; };
let largest_niche = let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
Niche::from_scalar(dl, offset, niche_scalar.clone());
niche_filling_layout = Some(Layout { niche_filling_layout = Some(Layout {
variants: Variants::Multiple { variants: Variants::Multiple {
...@@ -1283,7 +1276,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -1283,7 +1276,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
}; };
let mut abi = Abi::Aggregate { sized: true }; let mut abi = Abi::Aggregate { sized: true };
if tag.value.size(dl) == size { if tag.value.size(dl) == size {
abi = Abi::Scalar(tag.clone()); abi = Abi::Scalar(tag);
} else { } else {
// Try to use a ScalarPair for all tagged enums. // Try to use a ScalarPair for all tagged enums.
let mut common_prim = None; let mut common_prim = None;
...@@ -1303,7 +1296,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -1303,7 +1296,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
} }
}; };
let prim = match field.abi { let prim = match field.abi {
Abi::Scalar(ref scalar) => scalar.value, Abi::Scalar(scalar) => scalar.value,
_ => { _ => {
common_prim = None; common_prim = None;
break; break;
...@@ -1323,7 +1316,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -1323,7 +1316,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
} }
} }
if let Some((prim, offset)) = common_prim { if let Some((prim, offset)) = common_prim {
let pair = self.scalar_pair(tag.clone(), scalar_unit(prim)); let pair = self.scalar_pair(tag, scalar_unit(prim));
let pair_offsets = match pair.fields { let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => { FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index, &[0, 1]); assert_eq!(memory_index, &[0, 1]);
...@@ -1347,7 +1340,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -1347,7 +1340,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
abi = Abi::Uninhabited; abi = Abi::Uninhabited;
} }
let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone()); let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
let tagged_layout = Layout { let tagged_layout = Layout {
variants: Variants::Multiple { variants: Variants::Multiple {
...@@ -1372,8 +1365,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<' ...@@ -1372,8 +1365,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
// pick the layout with the larger niche; otherwise, // pick the layout with the larger niche; otherwise,
// pick tagged as it has simpler codegen. // pick tagged as it has simpler codegen.
cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| { cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
let niche_size = let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
(layout.size, cmp::Reverse(niche_size)) (layout.size, cmp::Reverse(niche_size))
}) })
} }
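The tail of the enum-layout hunk above picks between the tagged and the niche-filling layout: the smaller one wins, and on a size tie the one with the larger niche wins, with the tagged layout preferred on a complete tie, as the comment in the source notes. A standalone illustration of that `min_by_key` + `Reverse` tie-breaking pattern:

```rust
use std::cmp::Reverse;

#[derive(Copy, Clone, Debug)]
struct CandidateLayout {
    size: u64,
    niche_available: u128,
}

fn pick(tagged: CandidateLayout, niche_filling: CandidateLayout) -> CandidateLayout {
    // Smaller size wins; on a size tie, Reverse flips the ordering so the
    // larger niche wins; on a complete tie the first argument (`tagged`) is kept.
    std::cmp::min_by_key(tagged, niche_filling, |l| (l.size, Reverse(l.niche_available)))
}

fn main() {
    let tagged = CandidateLayout { size: 16, niche_available: 10 };
    let niche_filling = CandidateLayout { size: 16, niche_available: 200 };
    println!("{:?}", pick(tagged, niche_filling)); // same size, bigger niche: the niche-filling layout wins
}
```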
...@@ -1560,7 +1552,7 @@ fn generator_layout( ...@@ -1560,7 +1552,7 @@ fn generator_layout(
value: Primitive::Int(discr_int, false), value: Primitive::Int(discr_int, false),
valid_range: WrappingRange { start: 0, end: max_discr }, valid_range: WrappingRange { start: 0, end: max_discr },
}; };
let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone())); let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag));
let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout }; let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
let promoted_layouts = ineligible_locals let promoted_layouts = ineligible_locals
...@@ -1832,7 +1824,7 @@ fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) { ...@@ -1832,7 +1824,7 @@ fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
} }
} }
Variants::Multiple { ref tag, ref tag_encoding, .. } => { Variants::Multiple { tag, ref tag_encoding, .. } => {
debug!( debug!(
"print-type-size `{:#?}` adt general variants def {}", "print-type-size `{:#?}` adt general variants def {}",
layout.ty, layout.ty,
...@@ -2240,7 +2232,7 @@ fn field_ty_or_layout( ...@@ -2240,7 +2232,7 @@ fn field_ty_or_layout(
i: usize, i: usize,
) -> TyMaybeWithLayout<'tcx> { ) -> TyMaybeWithLayout<'tcx> {
let tcx = cx.tcx(); let tcx = cx.tcx();
let tag_layout = |tag: &Scalar| -> TyAndLayout<'tcx> { let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
let layout = Layout::scalar(cx, tag.clone()); let layout = Layout::scalar(cx, tag.clone());
TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) } TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
}; };
...@@ -2329,7 +2321,7 @@ fn field_ty_or_layout( ...@@ -2329,7 +2321,7 @@ fn field_ty_or_layout(
.nth(i) .nth(i)
.unwrap(), .unwrap(),
), ),
Variants::Multiple { ref tag, tag_field, .. } => { Variants::Multiple { tag, tag_field, .. } => {
if i == tag_field { if i == tag_field {
return TyMaybeWithLayout::TyAndLayout(tag_layout(tag)); return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
} }
...@@ -2347,7 +2339,7 @@ fn field_ty_or_layout( ...@@ -2347,7 +2339,7 @@ fn field_ty_or_layout(
} }
// Discriminant field for enums (where applicable). // Discriminant field for enums (where applicable).
Variants::Multiple { ref tag, .. } => { Variants::Multiple { tag, .. } => {
assert_eq!(i, 0); assert_eq!(i, 0);
return TyMaybeWithLayout::TyAndLayout(tag_layout(tag)); return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
} }
...@@ -2906,7 +2898,7 @@ fn new_internal( ...@@ -2906,7 +2898,7 @@ fn new_internal(
// Handle safe Rust thin and fat pointers. // Handle safe Rust thin and fat pointers.
let adjust_for_rust_scalar = |attrs: &mut ArgAttributes, let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
scalar: &Scalar, scalar: Scalar,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
offset: Size, offset: Size,
is_return: bool| { is_return: bool| {
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) { fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips // Always sign extend u32 values on 64-bit mips
if let abi::Abi::Scalar(ref scalar) = arg.layout.abi { if let abi::Abi::Scalar(scalar) = arg.layout.abi {
if let abi::Int(i, signed) = scalar.value { if let abi::Int(i, signed) = scalar.value {
if !signed && i.size().bits() == 32 { if !signed && i.size().bits() == 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode { if let PassMode::Direct(ref mut attrs) = arg.mode {
...@@ -23,7 +23,7 @@ fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg> ...@@ -23,7 +23,7 @@ fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
C: HasDataLayout, C: HasDataLayout,
{ {
match ret.layout.field(cx, i).abi { match ret.layout.field(cx, i).abi {
abi::Abi::Scalar(ref scalar) => match scalar.value { abi::Abi::Scalar(scalar) => match scalar.value {
abi::F32 => Some(Reg::f32()), abi::F32 => Some(Reg::f32()),
abi::F64 => Some(Reg::f64()), abi::F64 => Some(Reg::f64()),
_ => None, _ => None,
...@@ -107,7 +107,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) ...@@ -107,7 +107,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
let offset = arg.layout.fields.offset(i); let offset = arg.layout.fields.offset(i);
// We only care about aligned doubles // We only care about aligned doubles
if let abi::Abi::Scalar(ref scalar) = field.abi { if let abi::Abi::Scalar(scalar) = field.abi {
if let abi::F64 = scalar.value { if let abi::F64 = scalar.value {
if offset.is_aligned(dl.f64_align.abi) { if offset.is_aligned(dl.f64_align.abi) {
// Insert enough integers to cover [last_offset, offset) // Insert enough integers to cover [last_offset, offset)
......
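Only the binding mode changes in this hunk (`ref scalar` becomes a by-value `scalar`); the aligned-double test itself is untouched. A rough sketch of that test under the usual power-of-two alignment assumption (the names here are illustrative, not the rustc data-layout API):

```rust
// Offsets and alignments in bytes; f64 ABI alignment is assumed to be 8 on this target.
const F64_ALIGN: u64 = 8;

// Stand-in for `offset.is_aligned(dl.f64_align.abi)` when the alignment is a power of two.
fn is_aligned(offset: u64, align: u64) -> bool {
    debug_assert!(align.is_power_of_two());
    offset & (align - 1) == 0
}

fn main() {
    assert!(is_aligned(16, F64_ALIGN));  // a double at offset 16 is aligned
    assert!(!is_aligned(12, F64_ALIGN)); // one at offset 12 is not
}
```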
...@@ -322,7 +322,7 @@ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, H ...@@ -322,7 +322,7 @@ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, H
Abi::Uninhabited => Err(Heterogeneous), Abi::Uninhabited => Err(Heterogeneous),
// The primitive for this algorithm. // The primitive for this algorithm.
Abi::Scalar(ref scalar) => { Abi::Scalar(scalar) => {
let kind = match scalar.value { let kind = match scalar.value {
abi::Int(..) | abi::Pointer => RegKind::Integer, abi::Int(..) | abi::Pointer => RegKind::Integer,
abi::F32 | abi::F64 => RegKind::Float, abi::F32 | abi::F64 => RegKind::Float,
...@@ -450,9 +450,9 @@ impl<'a, Ty> ArgAbi<'a, Ty> { ...@@ -450,9 +450,9 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
pub fn new( pub fn new(
cx: &impl HasDataLayout, cx: &impl HasDataLayout,
layout: TyAndLayout<'a, Ty>, layout: TyAndLayout<'a, Ty>,
scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, &abi::Scalar, Size) -> ArgAttributes, scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
) -> Self { ) -> Self {
let mode = match &layout.abi { let mode = match layout.abi {
Abi::Uninhabited => PassMode::Ignore, Abi::Uninhabited => PassMode::Ignore,
Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)), Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
Abi::ScalarPair(a, b) => PassMode::Pair( Abi::ScalarPair(a, b) => PassMode::Pair(
...@@ -504,7 +504,7 @@ pub fn make_indirect_byval(&mut self) { ...@@ -504,7 +504,7 @@ pub fn make_indirect_byval(&mut self) {
pub fn extend_integer_width_to(&mut self, bits: u64) { pub fn extend_integer_width_to(&mut self, bits: u64) {
// Only integers have signedness // Only integers have signedness
if let Abi::Scalar(ref scalar) = self.layout.abi { if let Abi::Scalar(scalar) = self.layout.abi {
if let abi::Int(i, signed) = scalar.value { if let abi::Int(i, signed) = scalar.value {
if i.size().bits() < bits { if i.size().bits() < bits {
if let PassMode::Direct(ref mut attrs) = self.mode { if let PassMode::Direct(ref mut attrs) = self.mode {
......
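The widening rule this hunk touches is simple: integer scalars narrower than the requested width are extended according to their signedness, everything else is left alone. A self-contained sketch of that decision, with simplified types in place of the real `ArgAttributes` machinery:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Extension {
    None,
    ZeroExtend,
    SignExtend,
}

// Decide how a scalar integer argument of `int_bits` should be widened to `target_bits`.
fn extend_integer_width_to(int_bits: u64, signed: bool, target_bits: u64) -> Extension {
    if int_bits < target_bits {
        if signed { Extension::SignExtend } else { Extension::ZeroExtend }
    } else {
        Extension::None
    }
}

fn main() {
    // A u8 argument widened to a 32-bit slot is zero-extended.
    assert_eq!(extend_integer_width_to(8, false, 32), Extension::ZeroExtend);
    // An i16 is sign-extended; a full-width i32 needs no extension at all.
    assert_eq!(extend_integer_width_to(16, true, 32), Extension::SignExtend);
    assert_eq!(extend_integer_width_to(32, true, 32), Extension::None);
}
```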
...@@ -44,7 +44,7 @@ fn should_use_fp_conv_helper<'a, Ty, C>( ...@@ -44,7 +44,7 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
Ty: TyAbiInterface<'a, C> + Copy, Ty: TyAbiInterface<'a, C> + Copy,
{ {
match arg_layout.abi { match arg_layout.abi {
Abi::Scalar(ref scalar) => match scalar.value { Abi::Scalar(scalar) => match scalar.value {
abi::Int(..) | abi::Pointer => { abi::Int(..) | abi::Pointer => {
if arg_layout.size.bits() > xlen { if arg_layout.size.bits() > xlen {
return Err(CannotUseFpConv); return Err(CannotUseFpConv);
...@@ -297,7 +297,7 @@ fn classify_arg<'a, Ty, C>( ...@@ -297,7 +297,7 @@ fn classify_arg<'a, Ty, C>(
} }
fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) { fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
if let Abi::Scalar(ref scalar) = arg.layout.abi { if let Abi::Scalar(scalar) = arg.layout.abi {
if let abi::Int(i, _) = scalar.value { if let abi::Int(i, _) = scalar.value {
// 32-bit integers are always sign-extended // 32-bit integers are always sign-extended
if i.size().bits() == 32 && xlen > 32 { if i.size().bits() == 32 && xlen > 32 {
......
...@@ -18,7 +18,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool ...@@ -18,7 +18,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool
C: HasDataLayout, C: HasDataLayout,
{ {
match layout.abi { match layout.abi {
abi::Abi::Scalar(ref scalar) => scalar.value.is_float(), abi::Abi::Scalar(scalar) => scalar.value.is_float(),
abi::Abi::Aggregate { .. } => { abi::Abi::Aggregate { .. } => {
if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
is_single_fp_element(cx, layout.field(cx, 0)) is_single_fp_element(cx, layout.field(cx, 0))
......
...@@ -14,7 +14,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool ...@@ -14,7 +14,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool
C: HasDataLayout, C: HasDataLayout,
{ {
match layout.abi { match layout.abi {
abi::Abi::Scalar(ref scalar) => scalar.value.is_float(), abi::Abi::Scalar(scalar) => scalar.value.is_float(),
abi::Abi::Aggregate { .. } => { abi::Abi::Aggregate { .. } => {
if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
is_single_fp_element(cx, layout.field(cx, 0)) is_single_fp_element(cx, layout.field(cx, 0))
......
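The two identical helpers above implement a small recursion: a layout counts as a single fp element if it is a float scalar, or an aggregate whose only field sits at offset 0 and is itself a single fp element. A simplified model of that check (the real version also consults the target data layout):

```rust
// Just enough of a layout model to express the recursion.
enum MiniLayout {
    Scalar { is_float: bool },
    Aggregate { fields: Vec<(u64 /* offset in bytes */, MiniLayout)> },
}

fn is_single_fp_element(layout: &MiniLayout) -> bool {
    match layout {
        MiniLayout::Scalar { is_float } => *is_float,
        MiniLayout::Aggregate { fields } => {
            fields.len() == 1 && fields[0].0 == 0 && is_single_fp_element(&fields[0].1)
        }
    }
}

fn main() {
    // Nested single-field wrappers around an f64 still count as a single fp element.
    let inner = MiniLayout::Aggregate {
        fields: vec![(0, MiniLayout::Scalar { is_float: true })],
    };
    let wrapper = MiniLayout::Aggregate { fields: vec![(0, inner)] };
    assert!(is_single_fp_element(&wrapper));

    // A pair such as (f64, u64) does not.
    let pair = MiniLayout::Aggregate {
        fields: vec![
            (0, MiniLayout::Scalar { is_float: true }),
            (8, MiniLayout::Scalar { is_float: false }),
        ],
    };
    assert!(!is_single_fp_element(&pair));
}
```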
...@@ -49,7 +49,7 @@ fn classify<'a, Ty, C>( ...@@ -49,7 +49,7 @@ fn classify<'a, Ty, C>(
let mut c = match layout.abi { let mut c = match layout.abi {
Abi::Uninhabited => return Ok(()), Abi::Uninhabited => return Ok(()),
Abi::Scalar(ref scalar) => match scalar.value { Abi::Scalar(scalar) => match scalar.value {
abi::Int(..) | abi::Pointer => Class::Int, abi::Int(..) | abi::Pointer => Class::Int,
abi::F32 | abi::F64 => Class::Sse, abi::F32 | abi::F64 => Class::Sse,
}, },
......
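The classification arm visible in this hunk maps scalar primitives onto register classes: integers and pointers go to `Int` (general-purpose registers), floats to `Sse`. A compact sketch of just that mapping, with stand-in types; the real classifier also handles vectors, aggregates, and field offsets:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Class {
    Int, // general-purpose registers
    Sse, // SSE / floating-point registers
}

#[derive(Clone, Copy)]
enum Primitive {
    Int { bits: u16, signed: bool },
    Pointer,
    F32,
    F64,
}

// Mirrors the `match scalar.value` arm in the hunk above.
fn classify_scalar(value: Primitive) -> Class {
    match value {
        Primitive::Int { .. } | Primitive::Pointer => Class::Int,
        Primitive::F32 | Primitive::F64 => Class::Sse,
    }
}

fn main() {
    assert_eq!(classify_scalar(Primitive::Pointer), Class::Int);
    assert_eq!(classify_scalar(Primitive::Int { bits: 32, signed: true }), Class::Int);
    assert_eq!(classify_scalar(Primitive::F64), Class::Sse);
}
```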
...@@ -955,7 +955,7 @@ impl AddressSpace { ...@@ -955,7 +955,7 @@ impl AddressSpace {
/// Describes how values of the type are passed by target ABIs, /// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for. /// in terms of categories of C types there are ABI rules for.
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi { pub enum Abi {
Uninhabited, Uninhabited,
Scalar(Scalar), Scalar(Scalar),
...@@ -983,8 +983,8 @@ pub fn is_unsized(&self) -> bool { ...@@ -983,8 +983,8 @@ pub fn is_unsized(&self) -> bool {
/// Returns `true` if this is a single signed integer scalar /// Returns `true` if this is a single signed integer scalar
#[inline] #[inline]
pub fn is_signed(&self) -> bool { pub fn is_signed(&self) -> bool {
match *self { match self {
Abi::Scalar(ref scal) => match scal.value { Abi::Scalar(scal) => match scal.value {
Primitive::Int(_, signed) => signed, Primitive::Int(_, signed) => signed,
_ => false, _ => false,
}, },
...@@ -1053,7 +1053,7 @@ pub enum TagEncoding { ...@@ -1053,7 +1053,7 @@ pub enum TagEncoding {
}, },
} }
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche { pub struct Niche {
pub offset: Size, pub offset: Size,
pub scalar: Scalar, pub scalar: Scalar,
...@@ -1259,7 +1259,7 @@ pub fn might_permit_raw_init<C>(self, cx: &C, zero: bool) -> bool ...@@ -1259,7 +1259,7 @@ pub fn might_permit_raw_init<C>(self, cx: &C, zero: bool) -> bool
Ty: TyAbiInterface<'a, C>, Ty: TyAbiInterface<'a, C>,
C: HasDataLayout, C: HasDataLayout,
{ {
let scalar_allows_raw_init = move |s: &Scalar| -> bool { let scalar_allows_raw_init = move |s: Scalar| -> bool {
if zero { if zero {
// The range must contain 0. // The range must contain 0.
s.valid_range.contains(0) s.valid_range.contains(0)
...@@ -1270,11 +1270,11 @@ pub fn might_permit_raw_init<C>(self, cx: &C, zero: bool) -> bool ...@@ -1270,11 +1270,11 @@ pub fn might_permit_raw_init<C>(self, cx: &C, zero: bool) -> bool
}; };
// Check the ABI. // Check the ABI.
let valid = match &self.abi { let valid = match self.abi {
Abi::Uninhabited => false, // definitely UB Abi::Uninhabited => false, // definitely UB
Abi::Scalar(s) => scalar_allows_raw_init(s), Abi::Scalar(s) => scalar_allows_raw_init(s),
Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2), Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
Abi::Vector { element: s, count } => *count == 0 || scalar_allows_raw_init(s), Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
Abi::Aggregate { .. } => true, // Fields are checked below. Abi::Aggregate { .. } => true, // Fields are checked below.
}; };
if !valid { if !valid {
......
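Taken together, these hunks show the payoff of deriving `Copy`: matches bind scalars by value and helper closures take `Scalar` by value, with no `ref` patterns or clones. A reduced, self-contained sketch of that shape; the types and the full-range check here are simplified assumptions, not the real rustc definitions:

```rust
// Wrapping valid-value range: an ordinary interval when start <= end, wrap-around otherwise.
#[derive(Clone, Copy)]
struct WrappingRange {
    start: u128,
    end: u128,
}

impl WrappingRange {
    fn contains(self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            v >= self.start || v <= self.end
        }
    }
}

#[derive(Clone, Copy)]
struct Scalar {
    bits: u32, // width in bits, assumed < 128 here so the shift below cannot overflow
    valid_range: WrappingRange,
}

impl Scalar {
    // Does the valid range admit every bit pattern of this width? (Simplified check.)
    fn covers_all_values(self) -> bool {
        let mask = (1u128 << self.bits) - 1;
        self.valid_range.end.wrapping_add(1) & mask == self.valid_range.start & mask
    }
}

// With `Copy` derived, the enum can be matched and passed around by value.
#[derive(Clone, Copy)]
enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
}

fn might_permit_raw_init(abi: Abi, zero: bool) -> bool {
    // The closure takes `Scalar` by value, just like the diff above.
    let scalar_allows_raw_init = |s: Scalar| -> bool {
        if zero {
            s.valid_range.contains(0) // the range must contain 0
        } else {
            s.covers_all_values() // any bit pattern must be allowed
        }
    };
    match abi {
        Abi::Uninhabited => false, // definitely UB
        Abi::Scalar(s) => scalar_allows_raw_init(s),
        Abi::ScalarPair(a, b) => scalar_allows_raw_init(a) && scalar_allows_raw_init(b),
    }
}

fn main() {
    let bool_like = Scalar { bits: 8, valid_range: WrappingRange { start: 0, end: 1 } };
    // Zero-init of a bool-like scalar is fine (0 is in range), uninit is not.
    assert!(might_permit_raw_init(Abi::Scalar(bool_like), true));
    assert!(!might_permit_raw_init(Abi::Scalar(bool_like), false));
}
```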
...@@ -465,9 +465,9 @@ fn virtual_call_violation_for_method<'tcx>( ...@@ -465,9 +465,9 @@ fn virtual_call_violation_for_method<'tcx>(
let param_env = tcx.param_env(method.def_id); let param_env = tcx.param_env(method.def_id);
let abi_of_ty = |ty: Ty<'tcx>| -> Option<&Abi> { let abi_of_ty = |ty: Ty<'tcx>| -> Option<Abi> {
match tcx.layout_of(param_env.and(ty)) { match tcx.layout_of(param_env.and(ty)) {
Ok(layout) => Some(&layout.abi), Ok(layout) => Some(layout.abi),
Err(err) => { Err(err) => {
// #78372 // #78372
tcx.sess.delay_span_bug( tcx.sess.delay_span_bug(
......
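Because `Abi` is now `Copy`, this helper can hand its caller an owned `Option<Abi>` instead of an `Option<&Abi>` that borrows from the queried layout. A tiny illustration of that difference with hypothetical types, not the rustc query API:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Abi {
    Uninhabited,
    Aggregate,
}

struct Layout {
    abi: Abi,
}

// Returning `Option<Abi>` by value means the result is not tied to the
// lifetime of the `Layout` it was read from. Error handling is omitted here.
fn abi_of(layout: Result<Layout, ()>) -> Option<Abi> {
    match layout {
        Ok(layout) => Some(layout.abi),
        Err(()) => None,
    }
}

fn main() {
    assert_eq!(abi_of(Ok(Layout { abi: Abi::Aggregate })), Some(Abi::Aggregate));
    assert_eq!(abi_of(Err(())), None);
}
```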