Commit 5b2f757d authored by Andreas Liljeqvist

Make `abi::Abi` `Copy` and remove a *lot* of refs

fix

fix

Remove more refs and clones

fix

more

fix
Parent 86ff6aeb
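
For context, a minimal sketch (simplified stand-in types, not part of the commit) of the pattern change this enables: once `Abi` and its `Scalar` payload are `Copy`, `match` can bind the payload by value, so the `ref` patterns and `.clone()` calls removed throughout the hunks below become unnecessary.

// Hedged sketch; the real rustc types carry more fields.
#[derive(Clone, Copy, Debug)]
struct Scalar { bits: u64 }

#[derive(Clone, Copy, Debug)]
enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
}

fn first_scalar(abi: Abi) -> Option<Scalar> {
    // With `Abi: Copy` the payload is copied out of the match; before the
    // change this needed `Abi::Scalar(ref s) => Some(s.clone())`.
    match abi {
        Abi::Scalar(s) => Some(s),
        Abi::ScalarPair(a, _) => Some(a),
        Abi::Uninhabited => None,
    }
}

fn main() {
    let abi = Abi::Scalar(Scalar { bits: 32 });
    assert_eq!(first_scalar(abi).unwrap().bits, 32);
    let _reuse = abi; // `abi` is still usable: it was copied, not moved.
}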
......@@ -92,9 +92,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
match self.mode {
PassMode::Ignore => smallvec![],
- PassMode::Direct(attrs) => match &self.layout.abi {
+ PassMode::Direct(attrs) => match self.layout.abi {
Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
- AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())),
+ AbiParam::new(scalar_to_clif_type(tcx, scalar)),
attrs
)],
Abi::Vector { .. } => {
......@@ -103,10 +103,10 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
}
_ => unreachable!("{:?}", self.layout.abi),
},
- PassMode::Pair(attrs_a, attrs_b) => match &self.layout.abi {
+ PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
Abi::ScalarPair(a, b) => {
- let a = scalar_to_clif_type(tcx, a.clone());
- let b = scalar_to_clif_type(tcx, b.clone());
+ let a = scalar_to_clif_type(tcx, a);
+ let b = scalar_to_clif_type(tcx, b);
smallvec![
apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
......@@ -139,9 +139,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
match self.mode {
PassMode::Ignore => (None, vec![]),
- PassMode::Direct(_) => match &self.layout.abi {
+ PassMode::Direct(_) => match self.layout.abi {
Abi::Scalar(scalar) => {
- (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))])
+ (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
}
Abi::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
......@@ -149,10 +149,10 @@ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>)
}
_ => unreachable!("{:?}", self.layout.abi),
},
- PassMode::Pair(_, _) => match &self.layout.abi {
+ PassMode::Pair(_, _) => match self.layout.abi {
Abi::ScalarPair(a, b) => {
- let a = scalar_to_clif_type(tcx, a.clone());
- let b = scalar_to_clif_type(tcx, b.clone());
+ let a = scalar_to_clif_type(tcx, a);
+ let b = scalar_to_clif_type(tcx, b);
(None, vec![AbiParam::new(a), AbiParam::new(b)])
}
_ => unreachable!("{:?}", self.layout.abi),
......
......@@ -143,8 +143,8 @@
}
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
- let (element, count) = match &layout.abi {
- Abi::Vector { element, count } => (element.clone(), *count),
+ let (element, count) = match layout.abi {
+ Abi::Vector { element, count } => (element, count),
_ => unreachable!(),
};
......
......@@ -49,11 +49,7 @@ fn codegen_field<'tcx>(
}
}
- fn scalar_pair_calculate_b_offset(
- tcx: TyCtxt<'_>,
- a_scalar: &Scalar,
- b_scalar: &Scalar,
- ) -> Offset32 {
+ fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
Offset32::new(b_offset.bytes().try_into().unwrap())
}
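
The body above computes the second element's offset as the first element's size rounded up to the second element's alignment. A standalone sketch of that arithmetic (the helper below is written from scratch for illustration, not the rustc API):

// Round `size` up to a multiple of `align` (a power of two), as
// `Size::align_to` does in rustc.
fn align_to(size: u64, align: u64) -> u64 {
    (size + align - 1) & !(align - 1)
}

fn main() {
    // A ScalarPair like (i8, i32): a.size = 1, b.align = 4 => b at offset 4.
    assert_eq!(align_to(1, 4), 4);
    // A pair like (i32, i64) on a 64-bit target: a.size = 4, b.align = 8 => 8.
    assert_eq!(align_to(4, 8), 8);
}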
......@@ -124,12 +120,10 @@ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
match self.0 {
CValueInner::ByRef(ptr, None) => {
let clif_ty = match layout.abi {
- Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
- Abi::Vector { ref element, count } => {
- scalar_to_clif_type(fx.tcx, element.clone())
- .by(u16::try_from(count).unwrap())
- .unwrap()
- }
+ Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+ Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+ .by(u16::try_from(count).unwrap())
+ .unwrap(),
_ => unreachable!("{:?}", layout.ty),
};
let mut flags = MemFlags::new();
......@@ -147,13 +141,13 @@ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Valu
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, None) => {
- let (a_scalar, b_scalar) = match &layout.abi {
+ let (a_scalar, b_scalar) = match layout.abi {
Abi::ScalarPair(a, b) => (a, b),
_ => unreachable!("load_scalar_pair({:?})", self),
};
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
- let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
- let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
+ let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
+ let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
let mut flags = MemFlags::new();
flags.set_notrap();
let val1 = ptr.load(fx, clif_ty1, flags);
......@@ -564,7 +558,7 @@ fn transmute_value<'tcx>(
to_ptr.store(fx, val, flags);
return;
}
- Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
+ Abi::ScalarPair(a_scalar, b_scalar) => {
let (value, extra) = from.load_scalar_pair(fx);
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
to_ptr.store(fx, value, flags);
......
......@@ -536,13 +536,13 @@ fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll V
}
_ => {}
}
- if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
+ if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
// If the value is a boolean, the range is 0..2 and that ultimately
// become 0..0 when the type becomes i1, which would be rejected
// by the LLVM verifier.
if let Int(..) = scalar.value {
if !scalar.is_bool() && !scalar.is_always_valid_for(bx) {
- bx.range_metadata(callsite, &scalar.valid_range);
+ bx.range_metadata(callsite, scalar.valid_range);
}
}
}
......
......@@ -792,7 +792,7 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
- fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
+ fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: Scalar) -> &'ll Type {
match scalar.value {
Primitive::Int(Integer::I8, _) => cx.type_i8(),
Primitive::Int(Integer::I16, _) => cx.type_i16(),
......@@ -812,7 +812,7 @@ fn llvm_fixup_input(
reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
- match (reg, &layout.abi) {
+ match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value {
let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
......@@ -835,7 +835,7 @@ fn llvm_fixup_input(
Abi::Vector { element, count },
) if layout.size.bytes() == 8 => {
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
- let vec_ty = bx.cx.type_vector(elem_ty, *count);
+ let vec_ty = bx.cx.type_vector(elem_ty, count);
let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
......@@ -890,7 +890,7 @@ fn llvm_fixup_output(
reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
- match (reg, &layout.abi) {
+ match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value {
bx.extract_element(value, bx.const_i32(0))
......@@ -910,8 +910,8 @@ fn llvm_fixup_output(
Abi::Vector { element, count },
) if layout.size.bytes() == 8 => {
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
- let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
- let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
+ let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
+ let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
......@@ -965,7 +965,7 @@ fn llvm_fixup_output_type(
reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>,
) -> &'ll Type {
- match (reg, &layout.abi) {
+ match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value {
cx.type_vector(cx.type_i8(), 8)
......
......@@ -382,7 +382,7 @@ fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
val
}
}
- fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
if scalar.is_bool() {
return self.trunc(val, self.cx().type_i1());
}
......@@ -460,12 +460,12 @@ fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx
fn scalar_load_metadata<'a, 'll, 'tcx>(
bx: &mut Builder<'a, 'll, 'tcx>,
load: &'ll Value,
- scalar: &abi::Scalar,
+ scalar: abi::Scalar,
) {
match scalar.value {
abi::Int(..) => {
if !scalar.is_always_valid_for(bx) {
- bx.range_metadata(load, &scalar.valid_range);
+ bx.range_metadata(load, scalar.valid_range);
}
}
abi::Pointer if !scalar.valid_range.contains(0) => {
......@@ -488,17 +488,17 @@ fn scalar_load_metadata<'a, 'll, 'tcx>(
}
let llval = const_llval.unwrap_or_else(|| {
let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
- if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+ if let abi::Abi::Scalar(scalar) = place.layout.abi {
scalar_load_metadata(self, load, scalar);
}
load
});
OperandValue::Immediate(self.to_immediate(llval, place.layout))
- } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+ } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
let pair_ty = place.layout.llvm_type(self);
- let mut load = |i, scalar: &abi::Scalar, align| {
+ let mut load = |i, scalar: abi::Scalar, align| {
let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
let load = self.load(llty, llptr, align);
......@@ -554,7 +554,7 @@ fn write_operand_repeatedly(
next_bx
}
- fn range_metadata(&mut self, load: &'ll Value, range: &WrappingRange) {
+ fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
if self.sess().target.arch == "amdgpu" {
// amdgpu/LLVM does something weird and thinks an i64 value is
// split into a v2i32, halving the bitwidth LLVM expects,
......
......@@ -228,7 +228,7 @@ fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
})
}
- fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
+ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
match cv {
Scalar::Int(ScalarInt::ZST) => {
......
......@@ -111,7 +111,7 @@ fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
&cx.tcx,
),
- &Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
+ Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
cx.type_i8p_ext(address_space),
));
next_offset = offset + pointer_size;
......
......@@ -1656,7 +1656,7 @@ fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDes
Variants::Multiple {
tag_encoding:
TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
- ref tag,
+ tag,
ref variants,
tag_field,
} => {
......@@ -2082,10 +2082,8 @@ fn prepare_enum_metadata(
let layout = cx.layout_of(enum_type);
- if let (
- &Abi::Scalar(_),
- &Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. },
- ) = (&layout.abi, &layout.variants)
+ if let (Abi::Scalar(_), Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. }) =
+ (layout.abi, &layout.variants)
{
return FinalMetadata(discriminant_type_metadata(tag.value));
}
......@@ -2093,8 +2091,8 @@ fn prepare_enum_metadata(
if use_enum_fallback(cx) {
let discriminant_type_metadata = match layout.variants {
Variants::Single { .. } => None,
- Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, ref tag, .. }
- | Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. } => {
+ Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. }
+ | Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
Some(discriminant_type_metadata(tag.value))
}
};
......@@ -2146,9 +2144,7 @@ fn prepare_enum_metadata(
// A single-variant enum has no discriminant.
Variants::Single { .. } => None,
- Variants::Multiple {
- tag_encoding: TagEncoding::Niche { .. }, ref tag, tag_field, ..
- } => {
+ Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, tag_field, .. } => {
// Find the integer type of the correct size.
let size = tag.value.size(cx);
let align = tag.value.align(cx);
......@@ -2179,7 +2175,7 @@ fn prepare_enum_metadata(
}
}
- Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, tag_field, .. } => {
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, tag_field, .. } => {
let discr_type = tag.value.to_ty(cx.tcx);
let (size, align) = cx.size_and_align_of(discr_type);
......
......@@ -133,7 +133,7 @@ fn codegen_intrinsic_call(
}
sym::va_arg => {
match fn_abi.ret.layout.abi {
- abi::Abi::Scalar(ref scalar) => {
+ abi::Abi::Scalar(scalar) => {
match scalar.value {
Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 {
......
......@@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(
) -> &'a Type {
match layout.abi {
Abi::Scalar(_) => bug!("handled elsewhere"),
- Abi::Vector { ref element, count } => {
+ Abi::Vector { element, count } => {
let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
return cx.type_vector(element, count);
}
......@@ -177,7 +177,7 @@ pub trait LayoutLlvmExt<'tcx> {
fn scalar_llvm_type_at<'a>(
&self,
cx: &CodegenCx<'a, 'tcx>,
- scalar: &Scalar,
+ scalar: Scalar,
offset: Size,
) -> &'a Type;
fn scalar_pair_element_llvm_type<'a>(
......@@ -218,7 +218,7 @@ fn is_llvm_scalar_pair(&self) -> bool {
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
- if let Abi::Scalar(ref scalar) = self.abi {
+ if let Abi::Scalar(scalar) = self.abi {
// Use a different cache for scalars because pointers to DSTs
// can be either fat or thin (data pointers of fat pointers).
if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
......@@ -286,7 +286,7 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
}
fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
- if let Abi::Scalar(ref scalar) = self.abi {
+ if let Abi::Scalar(scalar) = self.abi {
if scalar.is_bool() {
return cx.type_i1();
}
......@@ -297,7 +297,7 @@ fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
fn scalar_llvm_type_at<'a>(
&self,
cx: &CodegenCx<'a, 'tcx>,
- scalar: &Scalar,
+ scalar: Scalar,
offset: Size,
) -> &'a Type {
match scalar.value {
......@@ -337,7 +337,7 @@ fn scalar_pair_element_llvm_type<'a>(
}
let (a, b) = match self.abi {
- Abi::ScalarPair(ref a, ref b) => (a, b),
+ Abi::ScalarPair(a, b) => (a, b),
_ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
};
let scalar = [a, b][index];
......
......@@ -404,7 +404,7 @@ fn msvc_enum_fallback(
// calculate the range of values for the dataful variant
let dataful_discriminant_range =
- &dataful_variant_layout.largest_niche.as_ref().unwrap().scalar.valid_range;
+ dataful_variant_layout.largest_niche.unwrap().scalar.valid_range;
let min = dataful_discriminant_range.start;
let min = tag.value.size(&tcx).truncate(min);
......
......@@ -1102,9 +1102,9 @@ fn codegen_argument(
// the load would just produce `OperandValue::Ref` instead
// of the `OperandValue::Immediate` we need for the call.
llval = bx.load(bx.backend_type(arg.layout), llval, align);
- if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
+ if let abi::Abi::Scalar(scalar) = arg.layout.abi {
if scalar.is_bool() {
- bx.range_metadata(llval, &WrappingRange { start: 0, end: 1 });
+ bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
}
}
// We store bools as `i8` so we need to truncate to `i1`.
......@@ -1424,7 +1424,7 @@ fn codegen_transmute_into(
let src = self.codegen_operand(bx, src);
// Special-case transmutes between scalars as simple bitcasts.
- match (&src.layout.abi, &dst.layout.abi) {
+ match (src.layout.abi, dst.layout.abi) {
(abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
// HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
if (src_scalar.value == abi::Pointer) == (dst_scalar.value == abi::Pointer) {
......
......@@ -68,7 +68,7 @@ pub fn simd_shuffle_indices(
if let Some(prim) = field.val.try_to_scalar() {
let layout = bx.layout_of(field_ty);
let scalar = match layout.abi {
- Abi::Scalar(ref x) => x,
+ Abi::Scalar(x) => x,
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
};
bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
......
......@@ -79,7 +79,7 @@ pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
let val = match val {
ConstValue::Scalar(x) => {
let scalar = match layout.abi {
- Abi::Scalar(ref x) => x,
+ Abi::Scalar(x) => x,
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
};
let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
......@@ -87,7 +87,7 @@ pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
}
ConstValue::Slice { data, start, end } => {
let a_scalar = match layout.abi {
- Abi::ScalarPair(ref a, _) => a,
+ Abi::ScalarPair(a, _) => a,
_ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout),
};
let a = Scalar::from_pointer(
......@@ -162,7 +162,7 @@ pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
llval: V,
layout: TyAndLayout<'tcx>,
) -> Self {
- let val = if let Abi::ScalarPair(ref a, ref b) = layout.abi {
+ let val = if let Abi::ScalarPair(a, b) = layout.abi {
debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
// Deconstruct the immediate aggregate.
......@@ -185,7 +185,7 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
let field = self.layout.field(bx.cx(), i);
let offset = self.layout.fields.offset(i);
- let mut val = match (self.val, &self.layout.abi) {
+ let mut val = match (self.val, self.layout.abi) {
// If the field is ZST, it has no data.
_ if field.is_zst() => {
return OperandRef::new_zst(bx, field);
......@@ -200,7 +200,7 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
}
// Extract a scalar component from a pair.
- (OperandValue::Pair(a_llval, b_llval), &Abi::ScalarPair(ref a, ref b)) => {
+ (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
if offset.bytes() == 0 {
assert_eq!(field.size, a.value.size(bx.cx()));
OperandValue::Immediate(a_llval)
......@@ -212,14 +212,14 @@ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
}
// `#[repr(simd)]` types are also immediate.
- (OperandValue::Immediate(llval), &Abi::Vector { .. }) => {
+ (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
}
_ => bug!("OperandRef::extract_field({:?}): not applicable", self),
};
- match (&mut val, &field.abi) {
+ match (&mut val, field.abi) {
(OperandValue::Immediate(llval), _) => {
// Bools in union fields needs to be truncated.
*llval = bx.to_immediate(*llval, field);
......@@ -308,7 +308,7 @@ fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
}
OperandValue::Pair(a, b) => {
let (a_scalar, b_scalar) = match dest.layout.abi {
- Abi::ScalarPair(ref a, ref b) => (a, b),
+ Abi::ScalarPair(a, b) => (a, b),
_ => bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout),
};
let ty = bx.backend_type(dest.layout);
......
......@@ -99,7 +99,7 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
// Also handles the first field of Scalar, ScalarPair, and Vector layouts.
self.llval
}
- Abi::ScalarPair(ref a, ref b)
+ Abi::ScalarPair(a, b)
if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
{
// Offset matches second field.
......@@ -222,7 +222,7 @@ pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
.map_or(index.as_u32() as u128, |discr| discr.val);
return bx.cx().const_uint_big(cast_to, discr_val);
}
- Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
+ Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
(tag, tag_encoding, tag_field)
}
};
......
......@@ -124,13 +124,13 @@ fn checked_binop(
fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
- if let Abi::Scalar(ref scalar) = layout.abi {
+ if let Abi::Scalar(scalar) = layout.abi {
self.to_immediate_scalar(val, scalar)
} else {
val
}
}
- fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &Scalar) -> Self::Value;
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
......@@ -156,7 +156,7 @@ fn write_operand_repeatedly(
dest: PlaceRef<'tcx, Self::Value>,
) -> Self;
- fn range_metadata(&mut self, load: Self::Value, range: &WrappingRange);
+ fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
fn nonnull_metadata(&mut self, load: Self::Value);
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
......
......@@ -28,7 +28,7 @@ pub trait ConstMethods<'tcx>: BackendTypes {
fn const_data_from_alloc(&self, alloc: &Allocation) -> Self::Value;
- fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: Self::Type) -> Self::Value;
+ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
fn from_const_alloc(
&self,
layout: TyAndLayout<'tcx>,
......
......@@ -194,7 +194,7 @@ pub fn emulate_intrinsic(
let val = self.read_scalar(&args[0])?.check_init()?;
let bits = val.to_bits(layout_of.size)?;
let kind = match layout_of.abi {
- Abi::Scalar(ref scalar) => scalar.value,
+ Abi::Scalar(scalar) => scalar.value,
_ => span_bug!(
self.cur_span(),
"{} called on invalid type {:?}",
......
......@@ -274,11 +274,11 @@ fn try_read_immediate_from_mplace(
let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
}
- Abi::ScalarPair(ref a, ref b) => {
+ Abi::ScalarPair(a, b) => {
// We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
- let (a, b) = (&a.value, &b.value);
+ let (a, b) = (a.value, b.value);
let (a_size, b_size) = (a.size(self), b.size(self));
let b_offset = a_size.align_to(b.align(self).abi);
assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
......@@ -648,7 +648,7 @@ pub fn read_discriminant(
};
return Ok((discr, index));
}
- Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
+ Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
(tag, tag_encoding, tag_field)
}
};
......
......@@ -752,7 +752,7 @@ fn write_immediate_to_mplace_no_validate(
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = match dest.layout.abi {
- Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
+ Abi::ScalarPair(a, b) => (a.value, b.value),
_ => span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
......@@ -1000,7 +1000,7 @@ pub fn write_discriminant(
}
Variants::Multiple {
tag_encoding: TagEncoding::Direct,
- tag: ref tag_layout,
+ tag: tag_layout,
tag_field,
..
} => {
......@@ -1022,7 +1022,7 @@ pub fn write_discriminant(
Variants::Multiple {
tag_encoding:
TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
- tag: ref tag_layout,
+ tag: tag_layout,
tag_field,
..
} => {
......
......@@ -187,17 +187,14 @@ fn check_argument_compat(
return false;
}
// Compare layout
- match (&caller.abi, &callee.abi) {
+ match (caller.abi, callee.abi) {
// Different valid ranges are okay (once we enforce validity,
// that will take care to make it UB to leave the range, just
// like for transmute).
- (abi::Abi::Scalar(ref caller), abi::Abi::Scalar(ref callee)) => {
- caller.value == callee.value
+ (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => caller.value == callee.value,
+ (abi::Abi::ScalarPair(caller1, caller2), abi::Abi::ScalarPair(callee1, callee2)) => {
+ caller1.value == callee1.value && caller2.value == callee2.value
}
- (
- abi::Abi::ScalarPair(ref caller1, ref caller2),
- abi::Abi::ScalarPair(ref callee1, ref callee2),
- ) => caller1.value == callee1.value && caller2.value == callee2.value,
// Be conservative
_ => false,
}
......
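
A hedged sketch of the rule the hunk above reshapes (types simplified, names chosen here for illustration): caller and callee layouts count as compatible when their scalar primitives match, while differing valid ranges are tolerated, as the comment in the hunk explains.

#[derive(Clone, Copy, PartialEq)]
enum Primitive { Int, Pointer, F64 }

#[derive(Clone, Copy)]
struct Scalar { value: Primitive }

#[derive(Clone, Copy)]
enum Abi { Scalar(Scalar), ScalarPair(Scalar, Scalar), Aggregate }

// Mirrors the match above: only the primitive kind is compared; every
// other shape combination is conservatively rejected.
fn compat(caller: Abi, callee: Abi) -> bool {
    match (caller, callee) {
        (Abi::Scalar(a), Abi::Scalar(b)) => a.value == b.value,
        (Abi::ScalarPair(a1, a2), Abi::ScalarPair(b1, b2)) => {
            a1.value == b1.value && a2.value == b2.value
        }
        _ => false,
    }
}

fn main() {
    let int = Scalar { value: Primitive::Int };
    let ptr = Scalar { value: Primitive::Pointer };
    assert!(compat(Abi::Scalar(int), Abi::Scalar(int)));
    assert!(!compat(Abi::Scalar(int), Abi::Scalar(ptr)));
}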
......@@ -618,7 +618,7 @@ fn try_visit_primitive(
fn visit_scalar(
&mut self,
op: &OpTy<'tcx, M::PointerTag>,
- scalar_layout: &ScalarAbi,
+ scalar_layout: ScalarAbi,
) -> InterpResult<'tcx> {
if scalar_layout.valid_range.is_full_for(op.layout.size) {
// Nothing to check
......@@ -784,7 +784,7 @@ fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx>
{ "a value of uninhabited type {:?}", op.layout.ty }
);
}
- Abi::Scalar(ref scalar_layout) => {
+ Abi::Scalar(scalar_layout) => {
self.visit_scalar(op, scalar_layout)?;
}
Abi::ScalarPair { .. } | Abi::Vector { .. } => {
......
......@@ -1327,10 +1327,7 @@ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
};
let (variants, tag) = match layout.variants {
Variants::Multiple {
- tag_encoding: TagEncoding::Direct,
- ref tag,
- ref variants,
- ..
+ tag_encoding: TagEncoding::Direct, tag, ref variants, ..
} => (variants, tag),
_ => return,
};
......
......@@ -290,9 +290,9 @@ fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
// HACK(nox): We iter on `b` and then `a` because `max_by_key`
// returns the last maximum.
- let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
+ let largest_niche = Niche::from_scalar(dl, b_offset, b)
.into_iter()
- .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
+ .chain(Niche::from_scalar(dl, Size::ZERO, a))
.max_by_key(|niche| niche.available(dl));
Layout {
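
The `HACK(nox)` comment above hinges on a documented detail of `Iterator::max_by_key`: when several elements are equally maximal, the last one is returned. Iterating `b`'s niche before `a`'s therefore makes `a` (at offset zero) win ties. A tiny standalone demonstration:

fn main() {
    // Two candidates with equal "available" scores; the later one wins.
    let candidates = [("b_niche", 1u32), ("a_niche", 1u32)];
    let winner = candidates.iter().max_by_key(|(_, avail)| *avail).unwrap();
    assert_eq!(winner.0, "a_niche"); // last maximum wins the tie
}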
......@@ -401,7 +401,7 @@ fn univariant_uninterned(
offsets[i as usize] = offset;
if !repr.hide_niche() {
- if let Some(mut niche) = field.largest_niche.clone() {
+ if let Some(mut niche) = field.largest_niche {
let available = niche.available(dl);
if available > largest_niche_available {
largest_niche_available = available;
......@@ -449,12 +449,12 @@ fn univariant_uninterned(
// For plain scalars, or vectors of them, we can't unpack
// newtypes for `#[repr(C)]`, as that affects C ABIs.
Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi.clone();
+ abi = field.abi;
}
// But scalar pairs are Rust-specific and get
// treated as aggregates by C ABIs anyway.
Abi::ScalarPair(..) => {
- abi = field.abi.clone();
+ abi = field.abi;
}
_ => {}
}
......@@ -463,14 +463,14 @@ fn univariant_uninterned(
// Two non-ZST fields, and they're both scalars.
(
- Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
- Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
+ Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(a), .. }, .. })),
+ Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(b), .. }, .. })),
None,
) => {
// Order by the memory placement, not source order.
let ((i, a), (j, b)) =
if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
- let pair = self.scalar_pair(a.clone(), b.clone());
+ let pair = self.scalar_pair(a, b);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index, &[0, 1]);
......@@ -609,7 +609,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
Abi::Aggregate { sized: true }
};
- let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
+ let largest_niche = if count != 0 { element.largest_niche } else { None };
tcx.intern_layout(Layout {
variants: Variants::Single { index: VariantIdx::new(0) },
......@@ -768,8 +768,8 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
// Compute the ABI of the element type:
let e_ly = self.layout_of(e_ty)?;
- let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
- scalar.clone()
+ let e_abi = if let Abi::Scalar(scalar) = e_ly.abi {
+ scalar
} else {
// This error isn't caught in typeck, e.g., if
// the element type of the vector is generic.
......@@ -796,7 +796,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
variants: Variants::Single { index: VariantIdx::new(0) },
fields,
abi: Abi::Vector { element: e_abi, count: e_len },
- largest_niche: e_ly.largest_niche.clone(),
+ largest_niche: e_ly.largest_niche,
size,
align,
})
......@@ -843,13 +843,13 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
// If all non-ZST fields have the same ABI, forward this ABI
if optimize && !field.is_zst() {
// Normalize scalar_unit to the maximal valid range
- let field_abi = match &field.abi {
+ let field_abi = match field.abi {
Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
Abi::ScalarPair(x, y) => {
Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
}
Abi::Vector { element: x, count } => {
- Abi::Vector { element: scalar_unit(x.value), count: *count }
+ Abi::Vector { element: scalar_unit(x.value), count }
}
Abi::Uninhabited | Abi::Aggregate { .. } => {
Abi::Aggregate { sized: true }
......@@ -970,7 +970,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
Niche::from_scalar(dl, Size::ZERO, scalar.clone())
};
if let Some(niche) = niche {
- match &st.largest_niche {
+ match st.largest_niche {
Some(largest_niche) => {
// Replace the existing niche even if they're equal,
// because this one is at a lower offset.
......@@ -1045,7 +1045,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
let niche_candidate = variants[i]
.iter()
.enumerate()
- .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
+ .filter_map(|(j, field)| Some((j, field.largest_niche?)))
.max_by_key(|(_, niche)| niche.available(dl));
if let Some((field_index, niche, (niche_start, niche_scalar))) =
......@@ -1078,31 +1078,24 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
Abi::Uninhabited
} else {
match st[i].abi {
- Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
- Abi::ScalarPair(ref first, ref second) => {
+ Abi::Scalar(_) => Abi::Scalar(niche_scalar),
+ Abi::ScalarPair(first, second) => {
// We need to use scalar_unit to reset the
// valid range to the maximal one for that
// primitive, because only the niche is
// guaranteed to be initialised, not the
// other primitive.
if offset.bytes() == 0 {
- Abi::ScalarPair(
- niche_scalar.clone(),
- scalar_unit(second.value),
- )
+ Abi::ScalarPair(niche_scalar, scalar_unit(second.value))
} else {
- Abi::ScalarPair(
- scalar_unit(first.value),
- niche_scalar.clone(),
- )
+ Abi::ScalarPair(scalar_unit(first.value), niche_scalar)
}
}
_ => Abi::Aggregate { sized: true },
}
};
- let largest_niche =
- Niche::from_scalar(dl, offset, niche_scalar.clone());
+ let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
niche_filling_layout = Some(Layout {
variants: Variants::Multiple {
......@@ -1283,7 +1276,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
};
let mut abi = Abi::Aggregate { sized: true };
if tag.value.size(dl) == size {
- abi = Abi::Scalar(tag.clone());
+ abi = Abi::Scalar(tag);
} else {
// Try to use a ScalarPair for all tagged enums.
let mut common_prim = None;
......@@ -1303,7 +1296,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
}
};
let prim = match field.abi {
- Abi::Scalar(ref scalar) => scalar.value,
+ Abi::Scalar(scalar) => scalar.value,
_ => {
common_prim = None;
break;
......@@ -1323,7 +1316,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
}
}
if let Some((prim, offset)) = common_prim {
- let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
+ let pair = self.scalar_pair(tag, scalar_unit(prim));
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index, &[0, 1]);
......@@ -1347,7 +1340,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
abi = Abi::Uninhabited;
}
- let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
+ let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
let tagged_layout = Layout {
variants: Variants::Multiple {
......@@ -1372,8 +1365,7 @@ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'
// pick the layout with the larger niche; otherwise,
// pick tagged as it has simpler codegen.
cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
- let niche_size =
- layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
+ let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
(layout.size, cmp::Reverse(niche_size))
})
}
......@@ -1560,7 +1552,7 @@ fn generator_layout(
value: Primitive::Int(discr_int, false),
valid_range: WrappingRange { start: 0, end: max_discr },
};
- let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
+ let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag));
let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
let promoted_layouts = ineligible_locals
......@@ -1832,7 +1824,7 @@ fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
}
}
- Variants::Multiple { ref tag, ref tag_encoding, .. } => {
+ Variants::Multiple { tag, ref tag_encoding, .. } => {
debug!(
"print-type-size `{:#?}` adt general variants def {}",
layout.ty,
......@@ -2240,7 +2232,7 @@ fn field_ty_or_layout(
i: usize,
) -> TyMaybeWithLayout<'tcx> {
let tcx = cx.tcx();
- let tag_layout = |tag: &Scalar| -> TyAndLayout<'tcx> {
+ let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
let layout = Layout::scalar(cx, tag.clone());
TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
};
......@@ -2329,7 +2321,7 @@ fn field_ty_or_layout(
.nth(i)
.unwrap(),
),
- Variants::Multiple { ref tag, tag_field, .. } => {
+ Variants::Multiple { tag, tag_field, .. } => {
if i == tag_field {
return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
}
......@@ -2347,7 +2339,7 @@ fn field_ty_or_layout(
}
// Discriminant field for enums (where applicable).
- Variants::Multiple { ref tag, .. } => {
+ Variants::Multiple { tag, .. } => {
assert_eq!(i, 0);
return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
}
......@@ -2906,7 +2898,7 @@ fn new_internal(
// Handle safe Rust thin and fat pointers.
let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
- scalar: &Scalar,
+ scalar: Scalar,
layout: TyAndLayout<'tcx>,
offset: Size,
is_return: bool| {
......
......@@ -3,7 +3,7 @@
fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips
- if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
+ if let abi::Abi::Scalar(scalar) = arg.layout.abi {
if let abi::Int(i, signed) = scalar.value {
if !signed && i.size().bits() == 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode {
......@@ -23,7 +23,7 @@ fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
C: HasDataLayout,
{
match ret.layout.field(cx, i).abi {
- abi::Abi::Scalar(ref scalar) => match scalar.value {
+ abi::Abi::Scalar(scalar) => match scalar.value {
abi::F32 => Some(Reg::f32()),
abi::F64 => Some(Reg::f64()),
_ => None,
......@@ -107,7 +107,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
let offset = arg.layout.fields.offset(i);
// We only care about aligned doubles
- if let abi::Abi::Scalar(ref scalar) = field.abi {
+ if let abi::Abi::Scalar(scalar) = field.abi {
if let abi::F64 = scalar.value {
if offset.is_aligned(dl.f64_align.abi) {
// Insert enough integers to cover [last_offset, offset)
......
......@@ -322,7 +322,7 @@ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, H
Abi::Uninhabited => Err(Heterogeneous),
// The primitive for this algorithm.
- Abi::Scalar(ref scalar) => {
+ Abi::Scalar(scalar) => {
let kind = match scalar.value {
abi::Int(..) | abi::Pointer => RegKind::Integer,
abi::F32 | abi::F64 => RegKind::Float,
......@@ -450,9 +450,9 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
pub fn new(
cx: &impl HasDataLayout,
layout: TyAndLayout<'a, Ty>,
- scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, &abi::Scalar, Size) -> ArgAttributes,
+ scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
) -> Self {
- let mode = match &layout.abi {
+ let mode = match layout.abi {
Abi::Uninhabited => PassMode::Ignore,
Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
Abi::ScalarPair(a, b) => PassMode::Pair(
......@@ -504,7 +504,7 @@ pub fn make_indirect_byval(&mut self) {
pub fn extend_integer_width_to(&mut self, bits: u64) {
// Only integers have signedness
- if let Abi::Scalar(ref scalar) = self.layout.abi {
+ if let Abi::Scalar(scalar) = self.layout.abi {
if let abi::Int(i, signed) = scalar.value {
if i.size().bits() < bits {
if let PassMode::Direct(ref mut attrs) = self.mode {
......
......@@ -44,7 +44,7 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
Ty: TyAbiInterface<'a, C> + Copy,
{
match arg_layout.abi {
- Abi::Scalar(ref scalar) => match scalar.value {
+ Abi::Scalar(scalar) => match scalar.value {
abi::Int(..) | abi::Pointer => {
if arg_layout.size.bits() > xlen {
return Err(CannotUseFpConv);
......@@ -297,7 +297,7 @@ fn classify_arg<'a, Ty, C>(
}
fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
- if let Abi::Scalar(ref scalar) = arg.layout.abi {
+ if let Abi::Scalar(scalar) = arg.layout.abi {
if let abi::Int(i, _) = scalar.value {
// 32-bit integers are always sign-extended
if i.size().bits() == 32 && xlen > 32 {
......
......@@ -18,7 +18,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool
C: HasDataLayout,
{
match layout.abi {
- abi::Abi::Scalar(ref scalar) => scalar.value.is_float(),
+ abi::Abi::Scalar(scalar) => scalar.value.is_float(),
abi::Abi::Aggregate { .. } => {
if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
is_single_fp_element(cx, layout.field(cx, 0))
......
......@@ -14,7 +14,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool
C: HasDataLayout,
{
match layout.abi {
- abi::Abi::Scalar(ref scalar) => scalar.value.is_float(),
+ abi::Abi::Scalar(scalar) => scalar.value.is_float(),
abi::Abi::Aggregate { .. } => {
if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
is_single_fp_element(cx, layout.field(cx, 0))
......
......@@ -49,7 +49,7 @@ fn classify<'a, Ty, C>(
let mut c = match layout.abi {
Abi::Uninhabited => return Ok(()),
- Abi::Scalar(ref scalar) => match scalar.value {
+ Abi::Scalar(scalar) => match scalar.value {
abi::Int(..) | abi::Pointer => Class::Int,
abi::F32 | abi::F64 => Class::Sse,
},
......
......@@ -955,7 +955,7 @@ impl AddressSpace {
/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
- #[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+ #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
Uninhabited,
Scalar(Scalar),
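
Adding `Copy` to this derive is only possible because every variant payload is itself `Copy`; in rustc that means `Scalar` (and, for the `Niche` struct below, its fields) must be `Copy` as well. A simplified sketch of that constraint, not the real definitions:

// Simplified stand-ins; the real Scalar holds a Primitive and a WrappingRange.
#[derive(Clone, Copy)]
struct WrappingRange { start: u128, end: u128 }

#[derive(Clone, Copy)]
struct Scalar { valid_range: WrappingRange }

// Removing `Copy` from `Scalar` would make this derive fail to compile:
// a `Copy` enum requires every variant payload to be `Copy`.
#[derive(Clone, Copy)]
enum Abi {
    Uninhabited,
    Scalar(Scalar),
}

fn main() {
    let a = Abi::Scalar(Scalar { valid_range: WrappingRange { start: 0, end: 1 } });
    let _b = a; // copies
    let _c = a; // `a` is still usable because `Abi: Copy`
}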
......@@ -983,8 +983,8 @@ pub fn is_unsized(&self) -> bool {
/// Returns `true` if this is a single signed integer scalar
#[inline]
pub fn is_signed(&self) -> bool {
- match *self {
- Abi::Scalar(ref scal) => match scal.value {
+ match self {
+ Abi::Scalar(scal) => match scal.value {
Primitive::Int(_, signed) => signed,
_ => false,
},
......@@ -1053,7 +1053,7 @@ pub enum TagEncoding {
},
}
- #[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+ #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
pub offset: Size,
pub scalar: Scalar,
......@@ -1259,7 +1259,7 @@ pub fn might_permit_raw_init<C>(self, cx: &C, zero: bool) -> bool
Ty: TyAbiInterface<'a, C>,
C: HasDataLayout,
{
- let scalar_allows_raw_init = move |s: &Scalar| -> bool {
+ let scalar_allows_raw_init = move |s: Scalar| -> bool {
if zero {
// The range must contain 0.
s.valid_range.contains(0)
......@@ -1270,11 +1270,11 @@ pub fn might_permit_raw_init<C>(self, cx: &C, zero: bool) -> bool
};
// Check the ABI.
- let valid = match &self.abi {
+ let valid = match self.abi {
Abi::Uninhabited => false, // definitely UB
Abi::Scalar(s) => scalar_allows_raw_init(s),
Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
- Abi::Vector { element: s, count } => *count == 0 || scalar_allows_raw_init(s),
+ Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
Abi::Aggregate { .. } => true, // Fields are checked below.
};
if !valid {
......
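
A hedged sketch of the rule encoded in the hunk above (types simplified, wrapping ranges not modeled): zero-initialisation of a scalar is only permitted when its valid range contains 0.

struct WrappingRange { start: u128, end: u128 }

impl WrappingRange {
    // Non-wrapping case only, for illustration; the real type also models
    // ranges that wrap around the primitive's maximum value.
    fn contains(&self, v: u128) -> bool {
        self.start <= v && v <= self.end
    }
}

fn main() {
    let bool_range = WrappingRange { start: 0, end: 1 };       // like `bool`
    let non_zero = WrappingRange { start: 1, end: u128::MAX }; // like NonZeroU*
    assert!(bool_range.contains(0));  // zero-init allowed
    assert!(!non_zero.contains(0));   // zero-init would be UB
}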
......@@ -465,9 +465,9 @@ fn virtual_call_violation_for_method<'tcx>(
let param_env = tcx.param_env(method.def_id);
- let abi_of_ty = |ty: Ty<'tcx>| -> Option<&Abi> {
+ let abi_of_ty = |ty: Ty<'tcx>| -> Option<Abi> {
match tcx.layout_of(param_env.and(ty)) {
- Ok(layout) => Some(&layout.abi),
+ Ok(layout) => Some(layout.abi),
Err(err) => {
// #78372
tcx.sess.delay_span_bug(
......