Commit d00d42d0 authored by Eduard-Mihai Burtescu

rustc_target: pass contexts by reference, not value.

Parent ca4fa6f5
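The diff below applies one mechanical pattern across many files: context traits drop their `Copy` bound, their methods take `&self` instead of `self`, and free functions take `cx: &impl Trait` instead of `cx: impl Trait`, so call sites pass `&cx`. As a rough orientation, here is a minimal, self-contained sketch of the before/after shape; the names are illustrative, not the commit's actual code.

```rust
// Minimal sketch of the pattern this commit applies everywhere (illustrative
// names): the context trait loses its `Copy` bound, its methods take `&self`,
// and helpers take the context by reference.

struct TargetDataLayout {
    pointer_size_bytes: u64,
}

// Before: `pub trait HasDataLayout: Copy` with
// `impl<'a> HasDataLayout for &'a TargetDataLayout`.
// After: no `Copy` bound, and the impl is on the owned type.
trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

// Callees change from `cx: impl HasDataLayout` to `cx: &impl HasDataLayout`.
fn pointer_size(cx: &impl HasDataLayout) -> u64 {
    cx.data_layout().pointer_size_bytes
}

fn main() {
    let dl = TargetDataLayout { pointer_size_bytes: 8 };
    // Call sites now pass `&dl` instead of moving or copying the context.
    assert_eq!(pointer_size(&dl), 8);
}
```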
......@@ -783,11 +783,11 @@ pub fn current_lint_root(&self) -> ast::NodeId {
}
}
impl<'a, 'tcx> LayoutOf for &'a LateContext<'a, 'tcx> {
impl<'a, 'tcx> LayoutOf for LateContext<'a, 'tcx> {
type Ty = Ty<'tcx>;
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(self.param_env.and(ty))
}
}
......
......@@ -84,7 +84,7 @@ fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>) {
// `Option<typeof(function)>` to present a clearer error.
let from = unpack_option_like(self.tcx.global_tcx(), from);
if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (&from.sty, sk_to) {
if size_to == Pointer.size(self.tcx) {
if size_to == Pointer.size(&self.tcx) {
struct_span_err!(self.tcx.sess, span, E0591,
"can't transmute zero-sized type")
.note(&format!("source type: {}", from))
......
......@@ -86,18 +86,18 @@ pub trait PointerArithmetic: layout::HasDataLayout {
// These are not supposed to be overridden.
#[inline(always)]
fn pointer_size(self) -> Size {
fn pointer_size(&self) -> Size {
self.data_layout().pointer_size
}
/// Truncate the given value to the pointer size; also return whether there was an overflow.
fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
fn truncate_to_ptr(&self, val: u128) -> (u64, bool) {
let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
}
// Overflow checking only works properly on the range from -u64 to +u64.
fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
// FIXME: is it possible to over/underflow here?
if i < 0 {
// trickery to ensure that i64::min_value() works fine
......@@ -109,23 +109,23 @@ fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
}
}
fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) {
fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) {
let (res, over1) = val.overflowing_add(i);
let (res, over2) = self.truncate_to_ptr(res as u128);
(res, over1 || over2)
}
fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
fn signed_offset<'tcx>(&self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
let (res, over) = self.overflowing_signed_offset(val, i as i128);
if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
}
fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
fn offset<'tcx>(&self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
let (res, over) = self.overflowing_offset(val, i);
if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
}
fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 {
fn wrapping_signed_offset(&self, val: u64, i: i64) -> u64 {
self.overflowing_signed_offset(val, i as i128).0
}
}
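Note that the trait above keeps its arithmetic in default methods; with `&self` receivers, using them no longer requires the context to be `Copy`. A standalone sketch of the same truncation idea, on a simplified, illustrative context type (not the compiler's real trait):

```rust
// Hedged sketch: a context trait with `&self` default methods, mirroring
// the `truncate_to_ptr` logic above on a simplified context type.
trait PointerArithmetic {
    /// Pointer width in bits for the current target (assumed helper).
    fn pointer_size_bits(&self) -> u32;

    /// Truncate `val` to the pointer size; also report whether it overflowed.
    fn truncate_to_ptr(&self, val: u128) -> (u64, bool) {
        let max_ptr_plus_1 = 1u128 << self.pointer_size_bits();
        ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
    }
}

struct Target64;

impl PointerArithmetic for Target64 {
    fn pointer_size_bits(&self) -> u32 { 64 }
}

fn main() {
    let cx = Target64;
    // 2^64 truncates to 0 and reports an overflow on a 64-bit target.
    assert_eq!(cx.truncate_to_ptr(1u128 << 64), (0, true));
    assert_eq!(cx.truncate_to_ptr(42), (42, false));
}
```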
......@@ -176,7 +176,7 @@ pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self {
Pointer { alloc_id, offset, tag }
}
pub fn wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self {
pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
......@@ -184,12 +184,12 @@ pub fn wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self {
)
}
pub fn overflowing_signed_offset(self, i: i128, cx: impl HasDataLayout) -> (Self, bool) {
pub fn overflowing_signed_offset(self, i: i128, cx: &impl HasDataLayout) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
}
pub fn signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
......@@ -197,12 +197,12 @@ pub fn signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, S
))
}
pub fn overflowing_offset(self, i: Size, cx: impl HasDataLayout) -> (Self, bool) {
pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
}
pub fn offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
......
......@@ -65,7 +65,7 @@ pub fn try_to_ptr(&self) -> Option<Pointer> {
pub fn new_slice(
val: Scalar,
len: u64,
cx: impl HasDataLayout
cx: &impl HasDataLayout
) -> Self {
ConstValue::ScalarPair(val, Scalar::Bits {
bits: len as u128,
......@@ -121,7 +121,7 @@ pub fn erase_tag(self) -> Scalar {
}
#[inline]
pub fn ptr_null(cx: impl HasDataLayout) -> Self {
pub fn ptr_null(cx: &impl HasDataLayout) -> Self {
Scalar::Bits {
bits: 0,
size: cx.data_layout().pointer_size.bytes() as u8,
......@@ -134,52 +134,52 @@ pub fn zst() -> Self {
}
#[inline]
pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
let dl = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.pointer_size.bytes());
assert_eq!(size as u64, dl.pointer_size.bytes());
Ok(Scalar::Bits {
bits: layout.signed_offset(bits as u64, i)? as u128,
bits: dl.signed_offset(bits as u64, i)? as u128,
size,
})
}
Scalar::Ptr(ptr) => ptr.signed_offset(i, layout).map(Scalar::Ptr),
Scalar::Ptr(ptr) => ptr.signed_offset(i, dl).map(Scalar::Ptr),
}
}
#[inline]
pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
let dl = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.pointer_size.bytes());
assert_eq!(size as u64, dl.pointer_size.bytes());
Ok(Scalar::Bits {
bits: layout.offset(bits as u64, i.bytes())? as u128,
bits: dl.offset(bits as u64, i.bytes())? as u128,
size,
})
}
Scalar::Ptr(ptr) => ptr.offset(i, layout).map(Scalar::Ptr),
Scalar::Ptr(ptr) => ptr.offset(i, dl).map(Scalar::Ptr),
}
}
#[inline]
pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self {
let layout = cx.data_layout();
pub fn ptr_wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
let dl = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.pointer_size.bytes());
assert_eq!(size as u64, dl.pointer_size.bytes());
Scalar::Bits {
bits: layout.wrapping_signed_offset(bits as u64, i) as u128,
bits: dl.wrapping_signed_offset(bits as u64, i) as u128,
size,
}
}
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, layout)),
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, dl)),
}
}
#[inline]
pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool {
pub fn is_null_ptr(self, cx: &impl HasDataLayout) -> bool {
match self {
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, cx.data_layout().pointer_size.bytes());
......@@ -301,7 +301,7 @@ pub fn to_u64(self) -> EvalResult<'static, u64> {
Ok(b as u64)
}
pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64> {
pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'static, u64> {
let b = self.to_bits(cx.data_layout().pointer_size)?;
assert_eq!(b as u64 as u128, b);
Ok(b as u64)
......@@ -331,7 +331,7 @@ pub fn to_i64(self) -> EvalResult<'static, i64> {
Ok(b as i64)
}
pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64> {
pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'static, i64> {
let b = self.to_bits(cx.data_layout().pointer_size)?;
let b = sign_extend(b, cx.data_layout().pointer_size) as i128;
assert_eq!(b as i64 as i128, b);
......
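`to_isize` above reads a pointer-sized bit pattern and sign-extends it before checking that it fits in an `i64`. A hedged sketch of that sign-extension step follows; rustc's real `sign_extend` takes a `Size`, while this simplified version takes the bit width directly.

```rust
/// Sign-extend `value`, whose low `size_bits` bits are significant, to the
/// full 128-bit width by shifting into the sign position and arithmetic-
/// shifting back down.
fn sign_extend(value: u128, size_bits: u64) -> u128 {
    assert!(size_bits > 0 && size_bits <= 128);
    let shift = 128 - size_bits;
    (((value << shift) as i128) >> shift) as u128
}

fn main() {
    // 0xFF in 8 bits is -1; sign-extended to 128 bits it is all ones.
    assert_eq!(sign_extend(0xFF, 8), u128::MAX);
    // 0x7F in 8 bits is +127 and stays unchanged.
    assert_eq!(sign_extend(0x7F, 8), 0x7F);
}
```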
......@@ -428,7 +428,7 @@ fn on_unimplemented_note(
));
let tcx = self.tcx;
if let Some(len) = len.val.try_to_scalar().and_then(|scalar| {
scalar.to_usize(tcx).ok()
scalar.to_usize(&tcx).ok()
}) {
flags.push((
"_Self".to_owned(),
......
......@@ -30,7 +30,7 @@
pub trait IntegerExt {
fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: Ty<'tcx>,
repr: &ReprOptions,
......@@ -56,7 +56,7 @@ fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>
}
/// Get the Integer type from an attr::IntType.
fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
let dl = cx.data_layout();
match ity {
......@@ -92,7 +92,7 @@ fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let min_default = I8;
if let Some(ity) = repr.int {
let discr = Integer::from_attr(tcx, ity);
let discr = Integer::from_attr(&tcx, ity);
let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
if discr < fit {
bug!("Integer::repr_discr: `#[repr]` hint too small for \
......@@ -202,14 +202,13 @@ pub fn provide(providers: &mut ty::query::Providers<'_>) {
};
}
#[derive(Copy, Clone)]
pub struct LayoutCx<'tcx, C> {
pub tcx: C,
pub param_env: ty::ParamEnv<'tcx>
}
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
fn layout_raw_uncached(self, ty: Ty<'tcx>)
fn layout_raw_uncached(&self, ty: Ty<'tcx>)
-> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
let tcx = self.tcx;
let param_env = self.param_env;
......@@ -899,7 +898,7 @@ enum StructKind {
let (mut min, mut max) = (i128::max_value(), i128::min_value());
let discr_type = def.repr.discr_type();
let bits = Integer::from_attr(tcx, discr_type).size().bits();
let bits = Integer::from_attr(self, discr_type).size().bits();
for (i, discr) in def.discriminants(tcx).enumerate() {
if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
continue;
......@@ -1141,7 +1140,7 @@ enum StructKind {
/// This is invoked by the `layout_raw` query to record the final
/// layout of each type.
#[inline]
fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
// If we are running with `-Zprint-type-sizes`, record layouts for
// dumping later. Ignore layouts that are done with non-empty
// environments or non-monomorphic layouts, as the user only wants
......@@ -1158,7 +1157,7 @@ fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
self.record_layout_for_printing_outlined(layout)
}
fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
// (delay format until we actually need it)
let record = |kind, packed, opt_discr_size, variants| {
let type_desc = format!("{:?}", layout.ty);
......@@ -1478,7 +1477,7 @@ impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
/// Computes the layout of a type. Note that this implicitly
/// executes in "reveal all" mode.
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
let param_env = self.param_env.with_reveal_all();
let ty = self.tcx.normalize_erasing_regions(param_env, ty);
let details = self.tcx.layout_raw(param_env.and(ty))?;
......@@ -1505,7 +1504,7 @@ impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>>
/// Computes the layout of a type. Note that this implicitly
/// executes in "reveal all" mode.
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
let param_env = self.param_env.with_reveal_all();
let ty = self.tcx.normalize_erasing_regions(param_env, ty);
let details = self.tcx.layout_raw(param_env.and(ty))?;
......@@ -1563,7 +1562,7 @@ impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: usize) -> TyLayout<'tcx> {
let details = match this.variants {
Variants::Single { index } if index == variant_index => this.details,
......@@ -1602,7 +1601,7 @@ fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'t
}
}
fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
let tcx = cx.tcx();
cx.layout_of(match this.ty.sty {
ty::Bool |
......@@ -1699,7 +1698,7 @@ fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
Variants::Tagged { tag: ref discr, .. } |
Variants::NicheFilling { niche: ref discr, .. } => {
assert_eq!(i, 0);
let layout = LayoutDetails::scalar(tcx, discr.clone());
let layout = LayoutDetails::scalar(cx, discr.clone());
return MaybeResult::from_ok(TyLayout {
details: tcx.intern_layout(layout),
ty: discr.value.to_ty(tcx)
......@@ -1725,7 +1724,7 @@ struct Niche {
impl Niche {
fn reserve<'a, 'tcx>(
&self,
cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
count: u128,
) -> Option<(u128, Scalar)> {
if count > self.available {
......@@ -1745,7 +1744,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
/// Find the offset of a niche leaf field, starting from
/// the given type and recursing through aggregates.
// FIXME(eddyb) traverse already optimized enums.
fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
let scalar_niche = |scalar: &Scalar, offset| {
let Scalar { value, valid_range: ref v } = *scalar;
......
......@@ -43,7 +43,7 @@ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.ty.sty {
ty::Int(ity) => {
let bits = ty::tls::with(|tcx| {
Integer::from_attr(tcx, SignedInt(ity)).size().bits()
Integer::from_attr(&tcx, SignedInt(ity)).size().bits()
});
let x = self.val as i128;
// sign extend the raw representation to be an i128
......@@ -62,8 +62,8 @@ pub fn wrap_incr<'a, 'gcx>(self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self {
}
pub fn checked_add<'a, 'gcx>(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, n: u128) -> (Self, bool) {
let (int, signed) = match self.ty.sty {
Int(ity) => (Integer::from_attr(tcx, SignedInt(ity)), true),
Uint(uty) => (Integer::from_attr(tcx, UnsignedInt(uty)), false),
Int(ity) => (Integer::from_attr(&tcx, SignedInt(ity)), true),
Uint(uty) => (Integer::from_attr(&tcx, UnsignedInt(uty)), false),
_ => bug!("non integer discriminant"),
};
......
......@@ -446,29 +446,29 @@ pub fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
}
}
impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx> {
impl ty::layout::HasDataLayout for CodegenCx<'ll, 'tcx> {
fn data_layout(&self) -> &ty::layout::TargetDataLayout {
&self.tcx.data_layout
}
}
impl HasTargetSpec for &'a CodegenCx<'ll, 'tcx> {
impl HasTargetSpec for CodegenCx<'ll, 'tcx> {
fn target_spec(&self) -> &Target {
&self.tcx.sess.target.target
}
}
impl ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'ll, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
impl ty::layout::HasTyCtxt<'tcx> for CodegenCx<'ll, 'tcx> {
fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.tcx
}
}
impl LayoutOf for &'a CodegenCx<'ll, 'tcx> {
impl LayoutOf for CodegenCx<'ll, 'tcx> {
type Ty = Ty<'tcx>;
type TyLayout = TyLayout<'tcx>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))
.unwrap_or_else(|e| if let LayoutError::SizeOverflow(_) = e {
self.sess().fatal(&e.to_string())
......
......@@ -87,8 +87,8 @@ pub fn scalar_to_llvm(
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1);
let layout = cx.data_layout();
let pointer_size = layout.pointer_size.bytes() as usize;
let dl = cx.data_layout();
let pointer_size = dl.pointer_size.bytes() as usize;
let mut next_offset = 0;
for &(offset, ((), alloc_id)) in alloc.relocations.iter() {
......@@ -99,7 +99,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
llvals.push(C_bytes(cx, &alloc.bytes[next_offset..offset]));
}
let ptr_offset = read_target_uint(
layout.endian,
dl.endian,
&alloc.bytes[offset..(offset + pointer_size)],
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
llvals.push(scalar_to_llvm(
......
......@@ -377,13 +377,13 @@ fn report_bin_hex_error(
let (t, actually) = match ty {
ty::Int(t) => {
let ity = attr::IntType::SignedInt(t);
let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits();
let bits = layout::Integer::from_attr(&cx.tcx, ity).size().bits();
let actually = (val << (128 - bits)) as i128 >> (128 - bits);
(format!("{:?}", t), actually.to_string())
}
ty::Uint(t) => {
let ity = attr::IntType::UnsignedInt(t);
let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits();
let bits = layout::Integer::from_attr(&cx.tcx, ity).size().bits();
let actually = (val << (128 - bits)) >> (128 - bits);
(format!("{:?}", t), actually.to_string())
}
......@@ -829,7 +829,7 @@ fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
Ok(layout) => {
let variants = &layout.variants;
if let layout::Variants::Tagged { ref variants, ref tag, .. } = variants {
let discr_size = tag.value.size(cx.tcx).bytes();
let discr_size = tag.value.size(&cx.tcx).bytes();
debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
t, layout.size.bytes(), layout);
......
......@@ -165,7 +165,7 @@ pub fn const_eval_literal(
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = self.tcx.allocate_bytes(s.as_bytes());
ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, self.tcx)
ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, &self.tcx)
},
LitKind::ByteStr(ref data) => {
let id = self.tcx.allocate_bytes(data);
......
......@@ -669,14 +669,14 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>,
}
ty::Int(ity) if exhaustive_integer_patterns => {
// FIXME(49937): refactor these bit manipulations into interpret.
let bits = Integer::from_attr(cx.tcx, SignedInt(ity)).size().bits() as u128;
let bits = Integer::from_attr(&cx.tcx, SignedInt(ity)).size().bits() as u128;
let min = 1u128 << (bits - 1);
let max = (1u128 << (bits - 1)) - 1;
vec![ConstantRange(min, max, pcx.ty, RangeEnd::Included)]
}
ty::Uint(uty) if exhaustive_integer_patterns => {
// FIXME(49937): refactor these bit manipulations into interpret.
let bits = Integer::from_attr(cx.tcx, UnsignedInt(uty)).size().bits() as u128;
let bits = Integer::from_attr(&cx.tcx, UnsignedInt(uty)).size().bits() as u128;
let max = !0u128 >> (128 - bits);
vec![ConstantRange(0, max, pcx.ty, RangeEnd::Included)]
}
......@@ -862,7 +862,7 @@ fn from_pat(tcx: TyCtxt<'_, 'tcx, 'tcx>,
fn signed_bias(tcx: TyCtxt<'_, 'tcx, 'tcx>, ty: Ty<'tcx>) -> u128 {
match ty.sty {
ty::Int(ity) => {
let bits = Integer::from_attr(tcx, SignedInt(ity)).size().bits() as u128;
let bits = Integer::from_attr(&tcx, SignedInt(ity)).size().bits() as u128;
1u128 << (bits - 1)
}
_ => 0
......
......@@ -1313,7 +1313,7 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind,
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = tcx.allocate_bytes(s.as_bytes());
ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, tcx)
ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, &tcx)
},
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_bytes(data);
......
......@@ -331,7 +331,7 @@ fn unsize_into_ptr(
let val = Immediate::new_slice(
ptr,
length.unwrap_usize(self.tcx.tcx),
self.tcx.tcx,
self,
);
self.write_immediate(val, dest)
}
......@@ -394,7 +394,7 @@ fn unsize_into(
src_field.into()
}
Err(..) => {
let src_field_layout = src.layout.field(&self, i)?;
let src_field_layout = src.layout.field(self, i)?;
// this must be a field covering the entire thing
assert_eq!(src.layout.fields.offset(i).bytes(), 0);
assert_eq!(src_field_layout.size, src.layout.size);
......
......@@ -139,8 +139,8 @@ pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand<Tag>> {
}
}
impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
for &'b EvalContext<'a, 'mir, 'tcx, M>
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
for EvalContext<'a, 'mir, 'tcx, M>
{
#[inline]
fn data_layout(&self) -> &layout::TargetDataLayout {
......@@ -148,16 +148,7 @@ fn data_layout(&self) -> &layout::TargetDataLayout {
}
}
impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M>
{
#[inline]
fn data_layout(&self) -> &layout::TargetDataLayout {
&self.tcx.data_layout
}
}
impl<'b, 'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for &'b EvalContext<'a, 'mir, 'tcx, M>
impl<'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for EvalContext<'a, 'mir, 'tcx, M>
where M: Machine<'a, 'mir, 'tcx>
{
#[inline]
......@@ -166,40 +157,19 @@ fn tcx<'d>(&'d self) -> TyCtxt<'d, 'tcx, 'tcx> {
}
}
impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> layout::HasTyCtxt<'tcx>
for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M>
{
#[inline]
fn tcx<'d>(&'d self) -> TyCtxt<'d, 'tcx, 'tcx> {
*self.tcx
}
}
impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf
for &'b EvalContext<'a, 'mir, 'tcx, M>
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf
for EvalContext<'a, 'mir, 'tcx, M>
{
type Ty = Ty<'tcx>;
type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
#[inline]
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(self.param_env.and(ty))
.map_err(|layout| EvalErrorKind::Layout(layout).into())
}
}
impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf
for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M>
{
type Ty = Ty<'tcx>;
type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
#[inline]
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
(&**self).layout_of(ty)
}
}
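The impls deleted above existed only because by-value receivers cannot auto-ref: code holding `&mut EvalContext` needed dedicated `&&mut EvalContext` impls. Once `layout_of` takes `&self`, a single impl on the owned type serves `&`, `&mut`, and owned contexts through ordinary auto-ref/deref, as this minimal sketch (illustrative names) shows:

```rust
// Sketch: with a `&self` method, no extra impls on `&Cx` or `&&mut Cx`
// are needed; method dispatch auto-derefs through the references.
trait LayoutOfSketch {
    fn layout_of(&self, ty: &str) -> String;
}

struct Cx;

impl LayoutOfSketch for Cx {
    fn layout_of(&self, ty: &str) -> String {
        format!("layout of {}", ty)
    }
}

// A caller holding `&mut Cx` calls the `&self` method directly.
fn use_cx(cx: &mut Cx) -> String {
    cx.layout_of("u32")
}

fn main() {
    let mut cx = Cx;
    assert_eq!(use_cx(&mut cx), "layout of u32");
}
```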
impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
pub fn new(
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
......@@ -335,7 +305,7 @@ pub fn layout_of_local(
pub fn str_to_immediate(&mut self, s: &str) -> EvalResult<'tcx, Immediate<M::PointerTag>> {
let ptr = self.memory.allocate_static_bytes(s.as_bytes()).with_default_tag();
Ok(Immediate::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx))
Ok(Immediate::new_slice(Scalar::Ptr(ptr), s.len() as u64, self))
}
/// Return the actual dynamic size and alignment of the place at the given type.
......
......@@ -77,16 +77,8 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> {
pub(super) tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}
impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
for &'b Memory<'a, 'mir, 'tcx, M>
{
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout
}
}
impl<'a, 'b, 'c, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
for &'b &'c mut Memory<'a, 'mir, 'tcx, M>
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
for Memory<'a, 'mir, 'tcx, M>
{
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
......
......@@ -112,7 +112,7 @@ pub fn to_u64(self) -> EvalResult<'tcx, u64> {
}
#[inline(always)]
pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> {
pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> {
self.not_undef()?.to_usize(cx)
}
......@@ -132,7 +132,7 @@ pub fn to_i64(self) -> EvalResult<'tcx, i64> {
}
#[inline(always)]
pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, i64> {
pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, i64> {
self.not_undef()?.to_isize(cx)
}
}
......@@ -178,7 +178,7 @@ pub fn erase_tag(self) -> Immediate
pub fn new_slice(
val: Scalar<Tag>,
len: u64,
cx: impl HasDataLayout
cx: &impl HasDataLayout
) -> Self {
Immediate::ScalarPair(
val.into(),
......@@ -743,7 +743,7 @@ pub fn read_discriminant(
.ty_adt_def().expect("tagged layout corresponds to adt")
.repr
.discr_type();
let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty);
let discr_ty = layout::Integer::from_attr(self, discr_ty);
let shift = 128 - discr_ty.size().bits();
let truncatee = sexted as u128;
(truncatee << shift) >> shift
......
......@@ -128,7 +128,7 @@ pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
/// Produces a Place that will error if attempted to be read from or written to
#[inline(always)]
pub fn null(cx: impl HasDataLayout) -> Self {
pub fn null(cx: &impl HasDataLayout) -> Self {
Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap())
}
......@@ -156,7 +156,7 @@ pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
/// Produces a MemPlace that works for ZST but nothing else
#[inline]
pub fn dangling(layout: TyLayout<'tcx>, cx: impl HasDataLayout) -> Self {
pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
MPlaceTy {
mplace: MemPlace::from_scalar_ptr(
Scalar::from_uint(layout.align.abi(), cx.pointer_size()),
......@@ -172,7 +172,7 @@ fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyLayout<'tcx>) -> Self {
}
#[inline]
pub(super) fn len(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> {
pub(super) fn len(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> {
if self.layout.is_unsized() {
// We need to consult `meta` metadata
match self.layout.ty.sty {
......@@ -217,7 +217,7 @@ pub fn to_mem_place(self) -> MPlaceTy<'tcx, Tag> {
impl<'tcx, Tag: ::std::fmt::Debug> Place<Tag> {
/// Produces a Place that will error if attempted to be read from or written to
#[inline(always)]
pub fn null(cx: impl HasDataLayout) -> Self {
pub fn null(cx: &impl HasDataLayout) -> Self {
Place::Ptr(MemPlace::null(cx))
}
......@@ -510,7 +510,7 @@ pub fn place_downcast(
Place::Ptr(mplace) =>
self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into(),
Place::Local { .. } => {
let layout = base.layout.for_variant(&self, variant);
let layout = base.layout.for_variant(self, variant);
PlaceTy { layout, ..base }
}
})
......@@ -738,10 +738,10 @@ fn write_immediate_to_mplace_no_validate(
_ => bug!("write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
dest.layout)
};
let (a_size, b_size) = (a.size(&self), b.size(&self));
let (a_align, b_align) = (a.align(&self), b.align(&self));
let (a_size, b_size) = (a.size(self), b.size(self));
let (a_align, b_align) = (a.align(self), b.align(self));
let b_offset = a_size.abi_align(b_align);
let b_ptr = ptr.offset(b_offset, &self)?.into();
let b_ptr = ptr.offset(b_offset, self)?.into();
// It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
// but that does not work: We could be a newtype around a pair, then the
......@@ -896,7 +896,7 @@ pub fn allocate(
if layout.is_unsized() {
assert!(self.tcx.features().unsized_locals, "cannot alloc memory for unsized type");
// FIXME: What should we do here? We should definitely also tag!
Ok(MPlaceTy::dangling(layout, &self))
Ok(MPlaceTy::dangling(layout, self))
} else {
let ptr = self.memory.allocate(layout.size, layout.align, kind)?;
let ptr = M::tag_new_allocation(self, ptr, kind)?;
......@@ -923,7 +923,7 @@ pub fn write_discriminant_index(
// raw discriminants for enums are isize or bigger during
// their computation, but the in-memory tag is the smallest possible
// representation
let size = tag.value.size(self.tcx.tcx);
let size = tag.value.size(self);
let shift = 128 - size.bits();
let discr_val = (discr_val << shift) >> shift;
......
......@@ -217,7 +217,7 @@ fn eval_rvalue_into_place(
Repeat(ref operand, _) => {
let op = self.eval_operand(operand, None)?;
let dest = self.force_allocation(dest)?;
let length = dest.len(&self)?;
let length = dest.len(self)?;
if length > 0 {
// write the first
......@@ -227,7 +227,7 @@ fn eval_rvalue_into_place(
if length > 1 {
// copy the rest
let (dest, dest_align) = first.to_scalar_ptr_align();
let rest = dest.ptr_offset(first.layout.size, &self)?;
let rest = dest.ptr_offset(first.layout.size, self)?;
self.memory.copy_repeatedly(
dest, dest_align, rest, dest_align, first.layout.size, length - 1, true
)?;
......@@ -239,7 +239,7 @@ fn eval_rvalue_into_place(
// FIXME(CTFE): don't allow computing the length of arrays in const eval
let src = self.eval_place(place)?;
let mplace = self.force_allocation(src)?;
let len = mplace.len(&self)?;
let len = mplace.len(self)?;
let size = self.pointer_size();
self.write_scalar(
Scalar::from_uint(len, size),
......
......@@ -405,7 +405,7 @@ fn eval_fn_call(
let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let vtable = ptr.vtable()?;
let fn_ptr = self.memory.read_ptr_sized(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
vtable.offset(ptr_size * (idx as u64 + 3), self)?,
ptr_align
)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
......@@ -416,7 +416,7 @@ fn eval_fn_call(
let mut args = args.to_vec();
let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty;
let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee);
args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(&self, 0)?;
args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(self, 0)?;
args[0].op = Operand::Immediate(Immediate::Scalar(ptr.ptr.into())); // strip vtable
trace!("Patched self operand to {:#?}", args[0]);
// recurse with concrete function
......@@ -455,7 +455,7 @@ fn drop_in_place(
};
let ty = self.tcx.mk_unit(); // return type is ()
let dest = MPlaceTy::dangling(self.layout_of(ty)?, &self);
let dest = MPlaceTy::dangling(self.layout_of(ty)?, self);
self.eval_fn_call(
instance,
......
......@@ -60,9 +60,9 @@ pub fn get_vtable(
let drop = self.memory.create_fn_alloc(drop).with_default_tag();
self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?;
let size_ptr = vtable.offset(ptr_size, &self)?;
let size_ptr = vtable.offset(ptr_size, self)?;
self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::from_uint(size, ptr_size).into())?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
let align_ptr = vtable.offset(ptr_size * 2, self)?;
self.memory.write_ptr_sized(align_ptr, ptr_align,
Scalar::from_uint(align, ptr_size).into())?;
......@@ -70,7 +70,7 @@ pub fn get_vtable(
if let Some((def_id, substs)) = *method {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance).with_default_tag();
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?;
self.memory.write_ptr_sized(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
}
}
......
......@@ -87,23 +87,23 @@ struct ConstPropagator<'a, 'mir, 'tcx:'a+'mir> {
param_env: ParamEnv<'tcx>,
}
impl<'a, 'b, 'tcx> LayoutOf for &'a ConstPropagator<'a, 'b, 'tcx> {
impl<'a, 'b, 'tcx> LayoutOf for ConstPropagator<'a, 'b, 'tcx> {
type Ty = ty::Ty<'tcx>;
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
fn layout_of(self, ty: ty::Ty<'tcx>) -> Self::TyLayout {
fn layout_of(&self, ty: ty::Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(self.param_env.and(ty))
}
}
impl<'a, 'b, 'tcx> HasDataLayout for &'a ConstPropagator<'a, 'b, 'tcx> {
impl<'a, 'b, 'tcx> HasDataLayout for ConstPropagator<'a, 'b, 'tcx> {
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout
}
}
impl<'a, 'b, 'tcx> HasTyCtxt<'tcx> for &'a ConstPropagator<'a, 'b, 'tcx> {
impl<'a, 'b, 'tcx> HasTyCtxt<'tcx> for ConstPropagator<'a, 'b, 'tcx> {
#[inline]
fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> {
self.tcx
......
......@@ -11,7 +11,7 @@
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
......@@ -41,7 +41,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
})
}
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -75,7 +75,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -109,7 +109,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
arg.make_indirect();
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......
......@@ -11,21 +11,21 @@
use abi::call::{ArgType, FnType, };
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(_tuncx: C, ret: &mut ArgType<'a, Ty>)
fn classify_ret_ty<'a, Ty, C>(_cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
ret.extend_integer_width_to(32);
}
fn classify_arg_ty<'a, Ty, C>(_cx: C, arg: &mut ArgType<'a, Ty>)
fn classify_arg_ty<'a, Ty, C>(_cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.extend_integer_width_to(32);
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......
......@@ -12,7 +12,7 @@
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
......@@ -42,7 +42,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
})
}
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, vfp: bool)
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, vfp: bool)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -77,7 +77,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, vfp: bool)
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, vfp: bool)
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, vfp: bool)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -101,7 +101,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, vfp: bool)
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
......
......@@ -16,7 +16,7 @@
// See the https://github.com/kripken/emscripten-fastcomp-clang repository.
// The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions.
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -42,7 +42,7 @@ fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
}
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......
......@@ -11,7 +11,7 @@
use abi::call::{ArgType, FnType, Reg, Uniform};
use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
......@@ -22,7 +22,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
}
}
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
......@@ -44,7 +44,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
*offset = offset.abi_align(align) + size.abi_align(align);
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::ZERO;
......
......@@ -27,7 +27,7 @@ fn extend_integer_width_mips<Ty>(arg: &mut ArgType<Ty>, bits: u64) {
arg.extend_integer_width_to(bits);
}
fn float_reg<'a, Ty, C>(cx: C, ret: &ArgType<'a, Ty>, i: usize) -> Option<Reg>
fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgType<'a, Ty>, i: usize) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -41,7 +41,7 @@ fn float_reg<'a, Ty, C>(cx: C, ret: &ArgType<'a, Ty>, i: usize) -> Option<Reg>
}
}
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -83,7 +83,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
}
}
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -151,7 +151,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......
......@@ -137,7 +137,7 @@ impl Reg {
}
impl Reg {
pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
let dl = cx.data_layout();
match self.kind {
RegKind::Integer => {
......@@ -188,7 +188,7 @@ fn from(unit: Reg) -> Uniform {
}
impl Uniform {
pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
self.unit.align(cx)
}
}
......@@ -225,12 +225,12 @@ pub fn pair(a: Reg, b: Reg) -> CastTarget {
}
}
pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
(self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64)
.abi_align(self.rest.align(cx)) + self.rest.total
}
pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
self.prefix.iter()
.filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx)))
.fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)),
......@@ -249,8 +249,8 @@ fn is_aggregate(&self) -> bool {
}
}
fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> + Copy
fn homogeneous_aggregate<C>(&self, cx: &C) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self>
{
match self.abi {
Abi::Uninhabited => None,
......@@ -483,7 +483,7 @@ pub struct FnType<'a, Ty> {
}
impl<'a, Ty> FnType<'a, Ty> {
pub fn adjust_for_cabi<C>(&mut self, cx: C, abi: ::spec::abi::Abi) -> Result<(), String>
pub fn adjust_for_cabi<C>(&mut self, cx: &C, abi: ::spec::abi::Abi) -> Result<(), String>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
......
......@@ -11,7 +11,7 @@
use abi::call::{ArgType, FnType, Reg, Uniform};
use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
......@@ -22,7 +22,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
}
}
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
......@@ -44,7 +44,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
*offset = offset.abi_align(align) + size.abi_align(align);
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::ZERO;
......
......@@ -22,7 +22,7 @@ enum ABI {
}
use self::ABI::*;
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI)
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
......@@ -52,7 +52,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: AB
})
}
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, abi: ABI)
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -95,7 +95,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, abi: ABI)
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI)
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -134,7 +134,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI)
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......
......@@ -24,7 +24,7 @@ fn classify_ret_ty<'a, Ty, C>(ret: &mut ArgType<Ty>)
}
}
fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>) -> bool
where Ty: TyLayoutMethods<'a, C>,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -41,7 +41,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
}
}
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -67,7 +67,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
}
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......
......@@ -11,7 +11,7 @@
use abi::call::{ArgType, FnType, Reg, Uniform};
use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
......@@ -22,7 +22,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
}
}
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
......@@ -44,7 +44,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
*offset = offset.abi_align(align) + size.abi_align(align);
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::ZERO;
......
......@@ -13,7 +13,7 @@
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
......@@ -41,7 +41,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
})
}
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -69,7 +69,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -95,7 +95,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......
......@@ -18,7 +18,7 @@ pub enum Flavor {
Fastcall
}
fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>) -> bool
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......@@ -35,7 +35,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
}
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>, flavor: Flavor)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>, flavor: Flavor)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
......
......@@ -31,12 +31,12 @@ enum Class {
const LARGEST_VECTOR_SIZE: usize = 512;
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
fn classify_arg<'a, Ty, C>(cx: C, arg: &ArgType<'a, Ty>)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgType<'a, Ty>)
-> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
fn classify<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>,
fn classify<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>,
cls: &mut [Option<Class>], off: Size) -> Result<(), Memory>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
......@@ -178,7 +178,7 @@ fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
target
}
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
......
......@@ -203,11 +203,11 @@ pub fn vector_align(&self, vec_size: Size) -> Align {
}
}
pub trait HasDataLayout: Copy {
pub trait HasDataLayout {
fn data_layout(&self) -> &TargetDataLayout;
}
impl<'a> HasDataLayout for &'a TargetDataLayout {
impl HasDataLayout for TargetDataLayout {
fn data_layout(&self) -> &TargetDataLayout {
self
}
......@@ -267,7 +267,7 @@ pub fn is_abi_aligned(self, align: Align) -> bool {
}
#[inline]
pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
let dl = cx.data_layout();
let bytes = self.bytes().checked_add(offset.bytes())?;
......@@ -280,7 +280,7 @@ pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size>
}
#[inline]
pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
let dl = cx.data_layout();
let bytes = self.bytes().checked_mul(count)?;
......@@ -457,7 +457,7 @@ pub fn size(self) -> Size {
}
}
pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
pub fn align<C: HasDataLayout>(self, cx: &C) -> Align {
let dl = cx.data_layout();
match self {
......@@ -492,7 +492,7 @@ pub fn fit_unsigned(x: u128) -> Integer {
}
/// Find the smallest integer with the given alignment.
pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
pub fn for_abi_align<C: HasDataLayout>(cx: &C, align: Align) -> Option<Integer> {
let dl = cx.data_layout();
let wanted = align.abi();
......@@ -505,7 +505,7 @@ pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
}
/// Find the largest integer with the given alignment or less.
pub fn approximate_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Integer {
pub fn approximate_abi_align<C: HasDataLayout>(cx: &C, align: Align) -> Integer {
let dl = cx.data_layout();
let wanted = align.abi();
......@@ -571,7 +571,7 @@ pub enum Primitive {
}
impl<'a, 'tcx> Primitive {
pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
let dl = cx.data_layout();
match self {
......@@ -582,7 +582,7 @@ pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
}
}
pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
pub fn align<C: HasDataLayout>(self, cx: &C) -> Align {
let dl = cx.data_layout();
match self {
......@@ -642,7 +642,7 @@ pub fn is_bool(&self) -> bool {
/// Returns the valid range as a `x..y` range.
///
/// If `x` and `y` are equal, the range is full, not empty.
pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: C) -> Range<u128> {
pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: &C) -> Range<u128> {
// For a (max) value of -1, max will be `-1 as usize`, which overflows.
// However, that is fine here (it would still represent the full range),
// i.e., if the range is everything.
......@@ -854,7 +854,7 @@ pub struct LayoutDetails {
}
impl LayoutDetails {
pub fn scalar<C: HasDataLayout>(cx: C, scalar: Scalar) -> Self {
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
let size = scalar.value.size(cx);
let align = scalar.value.align(cx);
LayoutDetails {
......@@ -891,20 +891,20 @@ pub trait LayoutOf {
type Ty;
type TyLayout;
fn layout_of(self, ty: Self::Ty) -> Self::TyLayout;
fn layout_of(&self, ty: Self::Ty) -> Self::TyLayout;
}
pub trait TyLayoutMethods<'a, C: LayoutOf<Ty = Self>>: Sized {
fn for_variant(this: TyLayout<'a, Self>, cx: C, variant_index: usize) -> TyLayout<'a, Self>;
fn field(this: TyLayout<'a, Self>, cx: C, i: usize) -> C::TyLayout;
fn for_variant(this: TyLayout<'a, Self>, cx: &C, variant_index: usize) -> TyLayout<'a, Self>;
fn field(this: TyLayout<'a, Self>, cx: &C, i: usize) -> C::TyLayout;
}
impl<'a, Ty> TyLayout<'a, Ty> {
pub fn for_variant<C>(self, cx: C, variant_index: usize) -> Self
pub fn for_variant<C>(self, cx: &C, variant_index: usize) -> Self
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
Ty::for_variant(self, cx, variant_index)
}
pub fn field<C>(self, cx: C, i: usize) -> C::TyLayout
pub fn field<C>(self, cx: &C, i: usize) -> C::TyLayout
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
Ty::field(self, cx, i)
}
......
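These signatures are the hub of the change: `LayoutOf::layout_of` now takes `&self`, and `TyLayout`'s helpers thread `cx: &C` straight through. A small self-contained sketch of generic code written against that shape, using toy types rather than rustc's:

```rust
// Toy stand-ins for the generic plumbing above: `layout_of` takes `&self`
// and helpers accept `cx: &C`, so contexts are only ever borrowed.
trait LayoutOf {
    type Ty;
    type TyLayout;
    fn layout_of(&self, ty: Self::Ty) -> Self::TyLayout;
}

struct SizeCx;

impl LayoutOf for SizeCx {
    type Ty = &'static str;
    type TyLayout = Option<u64>; // toy "layout": size in bytes, if known
    fn layout_of(&self, ty: &'static str) -> Option<u64> {
        match ty {
            "u8" => Some(1),
            "u32" => Some(4),
            "u64" => Some(8),
            _ => None,
        }
    }
}

// Generic helper in the style of `for_variant`/`field` above: it borrows
// the context and forwards it rather than consuming a copy.
fn size_of_field<C>(cx: &C, ty: C::Ty) -> C::TyLayout
where
    C: LayoutOf,
{
    cx.layout_of(ty)
}

fn main() {
    let cx = SizeCx;
    assert_eq!(size_of_field(&cx, "u32"), Some(4));
}
```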
......@@ -444,11 +444,11 @@ pub struct Target {
pub options: TargetOptions,
}
pub trait HasTargetSpec: Copy {
pub trait HasTargetSpec {
fn target_spec(&self) -> &Target;
}
impl<'a> HasTargetSpec for &'a Target {
impl HasTargetSpec for Target {
fn target_spec(&self) -> &Target {
self
}
......