Commit a25b1315 authored by bors

Auto merge of #95576 - DrMeepster:box_erasure, r=oli-obk

Remove dereferencing of Box from codegen

Through #94043, #94414, #94873, and #95328, I've been fixing issues caused by Box being treated like a pointer when it is not a pointer. However, these PRs just introduced special cases for Box. This PR removes those special cases and instead transforms a deref of Box into a deref of the pointer it contains.

Hopefully, this is the end of the Box<T, A> ICEs.
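To make the rewrite concrete, here is a minimal, self-contained sketch of the layout assumption the new pass relies on, with stand-in types since the innards of `Unique` and `NonNull` are not nameable outside the standard library. A deref of a box becomes a projection through three zero-offset fields down to the raw pointer, followed by a deref of that pointer:

```rust
// Illustrative only: MyBox mirrors Box<T> -> Unique<T> -> NonNull<T> -> *const T.
struct MyNonNull<T>(*const T);
struct MyUnique<T>(MyNonNull<T>);
struct MyBox<T>(MyUnique<T>);

fn read_through<T: Copy>(b: &MyBox<T>) -> T {
    // What used to be compiled as a direct `*b`: project out the
    // `.0.0.0` raw pointer, then dereference that instead.
    let MyBox(MyUnique(MyNonNull(ptr))) = b;
    unsafe { **ptr }
}

fn main() {
    let value = 42_i32;
    let b = MyBox(MyUnique(MyNonNull(&value as *const i32)));
    assert_eq!(read_through(&b), 42);
}
```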
......@@ -118,22 +118,20 @@ pub fn immediate(self) -> V {
}
pub fn deref<Cx: LayoutTypeMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
if self.layout.ty.is_box() {
bug!("dereferencing {:?} in codegen", self.layout.ty);
}
let projected_ty = self
.layout
.ty
.builtin_deref(true)
.unwrap_or_else(|| bug!("deref of non-pointer {:?}", self))
.ty;
let (llptr, llextra) = match self.val {
OperandValue::Immediate(llptr) => (llptr, None),
- OperandValue::Pair(llptr, llextra) => {
- // if the box's allocator isn't a ZST, then "llextra" is actually the allocator
- if self.layout.ty.is_box() && !self.layout.field(cx, 1).is_zst() {
- (llptr, None)
- } else {
- (llptr, Some(llextra))
- }
- }
OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
};
let layout = cx.layout_of(projected_ty);
......
......@@ -446,16 +446,7 @@ pub fn codegen_place(
mir::PlaceRef { projection: &place_ref.projection[..elem.0], ..place_ref },
);
- // a box with a non-zst allocator should not be directly dereferenced
- if cg_base.layout.ty.is_box() && !cg_base.layout.field(cx, 1).is_zst() {
- // Extract `Box<T>` -> `Unique<T>` -> `NonNull<T>` -> `*const T`
- let ptr =
- cg_base.extract_field(bx, 0).extract_field(bx, 0).extract_field(bx, 0);
- ptr.deref(bx.cx())
- } else {
- cg_base.deref(bx.cx())
- }
cg_base.deref(bx.cx())
} else {
bug!("using operand local {:?} as place", place_ref);
}
......@@ -463,18 +454,7 @@ pub fn codegen_place(
};
for elem in place_ref.projection[base..].iter() {
cg_base = match *elem {
- mir::ProjectionElem::Deref => {
- // a box with a non-zst allocator should not be directly dereferenced
- if cg_base.layout.ty.is_box() && !cg_base.layout.field(cx, 1).is_zst() {
- // Project `Box<T>` -> `Unique<T>` -> `NonNull<T>` -> `*const T`
- let ptr =
- cg_base.project_field(bx, 0).project_field(bx, 0).project_field(bx, 0);
- bx.load_operand(ptr).deref(bx.cx())
- } else {
- bx.load_operand(cg_base).deref(bx.cx())
- }
- }
mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
mir::ProjectionElem::Field(ref field, _) => {
cg_base.project_field(bx, field.index())
}
......
......@@ -313,6 +313,11 @@ pub fn deref_operand(
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
if val.layout.ty.is_box() {
bug!("dereferencing {:?}", val.layout.ty);
}
let mplace = self.ref_to_mplace(&val)?;
self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?;
Ok(mplace)
......
......@@ -240,65 +240,79 @@ fn visit_projection_elem(
context: PlaceContext,
location: Location,
) {
- if let ProjectionElem::Index(index) = elem {
- let index_ty = self.body.local_decls[index].ty;
- if index_ty != self.tcx.types.usize {
- self.fail(location, format!("bad index ({:?} != usize)", index_ty))
- }
- }
- if let ProjectionElem::Field(f, ty) = elem {
- let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
- let parent_ty = parent.ty(&self.body.local_decls, self.tcx);
- let fail_out_of_bounds = |this: &Self, location| {
- this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
- };
- let check_equal = |this: &Self, location, f_ty| {
- if !this.mir_assign_valid_types(ty, f_ty) {
- this.fail(
- location,
- format!(
- "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is {:?}",
- parent, f, ty, f_ty
- )
- )
- }
- };
- match parent_ty.ty.kind() {
- ty::Tuple(fields) => {
- let Some(f_ty) = fields.get(f.as_usize()) else {
- fail_out_of_bounds(self, location);
- return;
- };
- check_equal(self, location, *f_ty);
- }
- ty::Adt(adt_def, substs) => {
- let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
- let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
- fail_out_of_bounds(self, location);
- return;
- };
- check_equal(self, location, field.ty(self.tcx, substs));
- }
- ty::Closure(_, substs) => {
- let substs = substs.as_closure();
- let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
- fail_out_of_bounds(self, location);
- return;
- };
- check_equal(self, location, f_ty);
- }
- ty::Generator(_, substs, _) => {
- let substs = substs.as_generator();
- let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
- fail_out_of_bounds(self, location);
- return;
- };
- check_equal(self, location, f_ty);
- }
- _ => {
- self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
- }
- };
- }
match elem {
ProjectionElem::Index(index) => {
let index_ty = self.body.local_decls[index].ty;
if index_ty != self.tcx.types.usize {
self.fail(location, format!("bad index ({:?} != usize)", index_ty))
}
}
ProjectionElem::Deref if self.mir_phase >= MirPhase::GeneratorsLowered => {
let base_ty = Place::ty_from(local, proj_base, &self.body.local_decls, self.tcx).ty;
if base_ty.is_box() {
self.fail(
location,
format!("{:?} dereferenced after ElaborateBoxDerefs", base_ty),
)
}
}
ProjectionElem::Field(f, ty) => {
let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
let parent_ty = parent.ty(&self.body.local_decls, self.tcx);
let fail_out_of_bounds = |this: &Self, location| {
this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
};
let check_equal = |this: &Self, location, f_ty| {
if !this.mir_assign_valid_types(ty, f_ty) {
this.fail(
location,
format!(
"Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is {:?}",
parent, f, ty, f_ty
)
)
}
};
match parent_ty.ty.kind() {
ty::Tuple(fields) => {
let Some(f_ty) = fields.get(f.as_usize()) else {
fail_out_of_bounds(self, location);
return;
};
check_equal(self, location, *f_ty);
}
ty::Adt(adt_def, substs) => {
let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
fail_out_of_bounds(self, location);
return;
};
check_equal(self, location, field.ty(self.tcx, substs));
}
ty::Closure(_, substs) => {
let substs = substs.as_closure();
let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
fail_out_of_bounds(self, location);
return;
};
check_equal(self, location, f_ty);
}
ty::Generator(_, substs, _) => {
let substs = substs.as_generator();
let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
fail_out_of_bounds(self, location);
return;
};
check_equal(self, location, f_ty);
}
_ => {
self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
}
}
}
_ => {}
}
self.super_projection_elem(local, proj_base, elem, context, location);
}
......
......@@ -340,6 +340,12 @@ fn intersect(&mut self, other: &BitSet<T>) -> bool {
}
}
impl<T: Idx> From<GrowableBitSet<T>> for BitSet<T> {
fn from(bit_set: GrowableBitSet<T>) -> Self {
bit_set.bit_set
}
}
/// A fixed-size bitset type with a partially dense, partially sparse
/// representation. The bitset is broken into chunks, and chunks that are all
/// zeros or all ones are represented and handled very efficiently.
......@@ -1542,6 +1548,12 @@ pub fn contains(&self, elem: T) -> bool {
}
}
impl<T: Idx> From<BitSet<T>> for GrowableBitSet<T> {
fn from(bit_set: BitSet<T>) -> Self {
Self { bit_set }
}
}
/// A fixed-size 2D bit matrix type with a dense representation.
///
/// `R` and `C` are index types used to identify rows and columns respectively;
......
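These two `From` impls exist so that code such as the generator transform below can hop between the fixed-size and growable representations at no cost; both directions just wrap or unwrap the underlying `BitSet`. A minimal sketch of how they compose, compilable only inside the compiler where `rustc_index` is a dependency (`usize` implements `Idx`):

```rust
use rustc_index::bit_set::{BitSet, GrowableBitSet};

fn roundtrip(live: BitSet<usize>) -> BitSet<usize> {
    // BitSet -> GrowableBitSet wraps the existing set without copying...
    let mut growable: GrowableBitSet<usize> = live.into();
    // ...after which elements beyond the original domain can be inserted...
    growable.insert(1000);
    // ...and GrowableBitSet -> BitSet unwraps it again.
    growable.into()
}
```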
......@@ -195,6 +195,7 @@ pub enum MirPhase {
/// Beginning with this phase, the following variants are disallowed:
/// * [`TerminatorKind::Yield`](terminator::TerminatorKind::Yield)
/// * [`TerminatorKind::GeneratorDrop`](terminator::TerminatorKind::GeneratorDrop)
/// * [`ProjectionElem::Deref`] of `Box`
GeneratorsLowered = 6,
Optimized = 7,
}
......
......@@ -410,7 +410,18 @@ fn open_drop_for_tuple(&mut self, tys: &[Ty<'tcx>]) -> BasicBlock {
fn open_drop_for_box(&mut self, adt: ty::AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> BasicBlock {
debug!("open_drop_for_box({:?}, {:?}, {:?})", self, adt, substs);
- let interior = self.tcx().mk_place_deref(self.place);
// drop glue is sent straight to codegen
// box cannot be directly dereferenced
let unique_ty = adt.non_enum_variant().fields[0].ty(self.tcx(), substs);
let nonnull_ty =
unique_ty.ty_adt_def().unwrap().non_enum_variant().fields[0].ty(self.tcx(), substs);
let ptr_ty = self.tcx().mk_imm_ptr(substs[0].expect_ty());
let unique_place = self.tcx().mk_place_field(self.place, Field::new(0), unique_ty);
let nonnull_place = self.tcx().mk_place_field(unique_place, Field::new(0), nonnull_ty);
let ptr_place = self.tcx().mk_place_field(nonnull_place, Field::new(0), ptr_ty);
let interior = self.tcx().mk_place_deref(ptr_place);
let interior_path = self.elaborator.deref_subpath(self.path);
let succ = self.box_free_block(adt, substs, self.succ, self.unwind);
......
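In plain-Rust terms, the drop glue built in the hunk above now dereferences the raw pointer projected out of the box to drop the interior value, then frees the allocation as a separate step. A conceptual sketch using only public std APIs, with `Box::into_raw` standing in for the `Unique`/`NonNull` field projections:

```rust
use std::mem::ManuallyDrop;

// Roughly what open_drop_for_box arranges: drop `*ptr`, then free the box.
fn drop_interior_then_free<T>(b: Box<T>) {
    let ptr = Box::into_raw(b); // the projected `*const T`-like place
    unsafe {
        std::ptr::drop_in_place(ptr); // drop glue for the interior value
        // Re-box as ManuallyDrop<T> so deallocation happens without running
        // T's destructor a second time (the box_free step).
        drop(Box::from_raw(ptr as *mut ManuallyDrop<T>));
    }
}

fn main() {
    drop_interior_then_free(Box::new(String::from("interior")));
}
```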
//! This pass transforms derefs of Box into a deref of the pointer inside Box.
//!
//! Box is not actually a pointer so it is incorrect to dereference it directly.
use crate::MirPass;
use rustc_hir::def_id::DefId;
use rustc_index::vec::Idx;
use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::visit::MutVisitor;
use rustc_middle::mir::*;
use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{Ty, TyCtxt};
/// Constructs the types used when accessing a Box's pointer
pub fn build_ptr_tys<'tcx>(
tcx: TyCtxt<'tcx>,
pointee: Ty<'tcx>,
unique_did: DefId,
nonnull_did: DefId,
) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) {
let substs = tcx.intern_substs(&[pointee.into()]);
let unique_ty = tcx.bound_type_of(unique_did).subst(tcx, substs);
let nonnull_ty = tcx.bound_type_of(nonnull_did).subst(tcx, substs);
let ptr_ty = tcx.mk_imm_ptr(pointee);
(unique_ty, nonnull_ty, ptr_ty)
}
// Constructs the projection needed to access a Box's pointer
pub fn build_projection<'tcx>(
unique_ty: Ty<'tcx>,
nonnull_ty: Ty<'tcx>,
ptr_ty: Ty<'tcx>,
) -> [PlaceElem<'tcx>; 3] {
[
PlaceElem::Field(Field::new(0), unique_ty),
PlaceElem::Field(Field::new(0), nonnull_ty),
PlaceElem::Field(Field::new(0), ptr_ty),
]
}
struct ElaborateBoxDerefVisitor<'tcx, 'a> {
tcx: TyCtxt<'tcx>,
unique_did: DefId,
nonnull_did: DefId,
local_decls: &'a mut LocalDecls<'tcx>,
patch: MirPatch<'tcx>,
}
impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn visit_place(
&mut self,
place: &mut Place<'tcx>,
context: visit::PlaceContext,
location: Location,
) {
let tcx = self.tcx;
let base_ty = self.local_decls[place.local].ty;
// Derefer ensures that derefs are always the first projection
if place.projection.first() == Some(&PlaceElem::Deref) && base_ty.is_box() {
let source_info = self.local_decls[place.local].source_info;
let (unique_ty, nonnull_ty, ptr_ty) =
build_ptr_tys(tcx, base_ty.boxed_ty(), self.unique_did, self.nonnull_did);
let ptr_local = self.patch.new_temp(ptr_ty, source_info.span);
self.local_decls.push(LocalDecl::new(ptr_ty, source_info.span));
self.patch.add_statement(location, StatementKind::StorageLive(ptr_local));
self.patch.add_assign(
location,
Place::from(ptr_local),
Rvalue::Use(Operand::Copy(
Place::from(place.local)
.project_deeper(&build_projection(unique_ty, nonnull_ty, ptr_ty), tcx),
)),
);
place.local = ptr_local;
self.patch.add_statement(
Location { block: location.block, statement_index: location.statement_index + 1 },
StatementKind::StorageDead(ptr_local),
);
}
self.super_place(place, context, location);
}
}
pub struct ElaborateBoxDerefs;
impl<'tcx> MirPass<'tcx> for ElaborateBoxDerefs {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
if let Some(def_id) = tcx.lang_items().owned_box() {
let unique_did = tcx.adt_def(def_id).non_enum_variant().fields[0].did;
let Some(nonnull_def) = tcx.type_of(unique_did).ty_adt_def() else {
span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique")
};
let nonnull_did = nonnull_def.non_enum_variant().fields[0].did;
let patch = MirPatch::new(body);
let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
let mut visitor =
ElaborateBoxDerefVisitor { tcx, unique_did, nonnull_did, local_decls, patch };
for (block, BasicBlockData { statements, terminator, .. }) in
basic_blocks.iter_enumerated_mut()
{
let mut index = 0;
for statement in statements {
let location = Location { block, statement_index: index };
visitor.visit_statement(statement, location);
index += 1;
}
let location = Location { block, statement_index: index };
match terminator {
// yielding into a box is handled when lowering generators
Some(Terminator { kind: TerminatorKind::Yield { value, .. }, .. }) => {
visitor.visit_operand(value, location);
}
Some(terminator) => {
visitor.visit_terminator(terminator, location);
}
None => {}
}
}
visitor.patch.apply(body);
for debug_info in body.var_debug_info.iter_mut() {
if let VarDebugInfoContents::Place(place) = &mut debug_info.value {
let mut new_projections: Option<Vec<_>> = None;
let mut last_deref = 0;
for (i, (base, elem)) in place.iter_projections().enumerate() {
let base_ty = base.ty(&body.local_decls, tcx).ty;
if elem == PlaceElem::Deref && base_ty.is_box() {
let new_projections = new_projections.get_or_insert_default();
let (unique_ty, nonnull_ty, ptr_ty) =
build_ptr_tys(tcx, base_ty.boxed_ty(), unique_did, nonnull_did);
new_projections.extend_from_slice(&base.projection[last_deref..]);
new_projections.extend_from_slice(&build_projection(
unique_ty, nonnull_ty, ptr_ty,
));
new_projections.push(PlaceElem::Deref);
last_deref = i;
}
}
if let Some(mut new_projections) = new_projections {
new_projections.extend_from_slice(&place.projection[last_deref..]);
place.projection = tcx.intern_place_elems(&new_projections);
}
}
}
} else {
// box is not present, this pass doesn't need to do anything
}
}
}
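For reference, any program that writes or reads through a box exercises the pass above; the mir-opt output for `boxes.rs` further down shows the result, with `(*_3)` replaced by a deref of a fresh `*const i32` temporary:

```rust
// After ElaborateBoxDerefs, the MIR for this reads the value through the
// raw pointer projected out of `b` rather than dereferencing `b` itself.
fn main() {
    let b = Box::new(41_i32);
    let x = *b + 1;
    assert_eq!(x, 42);
}
```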
......@@ -56,7 +56,7 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
- use rustc_index::bit_set::{BitMatrix, BitSet};
use rustc_index::bit_set::{BitMatrix, BitSet, GrowableBitSet};
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::dump_mir;
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
......@@ -211,7 +211,7 @@ struct SuspensionPoint<'tcx> {
/// Which block to jump to if the generator is dropped in this state.
drop: Option<BasicBlock>,
/// Set of locals that have live storage while at this suspension point.
- storage_liveness: BitSet<Local>,
storage_liveness: GrowableBitSet<Local>,
}
struct TransformVisitor<'tcx> {
......@@ -367,7 +367,7 @@ fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockDat
resume,
resume_arg,
drop,
- storage_liveness: self.storage_liveness[block].clone().unwrap(),
storage_liveness: self.storage_liveness[block].clone().unwrap().into(),
});
VariantIdx::new(state)
......@@ -1182,6 +1182,8 @@ fn create_cases<'tcx>(
transform: &TransformVisitor<'tcx>,
operation: Operation,
) -> Vec<(usize, BasicBlock)> {
let tcx = transform.tcx;
let source_info = SourceInfo::outermost(body.span);
transform
......@@ -1214,13 +1216,85 @@ fn create_cases<'tcx>(
if operation == Operation::Resume {
// Move the resume argument to the destination place of the `Yield` terminator
let resume_arg = Local::new(2); // 0 = return, 1 = self
- statements.push(Statement {
- source_info,
- kind: StatementKind::Assign(Box::new((
- point.resume_arg,
- Rvalue::Use(Operand::Move(resume_arg.into())),
- ))),
- });
// handle `box yield` properly
let box_place = if let [projection @ .., ProjectionElem::Deref] =
&**point.resume_arg.projection
{
let box_place =
Place::from(point.resume_arg.local).project_deeper(projection, tcx);
let box_ty = box_place.ty(&body.local_decls, tcx).ty;
if box_ty.is_box() { Some((box_place, box_ty)) } else { None }
} else {
None
};
if let Some((box_place, box_ty)) = box_place {
let unique_did = box_ty
.ty_adt_def()
.expect("expected Box to be an Adt")
.non_enum_variant()
.fields[0]
.did;
let Some(nonnull_def) = tcx.type_of(unique_did).ty_adt_def() else {
span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique")
};
let nonnull_did = nonnull_def.non_enum_variant().fields[0].did;
let (unique_ty, nonnull_ty, ptr_ty) =
crate::elaborate_box_derefs::build_ptr_tys(
tcx,
box_ty.boxed_ty(),
unique_did,
nonnull_did,
);
let ptr_local = body.local_decls.push(LocalDecl::new(ptr_ty, body.span));
statements.push(Statement {
source_info,
kind: StatementKind::StorageLive(ptr_local),
});
statements.push(Statement {
source_info,
kind: StatementKind::Assign(Box::new((
Place::from(ptr_local),
Rvalue::Use(Operand::Copy(box_place.project_deeper(
&crate::elaborate_box_derefs::build_projection(
unique_ty, nonnull_ty, ptr_ty,
),
tcx,
))),
))),
});
statements.push(Statement {
source_info,
kind: StatementKind::Assign(Box::new((
Place::from(ptr_local)
.project_deeper(&[ProjectionElem::Deref], tcx),
Rvalue::Use(Operand::Move(resume_arg.into())),
))),
});
statements.push(Statement {
source_info,
kind: StatementKind::StorageDead(ptr_local),
});
} else {
statements.push(Statement {
source_info,
kind: StatementKind::Assign(Box::new((
point.resume_arg,
Rvalue::Use(Operand::Move(resume_arg.into())),
))),
});
}
}
// Then jump to the real target
......
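The `box yield` branch above exists because a generator's resume value can be stored directly into a place behind a box deref; without routing it through the extra pointer temporary, the transform would reintroduce a `Deref` of `Box` after `ElaborateBoxDerefs` has already run. A hedged sketch of source code that produces such a resume place, written against the 2022-era nightly feature names (`generators` has since been renamed):

```rust
#![feature(generators, generator_trait)]

use std::ops::{Generator, GeneratorState};
use std::pin::Pin;

fn main() {
    let mut g = |_: i32| {
        let mut b = Box::new(0_i32);
        // The resume value lands behind a box deref, so the `Yield`
        // terminator's resume_arg is a deref-of-Box place in MIR.
        *b = yield 1_i32;
        *b
    };
    let mut g = Pin::new(&mut g);
    assert_eq!(g.as_mut().resume(0), GeneratorState::Yielded(1));
    assert_eq!(g.as_mut().resume(41), GeneratorState::Complete(41));
}
```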
......@@ -57,6 +57,7 @@
mod dest_prop;
pub mod dump_mir;
mod early_otherwise_branch;
mod elaborate_box_derefs;
mod elaborate_drops;
mod function_item_references;
mod generator;
......@@ -427,6 +428,7 @@ fn run_post_borrowck_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tc
// `AddRetag` needs to run after `ElaborateDrops`. Otherwise it should run fairly late,
// but before optimizations begin.
&deref_separator::Derefer,
&elaborate_box_derefs::ElaborateBoxDerefs,
&add_retag::AddRetag,
&lower_intrinsics::LowerIntrinsics,
&simplify::SimplifyCfg::new("elaborate-drops"),
......
......@@ -23,8 +23,10 @@ use core::panic::PanicInfo;
extern crate libc;
struct Unique<T>(*mut T);
#[lang = "owned_box"]
- pub struct Box<T>(*mut T);
pub struct Box<T>(Unique<T>);
#[lang = "exchange_malloc"]
unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
......
......@@ -28,93 +28,93 @@ pub fn ptr_alignment_helper(x: &&()) {}
// CHECK-LABEL: @load_ref
#[no_mangle]
pub fn load_ref<'a>(x: &&'a i32) -> &'a i32 {
// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META:[0-9]+]], !noundef !{{[0-9]+}}
*x
}
// CHECK-LABEL: @load_ref_higher_alignment
#[no_mangle]
pub fn load_ref_higher_alignment<'a>(x: &&'a Align16) -> &'a Align16 {
// CHECK: load {{%Align16\*|i128\*|ptr}}, {{%Align16\*\*|i128\*\*|ptr}} %x, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_16_META:[0-9]+]], !noundef !{{[0-9]+}}
*x
}
// CHECK-LABEL: @load_scalar_pair
#[no_mangle]
pub fn load_scalar_pair<'a>(x: &(&'a i32, &'a Align16)) -> (&'a i32, &'a Align16) {
// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %{{.+}}, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META]], !noundef !{{[0-9]+}}
// CHECK: load {{i64\*|ptr}}, {{i64\*\*|ptr}} %{{.+}}, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_16_META]], !noundef !{{[0-9]+}}
*x
}
// CHECK-LABEL: @load_raw_pointer
#[no_mangle]
pub fn load_raw_pointer<'a>(x: &*const i32) -> *const i32 {
// loaded raw pointer should not have !nonnull, !align, or !noundef metadata
// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x, align [[PTR_ALIGNMENT]]{{$}}
*x
}
// CHECK-LABEL: @load_box
#[no_mangle]
pub fn load_box<'a>(x: Box<Box<i32>>) -> Box<i32> {
- // CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META]], !noundef !{{[0-9]+}}
// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %{{.*}}, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META]], !noundef !{{[0-9]+}}
*x
}
// CHECK-LABEL: @load_bool
#[no_mangle]
pub fn load_bool(x: &bool) -> bool {
// CHECK: load i8, {{i8\*|ptr}} %x, align 1, !range ![[BOOL_RANGE:[0-9]+]], !noundef !{{[0-9]+}}
*x
}
// CHECK-LABEL: @load_maybeuninit_bool
#[no_mangle]
pub fn load_maybeuninit_bool(x: &MaybeUninit<bool>) -> MaybeUninit<bool> {
// CHECK: load i8, {{i8\*|ptr}} %x, align 1{{$}}
*x
}
// CHECK-LABEL: @load_enum_bool
#[no_mangle]
pub fn load_enum_bool(x: &MyBool) -> MyBool {
// CHECK: load i8, {{i8\*|ptr}} %x, align 1, !range ![[BOOL_RANGE]], !noundef !{{[0-9]+}}
*x
}
// CHECK-LABEL: @load_maybeuninit_enum_bool
#[no_mangle]
pub fn load_maybeuninit_enum_bool(x: &MaybeUninit<MyBool>) -> MaybeUninit<MyBool> {
// CHECK: load i8, {{i8\*|ptr}} %x, align 1{{$}}
*x
}
// CHECK-LABEL: @load_int
#[no_mangle]
pub fn load_int(x: &u16) -> u16 {
// CHECK: load i16, {{i16\*|ptr}} %x, align 2{{$}}
*x
}
// CHECK-LABEL: @load_nonzero_int
#[no_mangle]
pub fn load_nonzero_int(x: &NonZeroU16) -> NonZeroU16 {
// CHECK: load i16, {{i16\*|ptr}} %x, align 2, !range ![[NONZEROU16_RANGE:[0-9]+]], !noundef !{{[0-9]+}}
*x
}
// CHECK-LABEL: @load_option_nonzero_int
#[no_mangle]
pub fn load_option_nonzero_int(x: &Option<NonZeroU16>) -> Option<NonZeroU16> {
// CHECK: load i16, {{i16\*|ptr}} %x, align 2{{$}}
*x
}
// CHECK-LABEL: @borrow
#[no_mangle]
pub fn borrow(x: &i32) -> &i32 {
// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x{{.*}}, !nonnull
&x; // keep variable in an alloca
x
}
......@@ -122,7 +122,7 @@ pub fn borrow(x: &i32) -> &i32 {
// CHECK-LABEL: @_box
#[no_mangle]
pub fn _box(x: Box<i32>) -> i32 {
- // CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x{{.*}}, !nonnull
// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x{{.*}}, align [[PTR_ALIGNMENT]]
*x
}
......@@ -131,8 +131,8 @@ pub fn _box(x: Box<i32>) -> i32 {
// dependent alignment
#[no_mangle]
pub fn small_array_alignment(x: [i8; 4]) -> [i8; 4] {
// CHECK: [[VAR:%[0-9]+]] = load i32, {{i32\*|ptr}} %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}
......@@ -141,8 +141,8 @@ pub fn _box(x: Box<i32>) -> i32 {
// dependent alignment
#[no_mangle]
pub fn small_struct_alignment(x: Bytes) -> Bytes {
// CHECK: [[VAR:%[0-9]+]] = load i32, {{i32\*|ptr}} %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}
......
......@@ -10,6 +10,10 @@
let mut _5: usize; // in scope 0 at $DIR/boxes.rs:12:14: 12:22
let mut _6: *mut u8; // in scope 0 at $DIR/boxes.rs:12:14: 12:22
let mut _7: std::boxed::Box<i32>; // in scope 0 at $DIR/boxes.rs:12:14: 12:22
let mut _8: *const i32; // in scope 0 at $DIR/boxes.rs:12:14: 12:22
let mut _9: *const i32; // in scope 0 at $DIR/boxes.rs:12:14: 12:22
let mut _10: *const i32; // in scope 0 at $DIR/boxes.rs:12:14: 12:22
let mut _11: *const i32; // in scope 0 at $DIR/boxes.rs:12:14: 12:22
scope 1 {
debug x => _1; // in scope 1 at $DIR/boxes.rs:12:9: 12:10
}
......@@ -34,10 +38,16 @@
bb1: {
StorageLive(_7); // scope 0 at $DIR/boxes.rs:12:14: 12:22
_7 = ShallowInitBox(move _6, i32); // scope 0 at $DIR/boxes.rs:12:14: 12:22
- (*_7) = const 42_i32; // scope 0 at $DIR/boxes.rs:12:19: 12:21
StorageLive(_8); // scope 0 at $DIR/boxes.rs:12:19: 12:21
_8 = (((_7.0: std::ptr::Unique<i32>).0: std::ptr::NonNull<i32>).0: *const i32); // scope 0 at $DIR/boxes.rs:12:19: 12:21
(*_8) = const 42_i32; // scope 0 at $DIR/boxes.rs:12:19: 12:21
StorageDead(_8); // scope 0 at $DIR/boxes.rs:12:14: 12:22
_3 = move _7; // scope 0 at $DIR/boxes.rs:12:14: 12:22
StorageDead(_7); // scope 0 at $DIR/boxes.rs:12:21: 12:22
- _2 = (*_3); // scope 0 at $DIR/boxes.rs:12:13: 12:22
StorageLive(_9); // scope 0 at $DIR/boxes.rs:12:13: 12:22
_9 = (((_3.0: std::ptr::Unique<i32>).0: std::ptr::NonNull<i32>).0: *const i32); // scope 0 at $DIR/boxes.rs:12:13: 12:22
_2 = (*_9); // scope 0 at $DIR/boxes.rs:12:13: 12:22
StorageDead(_9); // scope 0 at $DIR/boxes.rs:12:13: 12:26
_1 = Add(move _2, const 0_i32); // scope 0 at $DIR/boxes.rs:12:13: 12:26
StorageDead(_2); // scope 0 at $DIR/boxes.rs:12:25: 12:26
drop(_3) -> [return: bb2, unwind: bb3]; // scope 0 at $DIR/boxes.rs:12:26: 12:27
......
......@@ -9,14 +9,16 @@
let mut _4: *mut u8; // in scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
let mut _5: std::boxed::Box<std::vec::Vec<u32>>; // in scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
let mut _6: (); // in scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
+ let mut _7: &mut std::vec::Vec<u32>; // in scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
let mut _7: *const std::vec::Vec<u32>; // in scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
let mut _8: *const std::vec::Vec<u32>; // in scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
+ let mut _9: &mut std::vec::Vec<u32>; // in scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
scope 1 {
debug _x => _1; // in scope 1 at $DIR/inline-into-box-place.rs:8:9: 8:11
}
scope 2 {
}
+ scope 3 (inlined Vec::<u32>::new) { // at $DIR/inline-into-box-place.rs:8:33: 8:43
+ let mut _8: alloc::raw_vec::RawVec<u32>; // in scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ let mut _10: alloc::raw_vec::RawVec<u32>; // in scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ }
bb0: {
......@@ -32,11 +34,13 @@
bb1: {
StorageLive(_5); // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
_5 = ShallowInitBox(move _4, std::vec::Vec<u32>); // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
- (*_5) = Vec::<u32>::new() -> [return: bb2, unwind: bb5]; // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ _7 = &mut (*_5); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ StorageLive(_8); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ _8 = const alloc::raw_vec::RawVec::<u32>::NEW; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
_7 = (((_5.0: std::ptr::Unique<std::vec::Vec<u32>>).0: std::ptr::NonNull<std::vec::Vec<u32>>).0: *const std::vec::Vec<u32>); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
- (*_7) = Vec::<u32>::new() -> [return: bb2, unwind: bb4]; // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ StorageLive(_9); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ _9 = &mut (*_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ StorageLive(_10); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ _10 = const alloc::raw_vec::RawVec::<u32>::NEW; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
// mir::Constant
- // + span: $DIR/inline-into-box-place.rs:8:33: 8:41
- // + user_ty: UserType(1)
......@@ -47,15 +51,16 @@
+ // + span: $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ // + user_ty: UserType(0)
+ // + literal: Const { ty: alloc::raw_vec::RawVec<u32>, val: Unevaluated(alloc::raw_vec::RawVec::<T>::NEW, [u32], None) }
+ Deinit((*_7)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ ((*_7).0: alloc::raw_vec::RawVec<u32>) = move _8; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ ((*_7).1: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ StorageDead(_8); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ Deinit((*_9)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ ((*_9).0: alloc::raw_vec::RawVec<u32>) = move _10; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ ((*_9).1: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ StorageDead(_10); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ StorageDead(_9); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
_1 = move _5; // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
StorageDead(_5); // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
_0 = const (); // scope 0 at $DIR/inline-into-box-place.rs:7:11: 9:2
- drop(_1) -> [return: bb3, unwind: bb4]; // scope 0 at $DIR/inline-into-box-place.rs:9:1: 9:2
- drop(_1) -> [return: bb3, unwind: bb5]; // scope 0 at $DIR/inline-into-box-place.rs:9:1: 9:2
+ drop(_1) -> [return: bb2, unwind: bb3]; // scope 0 at $DIR/inline-into-box-place.rs:9:1: 9:2
}
......@@ -66,15 +71,16 @@
}
- bb4 (cleanup): {
+ bb3 (cleanup): {
resume; // scope 0 at $DIR/inline-into-box-place.rs:7:1: 9:2
- }
-
- bb5 (cleanup): {
- _6 = alloc::alloc::box_free::<Vec<u32>, std::alloc::Global>(move (_5.0: std::ptr::Unique<std::vec::Vec<u32>>), move (_5.1: std::alloc::Global)) -> bb4; // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
- StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
- _6 = alloc::alloc::box_free::<Vec<u32>, std::alloc::Global>(move (_5.0: std::ptr::Unique<std::vec::Vec<u32>>), move (_5.1: std::alloc::Global)) -> bb5; // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
- // mir::Constant
- // + span: $DIR/inline-into-box-place.rs:8:42: 8:43
- // + literal: Const { ty: unsafe fn(Unique<Vec<u32>>, std::alloc::Global) {alloc::alloc::box_free::<Vec<u32>, std::alloc::Global>}, val: Value(Scalar(<ZST>)) }
- }
-
- bb5 (cleanup): {
+ bb3 (cleanup): {
resume; // scope 0 at $DIR/inline-into-box-place.rs:7:1: 9:2
}
}
......@@ -9,14 +9,16 @@
let mut _4: *mut u8; // in scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
let mut _5: std::boxed::Box<std::vec::Vec<u32>>; // in scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
let mut _6: (); // in scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
+ let mut _7: &mut std::vec::Vec<u32>; // in scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
let mut _7: *const std::vec::Vec<u32>; // in scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
let mut _8: *const std::vec::Vec<u32>; // in scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
+ let mut _9: &mut std::vec::Vec<u32>; // in scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
scope 1 {
debug _x => _1; // in scope 1 at $DIR/inline-into-box-place.rs:8:9: 8:11
}
scope 2 {
}
+ scope 3 (inlined Vec::<u32>::new) { // at $DIR/inline-into-box-place.rs:8:33: 8:43
+ let mut _8: alloc::raw_vec::RawVec<u32>; // in scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ let mut _10: alloc::raw_vec::RawVec<u32>; // in scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ }
bb0: {
......@@ -32,11 +34,13 @@
bb1: {
StorageLive(_5); // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
_5 = ShallowInitBox(move _4, std::vec::Vec<u32>); // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
- (*_5) = Vec::<u32>::new() -> [return: bb2, unwind: bb5]; // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ _7 = &mut (*_5); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ StorageLive(_8); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ _8 = const alloc::raw_vec::RawVec::<u32>::NEW; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
_7 = (((_5.0: std::ptr::Unique<std::vec::Vec<u32>>).0: std::ptr::NonNull<std::vec::Vec<u32>>).0: *const std::vec::Vec<u32>); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
- (*_7) = Vec::<u32>::new() -> [return: bb2, unwind: bb4]; // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ StorageLive(_9); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ _9 = &mut (*_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ StorageLive(_10); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ _10 = const alloc::raw_vec::RawVec::<u32>::NEW; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
// mir::Constant
- // + span: $DIR/inline-into-box-place.rs:8:33: 8:41
- // + user_ty: UserType(1)
......@@ -47,15 +51,16 @@
+ // + span: $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ // + user_ty: UserType(0)
+ // + literal: Const { ty: alloc::raw_vec::RawVec<u32>, val: Unevaluated(alloc::raw_vec::RawVec::<T>::NEW, [u32], None) }
+ Deinit((*_7)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ ((*_7).0: alloc::raw_vec::RawVec<u32>) = move _8; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ ((*_7).1: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ StorageDead(_8); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
+ Deinit((*_9)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ ((*_9).0: alloc::raw_vec::RawVec<u32>) = move _10; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ ((*_9).1: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ StorageDead(_10); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL
+ StorageDead(_9); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
_1 = move _5; // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43
StorageDead(_5); // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
_0 = const (); // scope 0 at $DIR/inline-into-box-place.rs:7:11: 9:2
- drop(_1) -> [return: bb3, unwind: bb4]; // scope 0 at $DIR/inline-into-box-place.rs:9:1: 9:2
- drop(_1) -> [return: bb3, unwind: bb5]; // scope 0 at $DIR/inline-into-box-place.rs:9:1: 9:2
+ drop(_1) -> [return: bb2, unwind: bb3]; // scope 0 at $DIR/inline-into-box-place.rs:9:1: 9:2
}
......@@ -66,15 +71,16 @@
}
- bb4 (cleanup): {
+ bb3 (cleanup): {
resume; // scope 0 at $DIR/inline-into-box-place.rs:7:1: 9:2
- }
-
- bb5 (cleanup): {
- _6 = alloc::alloc::box_free::<Vec<u32>, std::alloc::Global>(move (_5.0: std::ptr::Unique<std::vec::Vec<u32>>), move (_5.1: std::alloc::Global)) -> bb4; // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
- StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43
- _6 = alloc::alloc::box_free::<Vec<u32>, std::alloc::Global>(move (_5.0: std::ptr::Unique<std::vec::Vec<u32>>), move (_5.1: std::alloc::Global)) -> bb5; // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
- // mir::Constant
- // + span: $DIR/inline-into-box-place.rs:8:42: 8:43
- // + literal: Const { ty: unsafe fn(Unique<Vec<u32>>, std::alloc::Global) {alloc::alloc::box_free::<Vec<u32>, std::alloc::Global>}, val: Value(Scalar(<ZST>)) }
- }
-
- bb5 (cleanup): {
+ bb3 (cleanup): {
resume; // scope 0 at $DIR/inline-into-box-place.rs:7:1: 9:2
}
}
......@@ -11,6 +11,7 @@ fn b(_1: &mut Box<T>) -> &mut T {
let mut _5: &mut T; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
let mut _6: &mut T; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
let mut _7: std::boxed::Box<T>; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
let mut _8: *const T; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
}
bb0: {
......@@ -22,7 +23,10 @@ fn b(_1: &mut Box<T>) -> &mut T {
StorageLive(_6); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
StorageLive(_7); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
_7 = move (*_4); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
- _6 = &mut (*_7); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
StorageLive(_8); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
_8 = (((_7.0: std::ptr::Unique<T>).0: std::ptr::NonNull<T>).0: *const T); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
_6 = &mut (*_8); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
StorageDead(_8); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
StorageDead(_7); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
_5 = &mut (*_6); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
_3 = &mut (*_5); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
......
......@@ -8,6 +8,7 @@ fn d(_1: &Box<T>) -> &T {
scope 1 (inlined <Box<T> as AsRef<T>>::as_ref) { // at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:5: 18:15
debug self => _3; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
let mut _4: std::boxed::Box<T>; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
let mut _5: *const T; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
}
bb0: {
......@@ -16,7 +17,10 @@ fn d(_1: &Box<T>) -> &T {
_3 = &(*_1); // scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:5: 18:15
StorageLive(_4); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
_4 = move (*_3); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
- _2 = &(*_4); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
StorageLive(_5); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
_5 = (((_4.0: std::ptr::Unique<T>).0: std::ptr::NonNull<T>).0: *const T); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
_2 = &(*_5); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
StorageDead(_5); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
StorageDead(_4); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL
_0 = &(*_2); // scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:5: 18:15
StorageDead(_3); // scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:14: 18:15
......
// build-pass
// compile-flags: -Z mir-opt-level=4
#![crate_type="lib"]
#![crate_type = "lib"]
#![feature(lang_items)]
#![no_std]
struct NonNull<T: ?Sized>(*mut T);
struct Unique<T: ?Sized>(NonNull<T>);
#[lang = "owned_box"]
- pub struct Box<T: ?Sized>(*mut T, ());
pub struct Box<T: ?Sized>(Unique<T>);
impl<T: ?Sized> Drop for Box<T> {
- fn drop(&mut self) {
- }
fn drop(&mut self) {}
}
#[lang = "box_free"]
#[inline(always)]
- unsafe fn box_free<T: ?Sized>(ptr: *mut T, _: ()) {
- dealloc(ptr)
unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
dealloc(ptr.0.0)
}
#[inline(never)]
- fn dealloc<T: ?Sized>(_: *mut T) {
- }
fn dealloc<T: ?Sized>(_: *mut T) {}
pub struct Foo<T>(T);
......