Commit 45cde975 authored by bors, committed by GitHub

Auto merge of #34189 - eddyb:mir-trans-imm, r=nagisa

trans: generalize immediate temporaries to all MIR locals.

Added `Mir::local_index`, which gives you a unified index for `Arg`, `Var`, `Temp` and `ReturnPointer`.
Also available is `Mir::count_locals`, which returns the total number of such locals.
This simplifies a lot of code, which can now treat all of the local lvalues in the same manner.
If we had `-> impl Iterator`, I could have added a bunch of useful `Ty` or `Lvalue` iterators for all locals.
We could of course manually write such iterators as they are needed.
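
For reference, here is a minimal standalone sketch of the flattening scheme (the `Lv` enum and the free functions are hypothetical stand-ins for `Lvalue` and the real `Mir` methods, with the decl counts passed in explicitly rather than read from `arg_decls`/`var_decls`/`temp_decls`):

```
// Sketch of the Arg/Var/Temp/ReturnPointer -> Local flattening.
enum Lv { Arg(usize), Var(usize), Temp(usize), ReturnPointer, Static }

// Args come first, then vars, then temps, with ReturnPointer last.
fn local_index(args: usize, vars: usize, temps: usize, lvalue: &Lv) -> Option<usize> {
    match *lvalue {
        Lv::Arg(i) => Some(i),
        Lv::Var(i) => Some(args + i),
        Lv::Temp(i) => Some(args + vars + i),
        Lv::ReturnPointer => Some(args + vars + temps),
        Lv::Static => None, // statics (and projections) are not locals
    }
}

// Total number of locals; every Some(index) above is smaller than this.
fn count_locals(args: usize, vars: usize, temps: usize) -> usize {
    args + vars + temps + 1 // the +1 is ReturnPointer
}

fn main() {
    // With 2 args, 3 vars and 4 temps there are 10 locals in total.
    assert_eq!(local_index(2, 3, 4, &Lv::Arg(1)), Some(1));
    assert_eq!(local_index(2, 3, 4, &Lv::Var(0)), Some(2));
    assert_eq!(local_index(2, 3, 4, &Lv::Temp(0)), Some(5));
    assert_eq!(local_index(2, 3, 4, &Lv::ReturnPointer), Some(9));
    assert_eq!(count_locals(2, 3, 4), 10);
}
```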

The only place that currently takes advantage of unified locals is trans' alloca elision.
It is not yet as good as it could be, due to our usage of `llvm.dbg.declare` in debug mode.
But passing some arguments and variables as immediates already has a measurable effect on release-mode `libsyntax`:

Old trans:
```
time: 11.500; rss: 710MB        translation
time: 0.002; rss: 710MB assert dep graph
time: 0.000; rss: 710MB serialize dep graph
  time: 4.410; rss: 628MB       llvm function passes [0]
  time: 84.485; rss: 633MB      llvm module passes [0]
  time: 23.898; rss: 634MB      codegen passes [0]
  time: 0.002; rss: 634MB       codegen passes [0]
time: 113.408; rss: 634MB       LLVM passes
```
`-Z orbit`, previously:
```
time: 12.588; rss: 723MB        translation
time: 0.002; rss: 723MB assert dep graph
time: 0.000; rss: 723MB serialize dep graph
  time: 4.597; rss: 642MB       llvm function passes [0]
  time: 77.347; rss: 646MB      llvm module passes [0]
  time: 24.703; rss: 648MB      codegen passes [0]
  time: 0.002; rss: 615MB       codegen passes [0]
time: 107.233; rss: 615MB       LLVM passes
```
`-Z orbit`, after this PR:
```
time: 13.820; rss: 672MB        translation
time: 0.002; rss: 672MB assert dep graph
time: 0.000; rss: 672MB serialize dep graph
  time: 3.969; rss: 591MB       llvm function passes [0]
  time: 72.294; rss: 595MB      llvm module passes [0]
  time: 24.610; rss: 597MB      codegen passes [0]
  time: 0.002; rss: 597MB       codegen passes [0]
time: 101.439; rss: 597MB       LLVM passes
```
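
The win comes from locals that pass the immediate checks in the analysis below and are never borrowed, projected or dropped. A rough illustration (hypothetical examples, not code from this PR):

```
// `n` and `doubled` are scalars that are only ever consumed, so under
// -Z orbit they can now be kept as LLVM immediates: no alloca needed.
fn double_plus_one(n: u32) -> u32 {
    let doubled = n * 2;
    doubled + 1
}

// Taking a reference to `slot` requires a memory location, so the
// analysis marks it as an lvalue and it still gets an alloca.
fn needs_alloca(n: u32) -> u32 {
    let slot = n;
    let r = &slot;
    *r + 1
}

fn main() {
    assert_eq!(double_plus_one(3), 7);
    assert_eq!(needs_alloca(3), 4);
}
```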
@@ -144,6 +144,40 @@ pub fn predecessors(&self) -> Ref<IndexVec<BasicBlock, Vec<BasicBlock>>> {
pub fn predecessors_for(&self, bb: BasicBlock) -> Ref<Vec<BasicBlock>> {
Ref::map(self.predecessors(), |p| &p[bb])
}
/// Maps locals (Arg's, Var's, Temp's and ReturnPointer, in that order)
/// to their index in the whole list of locals. This is useful if you
/// want to treat all locals the same instead of repeating yourself.
pub fn local_index(&self, lvalue: &Lvalue<'tcx>) -> Option<Local> {
let idx = match *lvalue {
Lvalue::Arg(arg) => arg.index(),
Lvalue::Var(var) => {
self.arg_decls.len() +
var.index()
}
Lvalue::Temp(temp) => {
self.arg_decls.len() +
self.var_decls.len() +
temp.index()
}
Lvalue::ReturnPointer => {
self.arg_decls.len() +
self.var_decls.len() +
self.temp_decls.len()
}
Lvalue::Static(_) |
Lvalue::Projection(_) => return None
};
Some(Local::new(idx))
}
/// Counts the number of locals, such that local_index
/// will always return an index smaller than this count.
pub fn count_locals(&self) -> usize {
self.arg_decls.len() +
self.var_decls.len() +
self.temp_decls.len() + 1
}
}
impl<'tcx> Index<BasicBlock> for Mir<'tcx> {
@@ -663,6 +697,7 @@ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
newtype_index!(Var, "var");
newtype_index!(Temp, "tmp");
newtype_index!(Arg, "arg");
newtype_index!(Local, "local");
/// A path to a value; something that can be evaluated without
/// changing or disturbing program state.
@@ -492,6 +492,13 @@ pub fn unwrap_or(self, def: Ty<'tcx>) -> Ty<'tcx> {
ty::FnDiverging => def
}
}
pub fn maybe_converging(self) -> Option<Ty<'tcx>> {
match self {
ty::FnConverging(t) => Some(t),
ty::FnDiverging => None
}
}
}
pub type PolyFnOutput<'tcx> = Binder<FnOutput<'tcx>>;
@@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An analysis to determine which temporaries require allocas and
//! An analysis to determine which locals require allocas and
//! which do not.
use rustc_data_structures::bitvec::BitVector;
@@ -18,18 +18,23 @@
use rustc::mir::visit::{Visitor, LvalueContext};
use rustc::mir::traversal;
use common::{self, Block, BlockAndBuilder};
use glue;
use super::rvalue;
pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
mir: &mir::Mir<'tcx>) -> BitVector {
pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>,
mir: &mir::Mir<'tcx>) -> BitVector {
let bcx = bcx.build();
let mut analyzer = TempAnalyzer::new(mir, &bcx, mir.temp_decls.len());
let mut analyzer = LocalAnalyzer::new(mir, &bcx);
analyzer.visit_mir(mir);
for (index, temp_decl) in mir.temp_decls.iter().enumerate() {
let ty = bcx.monomorphize(&temp_decl.ty);
debug!("temp {:?} has type {:?}", index, ty);
let local_types = mir.arg_decls.iter().map(|a| a.ty)
.chain(mir.var_decls.iter().map(|v| v.ty))
.chain(mir.temp_decls.iter().map(|t| t.ty))
.chain(mir.return_ty.maybe_converging());
for (index, ty) in local_types.enumerate() {
let ty = bcx.monomorphize(&ty);
debug!("local {} has type {:?}", index, ty);
if ty.is_scalar() ||
ty.is_unique() ||
ty.is_region_ptr() ||
@@ -49,66 +54,87 @@ pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
// (e.g. structs) into an alloca unconditionally, just so
// that we don't have to deal with having two pathways
// (gep vs extractvalue etc).
analyzer.mark_as_lvalue(index);
analyzer.mark_as_lvalue(mir::Local::new(index));
}
}
analyzer.lvalue_temps
analyzer.lvalue_locals
}
struct TempAnalyzer<'mir, 'bcx: 'mir, 'tcx: 'bcx> {
struct LocalAnalyzer<'mir, 'bcx: 'mir, 'tcx: 'bcx> {
mir: &'mir mir::Mir<'tcx>,
bcx: &'mir BlockAndBuilder<'bcx, 'tcx>,
lvalue_temps: BitVector,
lvalue_locals: BitVector,
seen_assigned: BitVector
}
impl<'mir, 'bcx, 'tcx> TempAnalyzer<'mir, 'bcx, 'tcx> {
impl<'mir, 'bcx, 'tcx> LocalAnalyzer<'mir, 'bcx, 'tcx> {
fn new(mir: &'mir mir::Mir<'tcx>,
bcx: &'mir BlockAndBuilder<'bcx, 'tcx>,
temp_count: usize) -> TempAnalyzer<'mir, 'bcx, 'tcx> {
TempAnalyzer {
bcx: &'mir BlockAndBuilder<'bcx, 'tcx>)
-> LocalAnalyzer<'mir, 'bcx, 'tcx> {
let local_count = mir.count_locals();
LocalAnalyzer {
mir: mir,
bcx: bcx,
lvalue_temps: BitVector::new(temp_count),
seen_assigned: BitVector::new(temp_count)
lvalue_locals: BitVector::new(local_count),
seen_assigned: BitVector::new(local_count)
}
}
fn mark_as_lvalue(&mut self, temp: usize) {
debug!("marking temp {} as lvalue", temp);
self.lvalue_temps.insert(temp);
fn mark_as_lvalue(&mut self, local: mir::Local) {
debug!("marking {:?} as lvalue", local);
self.lvalue_locals.insert(local.index());
}
fn mark_assigned(&mut self, temp: usize) {
if !self.seen_assigned.insert(temp) {
self.mark_as_lvalue(temp);
fn mark_assigned(&mut self, local: mir::Local) {
if !self.seen_assigned.insert(local.index()) {
self.mark_as_lvalue(local);
}
}
}
impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for TempAnalyzer<'mir, 'bcx, 'tcx> {
impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
fn visit_assign(&mut self,
block: mir::BasicBlock,
lvalue: &mir::Lvalue<'tcx>,
rvalue: &mir::Rvalue<'tcx>) {
debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue);
match *lvalue {
mir::Lvalue::Temp(temp) => {
self.mark_assigned(temp.index());
if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) {
self.mark_as_lvalue(temp.index());
}
}
_ => {
self.visit_lvalue(lvalue, LvalueContext::Store);
if let Some(index) = self.mir.local_index(lvalue) {
self.mark_assigned(index);
if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) {
self.mark_as_lvalue(index);
}
} else {
self.visit_lvalue(lvalue, LvalueContext::Store);
}
self.visit_rvalue(rvalue);
}
fn visit_terminator_kind(&mut self,
block: mir::BasicBlock,
kind: &mir::TerminatorKind<'tcx>) {
match *kind {
mir::TerminatorKind::Call {
func: mir::Operand::Constant(mir::Constant {
literal: mir::Literal::Item { def_id, .. }, ..
}),
ref args, ..
} if Some(def_id) == self.bcx.tcx().lang_items.box_free_fn() => {
// box_free(x) shares with `drop x` the property that it
// is not guaranteed to be statically dominated by the
// definition of x, so x must always be in an alloca.
if let mir::Operand::Consume(ref lvalue) = args[0] {
self.visit_lvalue(lvalue, LvalueContext::Drop);
}
}
_ => {}
}
self.super_terminator_kind(block, kind);
}
fn visit_lvalue(&mut self,
lvalue: &mir::Lvalue<'tcx>,
context: LvalueContext) {
@@ -116,9 +142,9 @@ fn visit_lvalue(&mut self,
// Allow uses of projections of immediate pair fields.
if let mir::Lvalue::Projection(ref proj) = *lvalue {
if let mir::Lvalue::Temp(temp) = proj.base {
let ty = self.mir.temp_decls[temp].ty;
let ty = self.bcx.monomorphize(&ty);
if self.mir.local_index(&proj.base).is_some() {
let ty = self.mir.lvalue_ty(self.bcx.tcx(), &proj.base);
let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
if common::type_is_imm_pair(self.bcx.ccx(), ty) {
if let mir::ProjectionElem::Field(..) = proj.elem {
if let LvalueContext::Consume = context {
@@ -129,25 +155,36 @@ fn visit_lvalue(&mut self,
}
}
match *lvalue {
mir::Lvalue::Temp(temp) => {
match context {
LvalueContext::Call => {
self.mark_assigned(temp.index());
}
LvalueContext::Consume => {
}
LvalueContext::Store |
LvalueContext::Drop |
LvalueContext::Inspect |
LvalueContext::Borrow { .. } |
LvalueContext::Slice { .. } |
LvalueContext::Projection => {
self.mark_as_lvalue(temp.index());
if let Some(index) = self.mir.local_index(lvalue) {
match context {
LvalueContext::Call => {
self.mark_assigned(index);
}
LvalueContext::Consume => {
}
LvalueContext::Store |
LvalueContext::Inspect |
LvalueContext::Borrow { .. } |
LvalueContext::Slice { .. } |
LvalueContext::Projection => {
self.mark_as_lvalue(index);
}
LvalueContext::Drop => {
let ty = self.mir.lvalue_ty(self.bcx.tcx(), lvalue);
let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
// Only need the lvalue if we're actually dropping it.
if glue::type_needs_drop(self.bcx.tcx(), ty) {
self.mark_as_lvalue(index);
}
}
}
_ => {
}
// A deref projection only reads the pointer, never needs the lvalue.
if let mir::Lvalue::Projection(ref proj) = *lvalue {
if let mir::ProjectionElem::Deref = proj.elem {
return self.visit_lvalue(&proj.base, LvalueContext::Consume);
}
}
@@ -32,7 +32,7 @@
use rustc_data_structures::fnv::FnvHashMap;
use syntax::parse::token;
use super::{MirContext, TempRef};
use super::{MirContext, LocalRef};
use super::analyze::CleanupKind;
use super::constant::Const;
use super::lvalue::{LvalueRef, load_fat_ptr};
@@ -186,9 +186,43 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
}
mir::TerminatorKind::Return => {
bcx.with_block(|bcx| {
self.fcx.build_return_block(bcx, debug_loc);
})
let ret = bcx.fcx().fn_ty.ret;
if ret.is_ignore() || ret.is_indirect() {
bcx.ret_void();
return;
}
let llval = if let Some(cast_ty) = ret.cast {
let index = mir.local_index(&mir::Lvalue::ReturnPointer).unwrap();
let op = match self.locals[index] {
LocalRef::Operand(Some(op)) => op,
LocalRef::Operand(None) => bug!("use of return before def"),
LocalRef::Lvalue(tr_lvalue) => {
OperandRef {
val: Ref(tr_lvalue.llval),
ty: tr_lvalue.ty.to_ty(bcx.tcx())
}
}
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
let llscratch = build::AllocaFcx(bcx.fcx(), ret.original_ty, "ret");
self.store_operand(&bcx, llscratch, op);
llscratch
}
Ref(llval) => llval
};
let load = bcx.load(bcx.pointercast(llslot, cast_ty.ptr_to()));
let llalign = llalign_of_min(bcx.ccx(), ret.ty);
unsafe {
llvm::LLVMSetAlignment(load, llalign);
}
load
} else {
let op = self.trans_consume(&bcx, &mir::Lvalue::ReturnPointer);
op.pack_if_pair(&bcx).immediate()
};
bcx.ret(llval);
}
mir::TerminatorKind::Unreachable => {
@@ -196,13 +230,16 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
}
mir::TerminatorKind::Drop { ref location, target, unwind } => {
let lvalue = self.trans_lvalue(&bcx, location);
let ty = lvalue.ty.to_ty(bcx.tcx());
let ty = mir.lvalue_ty(bcx.tcx(), location).to_ty(bcx.tcx());
let ty = bcx.monomorphize(&ty);
// Double check for necessity to drop
if !glue::type_needs_drop(bcx.tcx(), ty) {
funclet_br(self, bcx, target);
return;
}
let lvalue = self.trans_lvalue(&bcx, location);
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty);
let llvalue = if drop_ty != ty {
@@ -534,7 +571,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
fn trans_argument(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
mut op: OperandRef<'tcx>,
op: OperandRef<'tcx>,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
next_idx: &mut usize,
@@ -562,8 +599,6 @@ fn trans_argument(&mut self,
self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee);
return;
}
op = op.pack_if_pair(bcx);
}
let arg = &fn_ty.args[*next_idx];
@@ -580,14 +615,16 @@ fn trans_argument(&mut self,
// Force by-ref if we have to load through a cast pointer.
let (mut llval, by_ref) = match op.val {
Immediate(llval) if arg.is_indirect() || arg.cast.is_some() => {
let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
bcx.store(llval, llscratch);
(llscratch, true)
Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() {
let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
self.store_operand(bcx, llscratch, op);
(llscratch, true)
} else {
(op.pack_if_pair(bcx).immediate(), false)
}
}
Immediate(llval) => (llval, false),
Ref(llval) => (llval, true),
Pair(..) => bug!("pairs handled above")
Ref(llval) => (llval, true)
};
if by_ref && !arg.is_indirect() {
@@ -773,40 +810,39 @@ fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
if fn_ret_ty.is_ignore() {
return ReturnDest::Nothing;
}
let dest = match *dest {
mir::Lvalue::Temp(idx) => {
let ret_ty = self.lvalue_ty(dest);
match self.temps[idx] {
TempRef::Lvalue(dest) => dest,
TempRef::Operand(None) => {
// Handle temporary lvalues, specifically Operand ones, as
// they don't have allocas
return if fn_ret_ty.is_indirect() {
// Odd, but possible, case, we have an operand temporary,
// but the calling convention has an indirect return.
let tmp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
llargs.push(tmp);
ReturnDest::IndirectOperand(tmp, idx)
} else if is_intrinsic {
// Currently, intrinsics always need a location to store
// the result. so we create a temporary alloca for the
// result
let tmp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
ReturnDest::IndirectOperand(tmp, idx)
} else {
ReturnDest::DirectOperand(idx)
};
}
TempRef::Operand(Some(_)) => {
bug!("lvalue temp already assigned to");
}
let dest = if let Some(index) = self.mir.local_index(dest) {
let ret_ty = self.lvalue_ty(dest);
match self.locals[index] {
LocalRef::Lvalue(dest) => dest,
LocalRef::Operand(None) => {
// Handle temporary lvalues, specifically Operand ones, as
// they don't have allocas
return if fn_ret_ty.is_indirect() {
// Odd, but possible, case, we have an operand temporary,
// but the calling convention has an indirect return.
let tmp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
llargs.push(tmp);
ReturnDest::IndirectOperand(tmp, index)
} else if is_intrinsic {
// Currently, intrinsics always need a location to store
// the result. so we create a temporary alloca for the
// result
let tmp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
ReturnDest::IndirectOperand(tmp, index)
} else {
ReturnDest::DirectOperand(index)
};
}
LocalRef::Operand(Some(_)) => {
bug!("lvalue local already assigned to");
}
}
_ => self.trans_lvalue(bcx, dest)
} else {
self.trans_lvalue(bcx, dest)
};
if fn_ret_ty.is_indirect() {
llargs.push(dest.llval);
@@ -850,11 +886,11 @@ fn store_return(&mut self,
match dest {
Nothing => (),
Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
IndirectOperand(tmp, idx) => {
IndirectOperand(tmp, index) => {
let op = self.trans_load(bcx, tmp, op.ty);
self.temps[idx] = TempRef::Operand(Some(op));
self.locals[index] = LocalRef::Operand(Some(op));
}
DirectOperand(idx) => {
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
let op = if ret_ty.cast.is_some() {
let tmp = bcx.with_block(|bcx| {
@@ -865,7 +901,7 @@ fn store_return(&mut self,
} else {
op.unpack_if_pair(bcx)
};
self.temps[idx] = TempRef::Operand(Some(op));
self.locals[index] = LocalRef::Operand(Some(op));
}
}
}
@@ -876,8 +912,8 @@ enum ReturnDest {
Nothing,
// Store the return value to the pointer
Store(ValueRef),
// Stores an indirect return value to an operand temporary lvalue
IndirectOperand(ValueRef, mir::Temp),
// Stores a direct return value to an operand temporary lvalue
DirectOperand(mir::Temp)
// Stores an indirect return value to an operand local lvalue
IndirectOperand(ValueRef, mir::Local),
// Stores a direct return value to an operand local lvalue
DirectOperand(mir::Local)
}
@@ -203,17 +203,8 @@ struct MirConstContext<'a, 'tcx: 'a> {
/// Type parameters for const fn and associated constants.
substs: &'tcx Substs<'tcx>,
/// Arguments passed to a const fn.
args: IndexVec<mir::Arg, Const<'tcx>>,
/// Variable values - specifically, argument bindings of a const fn.
vars: IndexVec<mir::Var, Option<Const<'tcx>>>,
/// Temp values.
temps: IndexVec<mir::Temp, Option<Const<'tcx>>>,
/// Value assigned to Return, which is the resulting constant.
return_value: Option<Const<'tcx>>
/// Values of locals in a constant or const fn.
locals: IndexVec<mir::Local, Option<Const<'tcx>>>
}
@@ -223,15 +214,17 @@ fn new(ccx: &'a CrateContext<'a, 'tcx>,
substs: &'tcx Substs<'tcx>,
args: IndexVec<mir::Arg, Const<'tcx>>)
-> MirConstContext<'a, 'tcx> {
MirConstContext {
let mut context = MirConstContext {
ccx: ccx,
mir: mir,
substs: substs,
args: args,
vars: IndexVec::from_elem(None, &mir.var_decls),
temps: IndexVec::from_elem(None, &mir.temp_decls),
return_value: None
locals: (0..mir.count_locals()).map(|_| None).collect(),
};
for (i, arg) in args.into_iter().enumerate() {
let index = mir.local_index(&mir::Lvalue::Arg(mir::Arg::new(i))).unwrap();
context.locals[index] = Some(arg);
}
context
}
fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
@@ -302,9 +295,10 @@ fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalFailure> {
mir::TerminatorKind::Goto { target } => target,
mir::TerminatorKind::Return => {
failure?;
return Ok(self.return_value.unwrap_or_else(|| {
let index = self.mir.local_index(&mir::Lvalue::ReturnPointer).unwrap();
return Ok(self.locals[index].unwrap_or_else(|| {
span_bug!(span, "no returned value in constant");
}))
}));
}
mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. } => {
@@ -366,30 +360,28 @@ fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalFailure> {
}
fn store(&mut self, dest: &mir::Lvalue<'tcx>, value: Const<'tcx>, span: Span) {
let dest = match *dest {
mir::Lvalue::Var(var) => &mut self.vars[var],
mir::Lvalue::Temp(temp) => &mut self.temps[temp],
mir::Lvalue::ReturnPointer => &mut self.return_value,
_ => span_bug!(span, "assignment to {:?} in constant", dest)
};
*dest = Some(value);
if let Some(index) = self.mir.local_index(dest) {
self.locals[index] = Some(value);
} else {
span_bug!(span, "assignment to {:?} in constant", dest);
}
}
fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
-> Result<ConstLvalue<'tcx>, ConstEvalFailure> {
let tcx = self.ccx.tcx();
if let Some(index) = self.mir.local_index(lvalue) {
return Ok(self.locals[index].unwrap_or_else(|| {
span_bug!(span, "{:?} not initialized", lvalue)
}).as_lvalue());
}
let lvalue = match *lvalue {
mir::Lvalue::Var(var) => {
self.vars[var].unwrap_or_else(|| {
span_bug!(span, "{:?} not initialized", var)
}).as_lvalue()
}
mir::Lvalue::Temp(temp) => {
self.temps[temp].unwrap_or_else(|| {
span_bug!(span, "{:?} not initialized", temp)
}).as_lvalue()
}
mir::Lvalue::Arg(arg) => self.args[arg].as_lvalue(),
mir::Lvalue::Var(_) |
mir::Lvalue::Temp(_) |
mir::Lvalue::Arg(_) |
mir::Lvalue::ReturnPointer => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
ConstLvalue {
base: Base::Static(consts::get_static(self.ccx, def_id).val),
@@ -397,9 +389,6 @@ fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
ty: self.mir.lvalue_ty(tcx, lvalue).to_ty(tcx)
}
}
mir::Lvalue::ReturnPointer => {
span_bug!(span, "accessing Lvalue::ReturnPointer in constant")
}
mir::Lvalue::Projection(ref projection) => {
let tr_base = self.const_lvalue(&projection.base, span)?;
let projected_ty = LvalueTy::Ty { ty: tr_base.ty }
@@ -26,7 +26,8 @@
use std::ptr;
use super::{MirContext, TempRef};
use super::{MirContext, LocalRef};
use super::operand::OperandValue;
#[derive(Copy, Clone, Debug)]
pub struct LvalueRef<'tcx> {
@@ -87,40 +88,50 @@ pub fn trans_lvalue(&mut self,
-> LvalueRef<'tcx> {
debug!("trans_lvalue(lvalue={:?})", lvalue);
let fcx = bcx.fcx();
let ccx = bcx.ccx();
let tcx = bcx.tcx();
if let Some(index) = self.mir.local_index(lvalue) {
match self.locals[index] {
LocalRef::Lvalue(lvalue) => {
return lvalue;
}
LocalRef::Operand(..) => {
bug!("using operand local {:?} as lvalue", lvalue);
}
}
}
let result = match *lvalue {
mir::Lvalue::Var(var) => self.vars[var],
mir::Lvalue::Temp(temp) => match self.temps[temp] {
TempRef::Lvalue(lvalue) =>
lvalue,
TempRef::Operand(..) =>
bug!("using operand temp {:?} as lvalue", lvalue),
},
mir::Lvalue::Arg(arg) => self.args[arg],
mir::Lvalue::Var(_) |
mir::Lvalue::Temp(_) |
mir::Lvalue::Arg(_) |
mir::Lvalue::ReturnPointer => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
let const_ty = self.lvalue_ty(lvalue);
LvalueRef::new_sized(consts::get_static(ccx, def_id).val,
LvalueTy::from_ty(const_ty))
},
mir::Lvalue::ReturnPointer => {
let llval = if !fcx.fn_ty.ret.is_ignore() {
bcx.with_block(|bcx| {
fcx.get_ret_slot(bcx, "")
})
} else {
// This is a void return; that is, there’s no place to store the value and
// there cannot really be one (or storing into it doesn’t make sense, anyway).
// Ergo, we return an undef ValueRef, so we do not have to special-case every
// place using lvalues, and could use it the same way you use a regular
// ReturnPointer LValue (i.e. store into it, load from it etc).
C_undef(fcx.fn_ty.ret.original_ty.ptr_to())
mir::Lvalue::Projection(box mir::Projection {
ref base,
elem: mir::ProjectionElem::Deref
}) => {
// Load the pointer from its location.
let ptr = self.trans_consume(bcx, base);
let projected_ty = LvalueTy::from_ty(ptr.ty)
.projection_ty(tcx, &mir::ProjectionElem::Deref);
let projected_ty = bcx.monomorphize(&projected_ty);
let (llptr, llextra) = match ptr.val {
OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
OperandValue::Pair(llptr, llextra) => (llptr, llextra),
OperandValue::Ref(_) => bug!("Deref of by-Ref type {:?}", ptr.ty)
};
let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
let return_ty = fn_return_ty.unwrap();
LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty))
},
LvalueRef {
llval: llptr,
llextra: llextra,
ty: projected_ty,
}
}
mir::Lvalue::Projection(ref projection) => {
let tr_base = self.trans_lvalue(bcx, &projection.base);
let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
@@ -138,15 +149,7 @@ pub fn trans_lvalue(&mut self,
};
let (llprojected, llextra) = match projection.elem {
mir::ProjectionElem::Deref => {
let base_ty = tr_base.ty.to_ty(tcx);
if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
(base::load_ty_builder(bcx, tr_base.llval, base_ty),
ptr::null_mut())
} else {
load_fat_ptr(bcx, tr_base.llval)
}
}
mir::ProjectionElem::Deref => bug!(),
mir::ProjectionElem::Field(ref field, _) => {
let base_ty = tr_base.ty.to_ty(tcx);
let base_repr = adt::represent_type(ccx, base_ty);
@@ -227,44 +230,41 @@ pub fn trans_lvalue(&mut self,
}
// Perform an action using the given Lvalue.
// If the Lvalue is an empty TempRef::Operand, then a temporary stack slot
// If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot
// is created first, then used as an operand to update the Lvalue.
pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
lvalue: &mir::Lvalue<'tcx>, f: F) -> U
where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
{
match *lvalue {
mir::Lvalue::Temp(temp) => {
match self.temps[temp] {
TempRef::Lvalue(lvalue) => f(self, lvalue),
TempRef::Operand(None) => {
let lvalue_ty = self.lvalue_ty(lvalue);
let lvalue = LvalueRef::alloca(bcx,
lvalue_ty,
"lvalue_temp");
let ret = f(self, lvalue);
let op = self.trans_load(bcx, lvalue.llval, lvalue_ty);
self.temps[temp] = TempRef::Operand(Some(op));
ret
}
TempRef::Operand(Some(_)) => {
// See comments in TempRef::new_operand as to why
// we always have Some in a ZST TempRef::Operand.
let ty = self.lvalue_ty(lvalue);
if common::type_is_zero_size(bcx.ccx(), ty) {
// Pass an undef pointer as no stores can actually occur.
let llptr = C_undef(type_of(bcx.ccx(), ty).ptr_to());
f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty)))
} else {
bug!("Lvalue temp already set");
}
if let Some(index) = self.mir.local_index(lvalue) {
match self.locals[index] {
LocalRef::Lvalue(lvalue) => f(self, lvalue),
LocalRef::Operand(None) => {
let lvalue_ty = self.lvalue_ty(lvalue);
let lvalue = LvalueRef::alloca(bcx,
lvalue_ty,
"lvalue_temp");
let ret = f(self, lvalue);
let op = self.trans_load(bcx, lvalue.llval, lvalue_ty);
self.locals[index] = LocalRef::Operand(Some(op));
ret
}
LocalRef::Operand(Some(_)) => {
// See comments in LocalRef::new_operand as to why
// we always have Some in a ZST LocalRef::Operand.
let ty = self.lvalue_ty(lvalue);
if common::type_is_zero_size(bcx.ccx(), ty) {
// Pass an undef pointer as no stores can actually occur.
let llptr = C_undef(type_of(bcx.ccx(), ty).ptr_to());
f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty)))
} else {
bug!("Lvalue local already set");
}
}
}
_ => {
let lvalue = self.trans_lvalue(bcx, lvalue);
f(self, lvalue)
}
} else {
let lvalue = self.trans_lvalue(bcx, lvalue);
f(self, lvalue)
}
}
@@ -84,16 +84,13 @@ pub struct MirContext<'bcx, 'tcx:'bcx> {
/// Cached unreachable block
unreachable_block: Option<Block<'bcx, 'tcx>>,
/// An LLVM alloca for each MIR `VarDecl`
vars: IndexVec<mir::Var, LvalueRef<'tcx>>,
/// The location where each MIR `TempDecl` is stored. This is
/// The location where each MIR arg/var/tmp/ret is stored. This is
/// usually an `LvalueRef` representing an alloca, but not always:
/// sometimes we can skip the alloca and just store the value
/// directly using an `OperandRef`, which makes for tighter LLVM
/// IR. The conditions for using an `OperandRef` are as follows:
///
/// - the type of the temporary must be judged "immediate" by `type_is_immediate`
/// - the type of the local must be judged "immediate" by `type_is_immediate`
/// - the operand must never be referenced indirectly
/// - we should not take its address using the `&` operator
/// - nor should it appear in an lvalue path like `tmp.a`
@@ -102,12 +99,7 @@ pub struct MirContext<'bcx, 'tcx:'bcx> {
///
/// Avoiding allocs can also be important for certain intrinsics,
/// notably `expect`.
temps: IndexVec<mir::Temp, TempRef<'tcx>>,
/// The arguments to the function; as args are lvalues, these are
/// always indirect, though we try to avoid creating an alloca
/// when we can (and just reuse the pointer the caller provided).
args: IndexVec<mir::Arg, LvalueRef<'tcx>>,
locals: IndexVec<mir::Local, LocalRef<'tcx>>,
/// Debug information for MIR scopes.
scopes: IndexVec<mir::VisibilityScope, DIScope>
@@ -119,14 +111,14 @@ pub fn debug_loc(&self, source_info: mir::SourceInfo) -> DebugLoc {
}
}
enum TempRef<'tcx> {
enum LocalRef<'tcx> {
Lvalue(LvalueRef<'tcx>),
Operand(Option<OperandRef<'tcx>>),
}
impl<'tcx> TempRef<'tcx> {
impl<'tcx> LocalRef<'tcx> {
fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>,
ty: ty::Ty<'tcx>) -> TempRef<'tcx> {
ty: ty::Ty<'tcx>) -> LocalRef<'tcx> {
if common::type_is_zero_size(ccx, ty) {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
@@ -142,9 +134,9 @@ fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>,
val: val,
ty: ty
};
TempRef::Operand(Some(op))
LocalRef::Operand(Some(op))
} else {
TempRef::Operand(None)
LocalRef::Operand(None)
}
}
}
@@ -157,8 +149,8 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
// Analyze the temps to determine which must be lvalues
// FIXME
let (lvalue_temps, cleanup_kinds) = bcx.with_block(|bcx| {
(analyze::lvalue_temps(bcx, &mir),
let (lvalue_locals, cleanup_kinds) = bcx.with_block(|bcx| {
(analyze::lvalue_locals(bcx, &mir),
analyze::cleanup_kinds(bcx, &mir))
});
@@ -166,37 +158,49 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
let scopes = debuginfo::create_mir_scopes(fcx);
// Allocate variable and temp allocas
let args = arg_value_refs(&bcx, &mir, &scopes);
let vars = mir.var_decls.iter()
.map(|decl| (bcx.monomorphize(&decl.ty), decl))
.map(|(mty, decl)| {
let lvalue = LvalueRef::alloca(&bcx, mty, &decl.name.as_str());
let scope = scopes[decl.source_info.scope];
if !scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo {
bcx.with_block(|bcx| {
declare_local(bcx, decl.name, mty, scope,
VariableAccess::DirectVariable { alloca: lvalue.llval },
VariableKind::LocalVariable, decl.source_info.span);
});
}
let locals = {
let args = arg_local_refs(&bcx, &mir, &scopes, &lvalue_locals);
let vars = mir.var_decls.iter().enumerate().map(|(i, decl)| {
let ty = bcx.monomorphize(&decl.ty);
let scope = scopes[decl.source_info.scope];
let dbg = !scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo;
let local = mir.local_index(&mir::Lvalue::Var(mir::Var::new(i))).unwrap();
if !lvalue_locals.contains(local.index()) && !dbg {
return LocalRef::new_operand(bcx.ccx(), ty);
}
lvalue
}).collect();
let temps = mir.temp_decls.iter()
.map(|decl| bcx.monomorphize(&decl.ty))
.enumerate()
.map(|(i, mty)| if lvalue_temps.contains(i) {
TempRef::Lvalue(LvalueRef::alloca(&bcx,
mty,
&format!("temp{:?}", i)))
} else {
// If this is an immediate temp, we do not create an
// alloca in advance. Instead we wait until we see the
// definition and update the operand there.
TempRef::new_operand(bcx.ccx(), mty)
})
.collect();
let lvalue = LvalueRef::alloca(&bcx, ty, &decl.name.as_str());
if dbg {
bcx.with_block(|bcx| {
declare_local(bcx, decl.name, ty, scope,
VariableAccess::DirectVariable { alloca: lvalue.llval },
VariableKind::LocalVariable, decl.source_info.span);
});
}
LocalRef::Lvalue(lvalue)
});
let locals = mir.temp_decls.iter().enumerate().map(|(i, decl)| {
(mir::Lvalue::Temp(mir::Temp::new(i)), decl.ty)
}).chain(mir.return_ty.maybe_converging().map(|ty| (mir::Lvalue::ReturnPointer, ty)));
args.into_iter().chain(vars).chain(locals.map(|(lvalue, ty)| {
let ty = bcx.monomorphize(&ty);
let local = mir.local_index(&lvalue).unwrap();
if lvalue == mir::Lvalue::ReturnPointer && fcx.fn_ty.ret.is_indirect() {
let llretptr = llvm::get_param(fcx.llfn, 0);
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
} else if lvalue_locals.contains(local.index()) {
LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", lvalue)))
} else {
// If this is an immediate local, we do not create an
// alloca in advance. Instead we wait until we see the
// definition and update the operand there.
LocalRef::new_operand(bcx.ccx(), ty)
}
})).collect()
};
// Allocate a `Block` for every basic block
let block_bcxs: IndexVec<mir::BasicBlock, Block<'blk,'tcx>> =
@@ -225,9 +229,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
unreachable_block: None,
cleanup_kinds: cleanup_kinds,
landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
vars: vars,
temps: temps,
args: args,
locals: locals,
scopes: scopes
};
@@ -266,10 +268,11 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
/// Produce, for each argument, a `ValueRef` pointing at the
/// argument's value. As arguments are lvalues, these are always
/// indirect.
fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
mir: &mir::Mir<'tcx>,
scopes: &IndexVec<mir::VisibilityScope, DIScope>)
-> IndexVec<mir::Arg, LvalueRef<'tcx>> {
scopes: &IndexVec<mir::VisibilityScope, DIScope>,
lvalue_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
let fcx = bcx.fcx();
let tcx = bcx.tcx();
let mut idx = 0;
@@ -285,6 +288,7 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
mir.arg_decls.iter().enumerate().map(|(arg_index, arg_decl)| {
let arg_ty = bcx.monomorphize(&arg_decl.ty);
let local = mir.local_index(&mir::Lvalue::Arg(mir::Arg::new(arg_index))).unwrap();
if arg_decl.spread {
// This argument (e.g. the last argument in the "rust-call" ABI)
// is a tuple that was spread at the ABI level and now we have
@@ -305,8 +309,8 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
let arg = &fcx.fn_ty.args[idx];
idx += 1;
if common::type_is_fat_ptr(tcx, tupled_arg_ty) {
// We pass fat pointers as two words, but inside the tuple
// they are the two sub-fields of a single aggregate field.
let meta = &fcx.fn_ty.args[idx];
idx += 1;
arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, dst));
@@ -335,7 +339,7 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx.fcx().span.unwrap_or(DUMMY_SP));
}));
}
return LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty));
return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty)));
}
let arg = &fcx.fn_ty.args[idx];
@@ -345,9 +349,42 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
// already put it in a temporary alloca and gave it up, unless
// we emit extra-debug-info, which requires local allocas :(.
// FIXME: lifetimes
if arg.pad.is_some() {
llarg_idx += 1;
}
let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
llarg_idx += 1;
llarg
} else if !lvalue_locals.contains(local.index()) &&
!arg.is_indirect() && arg.cast.is_none() &&
arg_scope.is_none() {
if arg.is_ignore() {
return LocalRef::new_operand(bcx.ccx(), arg_ty);
}
// We don't have to cast or keep the argument in the alloca.
// FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
// of putting everything in allocas just so we can use llvm.dbg.declare.
if arg.pad.is_some() {
llarg_idx += 1;
}
let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
llarg_idx += 1;
let val = if common::type_is_fat_ptr(tcx, arg_ty) {
let meta = &fcx.fn_ty.args[idx];
idx += 1;
assert_eq!((meta.cast, meta.pad), (None, None));
let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
llarg_idx += 1;
OperandValue::Pair(llarg, llmeta)
} else {
OperandValue::Immediate(llarg)
};
let operand = OperandRef {
val: val,
ty: arg_ty
};
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else {
let lltemp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
@@ -441,7 +478,7 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx.fcx().span.unwrap_or(DUMMY_SP));
}
}));
LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))
LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)))
}).collect()
}
@@ -21,7 +21,7 @@
use std::fmt;
use super::{MirContext, TempRef};
use super::{MirContext, LocalRef};
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
@@ -112,6 +112,8 @@ pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>)
if let OperandValue::Immediate(llval) = self.val {
// Deconstruct the immediate aggregate.
if common::type_is_imm_pair(bcx.ccx(), self.ty) {
debug!("Operand::unpack_if_pair: unpacking {:?}", self);
let mut a = bcx.extract_value(llval, 0);
let mut b = bcx.extract_value(llval, 1);
@@ -164,56 +166,65 @@ pub fn trans_load(&mut self,
OperandRef { val: val, ty: ty }
}
pub fn trans_operand(&mut self,
pub fn trans_consume(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>)
lvalue: &mir::Lvalue<'tcx>)
-> OperandRef<'tcx>
{
debug!("trans_operand(operand={:?})", operand);
debug!("trans_consume(lvalue={:?})", lvalue);
match *operand {
mir::Operand::Consume(ref lvalue) => {
// watch out for temporaries that do not have an
// alloca; they are handled somewhat differently
if let &mir::Lvalue::Temp(index) = lvalue {
match self.temps[index] {
TempRef::Operand(Some(o)) => {
return o;
}
TempRef::Operand(None) => {
bug!("use of {:?} before def", lvalue);
}
TempRef::Lvalue(..) => {
// use path below
}
}
// watch out for locals that do not have an
// alloca; they are handled somewhat differently
if let Some(index) = self.mir.local_index(lvalue) {
match self.locals[index] {
LocalRef::Operand(Some(o)) => {
return o;
}
LocalRef::Operand(None) => {
bug!("use of {:?} before def", lvalue);
}
LocalRef::Lvalue(..) => {
// use path below
}
}
}
// Moves out of pair fields are trivial.
if let &mir::Lvalue::Projection(ref proj) = lvalue {
if let mir::Lvalue::Temp(index) = proj.base {
let temp_ref = &self.temps[index];
if let &TempRef::Operand(Some(o)) = temp_ref {
match (o.val, &proj.elem) {
(OperandValue::Pair(a, b),
&mir::ProjectionElem::Field(ref f, ty)) => {
let llval = [a, b][f.index()];
return OperandRef {
val: OperandValue::Immediate(llval),
ty: bcx.monomorphize(&ty)
};
}
_ => {}
}
// Moves out of pair fields are trivial.
if let &mir::Lvalue::Projection(ref proj) = lvalue {
if let Some(index) = self.mir.local_index(&proj.base) {
if let LocalRef::Operand(Some(o)) = self.locals[index] {
match (o.val, &proj.elem) {
(OperandValue::Pair(a, b),
&mir::ProjectionElem::Field(ref f, ty)) => {
let llval = [a, b][f.index()];
return OperandRef {
val: OperandValue::Immediate(llval),
ty: bcx.monomorphize(&ty)
};
}
_ => {}
}
}
}
}
// for most lvalues, to consume them we just load them
// out from their home
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
self.trans_load(bcx, tr_lvalue.llval, ty)
}
pub fn trans_operand(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>)
-> OperandRef<'tcx>
{
debug!("trans_operand(operand={:?})", operand);
match *operand {
mir::Operand::Consume(ref lvalue) => {
self.trans_consume(bcx, lvalue)
}
mir::Operand::Constant(ref constant) => {
@@ -13,7 +13,7 @@
use common::{self, BlockAndBuilder};
use super::MirContext;
use super::TempRef;
use super::LocalRef;
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_statement(&mut self,
@@ -27,37 +27,34 @@ pub fn trans_statement(&mut self,
debug_loc.apply(bcx.fcx());
match statement.kind {
mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
match *lvalue {
mir::Lvalue::Temp(index) => {
match self.temps[index] {
TempRef::Lvalue(tr_dest) => {
self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
}
TempRef::Operand(None) => {
let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue,
debug_loc);
self.temps[index] = TempRef::Operand(Some(operand));
bcx
}
TempRef::Operand(Some(_)) => {
let ty = self.lvalue_ty(lvalue);
if let Some(index) = self.mir.local_index(lvalue) {
match self.locals[index] {
LocalRef::Lvalue(tr_dest) => {
self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
}
LocalRef::Operand(None) => {
let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue,
debug_loc);
self.locals[index] = LocalRef::Operand(Some(operand));
bcx
}
LocalRef::Operand(Some(_)) => {
let ty = self.lvalue_ty(lvalue);
if !common::type_is_zero_size(bcx.ccx(), ty) {
span_bug!(statement.source_info.span,
"operand {:?} already assigned",
rvalue);
} else {
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we translate the operand
self.trans_rvalue_operand(bcx, rvalue, debug_loc).0
}
}
}
_ => {
let tr_dest = self.trans_lvalue(&bcx, lvalue);
self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
}
} else {
let tr_dest = self.trans_lvalue(&bcx, lvalue);
self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
}
}
}
@@ -11,6 +11,7 @@
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
#![feature(rustc_attrs)]
pub struct Bytes {
a: u8,
@@ -21,6 +22,7 @@ pub struct Bytes {
// CHECK-LABEL: @borrow
#[no_mangle]
#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn borrow(x: &i32) -> &i32 {
// CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull
x
@@ -28,6 +30,7 @@ pub fn borrow(x: &i32) -> &i32 {
// CHECK-LABEL: @_box
#[no_mangle]
#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn _box(x: Box<i32>) -> i32 {
// CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull
*x
@@ -13,7 +13,7 @@
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
#![feature(naked_functions)]
#![feature(naked_functions, rustc_attrs)]
// CHECK: Function Attrs: naked uwtable
// CHECK-NEXT: define internal void @naked_empty()
@@ -26,6 +26,7 @@ fn naked_empty() {
// CHECK: Function Attrs: naked uwtable
#[no_mangle]
#[naked]
#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
// CHECK-NEXT: define internal void @naked_with_args(i{{[0-9]+}})
fn naked_with_args(a: isize) {
// CHECK: %a = alloca i{{[0-9]+}}
@@ -45,6 +46,7 @@ fn naked_with_return() -> isize {
// CHECK-NEXT: define internal i{{[0-9]+}} @naked_with_args_and_return(i{{[0-9]+}})
#[no_mangle]
#[naked]
#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
fn naked_with_args_and_return(a: isize) -> isize {
// CHECK: %a = alloca i{{[0-9]+}}
// CHECK: ret i{{[0-9]+}} %{{[0-9]+}}
@@ -10,10 +10,13 @@
// error-pattern: overflow representing the type `S`
#![feature(rustc_attrs)]
trait Mirror { type It: ?Sized; }
impl<T: ?Sized> Mirror for T { type It = Self; }
struct S(Option<<S as Mirror>::It>);
#[rustc_no_mir] // FIXME #27840 MIR tries to represent `std::option::Option<S>` first.
fn main() {
let _s = S(None);
}