Commit 7279af86 authored by Eduard Burtescu

trans: generalize immediate temporaries to all MIR locals.

Parent bec32eb4
@@ -144,6 +144,40 @@ pub fn predecessors(&self) -> Ref<IndexVec<BasicBlock, Vec<BasicBlock>>> {
     pub fn predecessors_for(&self, bb: BasicBlock) -> Ref<Vec<BasicBlock>> {
         Ref::map(self.predecessors(), |p| &p[bb])
     }
 
+    /// Maps locals (Arg's, Var's, Temp's and ReturnPointer, in that order)
+    /// to their index in the whole list of locals. This is useful if you
+    /// want to treat all locals the same instead of repeating yourself.
+    pub fn local_index(&self, lvalue: &Lvalue<'tcx>) -> Option<Local> {
+        let idx = match *lvalue {
+            Lvalue::Arg(arg) => arg.index(),
+            Lvalue::Var(var) => {
+                self.arg_decls.len() +
+                var.index()
+            }
+            Lvalue::Temp(temp) => {
+                self.arg_decls.len() +
+                self.var_decls.len() +
+                temp.index()
+            }
+            Lvalue::ReturnPointer => {
+                self.arg_decls.len() +
+                self.var_decls.len() +
+                self.temp_decls.len()
+            }
+            Lvalue::Static(_) |
+            Lvalue::Projection(_) => return None
+        };
+        Some(Local::new(idx))
+    }
+
+    /// Counts the number of locals, such that local_index
+    /// will always return an index smaller than this count.
+    pub fn count_locals(&self) -> usize {
+        self.arg_decls.len() +
+        self.var_decls.len() +
+        self.temp_decls.len() + 1
+    }
 }
 
 impl<'tcx> Index<BasicBlock> for Mir<'tcx> {
@@ -663,6 +697,7 @@ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
 newtype_index!(Var, "var");
 newtype_index!(Temp, "tmp");
 newtype_index!(Arg, "arg");
+newtype_index!(Local, "local");
 
 /// A path to a value; something that can be evaluated without
 /// changing or disturbing program state.
...
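The hunk above is the heart of the change: the four kinds of MIR lvalue that name function-local storage (Arg, Var, Temp, ReturnPointer) are folded into one flat `Local` index space, laid out as args, then vars, then temps, then the return pointer, so later passes can keep a single table per function instead of three or four. The following self-contained sketch (illustrative types and counts, not the compiler's own definitions) models that layout and the `local_index`/`count_locals` contract:

// Standalone sketch of the flattened local index space; the enum and
// counts here are stand-ins, not the real MIR types.
enum Lvalue {
    Arg(usize),
    Var(usize),
    Temp(usize),
    ReturnPointer,
    Static,
}

struct Mir {
    arg_count: usize,
    var_count: usize,
    temp_count: usize,
}

impl Mir {
    // Args first, then vars, then temps, then the single return pointer.
    fn local_index(&self, lvalue: Lvalue) -> Option<usize> {
        match lvalue {
            Lvalue::Arg(i) => Some(i),
            Lvalue::Var(i) => Some(self.arg_count + i),
            Lvalue::Temp(i) => Some(self.arg_count + self.var_count + i),
            Lvalue::ReturnPointer => {
                Some(self.arg_count + self.var_count + self.temp_count)
            }
            // Statics (and projections, in the real compiler) are not locals.
            Lvalue::Static => None,
        }
    }

    fn count_locals(&self) -> usize {
        self.arg_count + self.var_count + self.temp_count + 1
    }
}

fn main() {
    let mir = Mir { arg_count: 2, var_count: 3, temp_count: 4 };
    assert_eq!(mir.local_index(Lvalue::Arg(1)), Some(1));
    assert_eq!(mir.local_index(Lvalue::Var(0)), Some(2));
    assert_eq!(mir.local_index(Lvalue::Temp(3)), Some(8));
    assert_eq!(mir.local_index(Lvalue::ReturnPointer), Some(9));
    assert_eq!(mir.local_index(Lvalue::Static), None);
    // Every Some(index) is smaller than count_locals(), as the doc comment promises.
    assert_eq!(mir.count_locals(), 10);
}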
@@ -492,6 +492,13 @@ pub fn unwrap_or(self, def: Ty<'tcx>) -> Ty<'tcx> {
             ty::FnDiverging => def
         }
     }
+
+    pub fn maybe_converging(self) -> Option<Ty<'tcx>> {
+        match self {
+            ty::FnConverging(t) => Some(t),
+            ty::FnDiverging => None
+        }
+    }
 }
 
 pub type PolyFnOutput<'tcx> = Binder<FnOutput<'tcx>>;
...
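`maybe_converging` mirrors the existing `unwrap_or`, but surfaces a diverging return type as `None`, which is what lets the return type be chained onto the iterator of local types in `analyze.rs` and `trans_mir` below (a diverging function simply contributes no return-pointer local). A minimal standalone model of the two accessors, assuming a toy `FnOutput` enum:

// Illustrative stand-in for ty::FnOutput -- not the compiler's definition.
enum FnOutput<T> {
    FnConverging(T),
    FnDiverging,
}

impl<T> FnOutput<T> {
    fn unwrap_or(self, def: T) -> T {
        match self {
            FnOutput::FnConverging(t) => t,
            FnOutput::FnDiverging => def,
        }
    }

    fn maybe_converging(self) -> Option<T> {
        match self {
            FnOutput::FnConverging(t) => Some(t),
            FnOutput::FnDiverging => None,
        }
    }
}

fn main() {
    let ret: FnOutput<&str> = FnOutput::FnConverging("i32");
    // A converging return type contributes one extra "local type" when
    // chained; a diverging one contributes nothing.
    let locals: Vec<&str> = vec!["arg0", "var0"].into_iter()
        .chain(ret.maybe_converging())
        .collect();
    assert_eq!(locals, ["arg0", "var0", "i32"]);
    assert_eq!(FnOutput::FnDiverging.unwrap_or("unit"), "unit");
}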
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! An analysis to determine which temporaries require allocas and
+//! An analysis to determine which locals require allocas and
 //! which do not.
 
 use rustc_data_structures::bitvec::BitVector;
@@ -21,16 +21,20 @@
 use glue;
 use super::rvalue;
 
-pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
-                               mir: &mir::Mir<'tcx>) -> BitVector {
+pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>,
+                                 mir: &mir::Mir<'tcx>) -> BitVector {
     let bcx = bcx.build();
-    let mut analyzer = TempAnalyzer::new(mir, &bcx, mir.temp_decls.len());
+    let mut analyzer = LocalAnalyzer::new(mir, &bcx);
 
     analyzer.visit_mir(mir);
 
-    for (index, temp_decl) in mir.temp_decls.iter().enumerate() {
-        let ty = bcx.monomorphize(&temp_decl.ty);
-        debug!("temp {:?} has type {:?}", index, ty);
+    let local_types = mir.arg_decls.iter().map(|a| a.ty)
+        .chain(mir.var_decls.iter().map(|v| v.ty))
+        .chain(mir.temp_decls.iter().map(|t| t.ty))
+        .chain(mir.return_ty.maybe_converging());
+    for (index, ty) in local_types.enumerate() {
+        let ty = bcx.monomorphize(&ty);
+        debug!("local {} has type {:?}", index, ty);
         if ty.is_scalar() ||
             ty.is_unique() ||
             ty.is_region_ptr() ||
@@ -50,66 +54,87 @@ pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
             // (e.g. structs) into an alloca unconditionally, just so
             // that we don't have to deal with having two pathways
             // (gep vs extractvalue etc).
-            analyzer.mark_as_lvalue(index);
+            analyzer.mark_as_lvalue(mir::Local::new(index));
         }
     }
 
-    analyzer.lvalue_temps
+    analyzer.lvalue_locals
 }
 
-struct TempAnalyzer<'mir, 'bcx: 'mir, 'tcx: 'bcx> {
+struct LocalAnalyzer<'mir, 'bcx: 'mir, 'tcx: 'bcx> {
     mir: &'mir mir::Mir<'tcx>,
     bcx: &'mir BlockAndBuilder<'bcx, 'tcx>,
-    lvalue_temps: BitVector,
+    lvalue_locals: BitVector,
     seen_assigned: BitVector
 }
 
-impl<'mir, 'bcx, 'tcx> TempAnalyzer<'mir, 'bcx, 'tcx> {
+impl<'mir, 'bcx, 'tcx> LocalAnalyzer<'mir, 'bcx, 'tcx> {
     fn new(mir: &'mir mir::Mir<'tcx>,
-           bcx: &'mir BlockAndBuilder<'bcx, 'tcx>,
-           temp_count: usize) -> TempAnalyzer<'mir, 'bcx, 'tcx> {
-        TempAnalyzer {
+           bcx: &'mir BlockAndBuilder<'bcx, 'tcx>)
+           -> LocalAnalyzer<'mir, 'bcx, 'tcx> {
+        let local_count = mir.count_locals();
+        LocalAnalyzer {
             mir: mir,
             bcx: bcx,
-            lvalue_temps: BitVector::new(temp_count),
-            seen_assigned: BitVector::new(temp_count)
+            lvalue_locals: BitVector::new(local_count),
+            seen_assigned: BitVector::new(local_count)
         }
     }
 
-    fn mark_as_lvalue(&mut self, temp: usize) {
-        debug!("marking temp {} as lvalue", temp);
-        self.lvalue_temps.insert(temp);
+    fn mark_as_lvalue(&mut self, local: mir::Local) {
+        debug!("marking {:?} as lvalue", local);
+        self.lvalue_locals.insert(local.index());
     }
 
-    fn mark_assigned(&mut self, temp: usize) {
-        if !self.seen_assigned.insert(temp) {
-            self.mark_as_lvalue(temp);
+    fn mark_assigned(&mut self, local: mir::Local) {
+        if !self.seen_assigned.insert(local.index()) {
+            self.mark_as_lvalue(local);
         }
     }
 }
 
-impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for TempAnalyzer<'mir, 'bcx, 'tcx> {
+impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
     fn visit_assign(&mut self,
                     block: mir::BasicBlock,
                     lvalue: &mir::Lvalue<'tcx>,
                     rvalue: &mir::Rvalue<'tcx>) {
         debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue);
 
-        match *lvalue {
-            mir::Lvalue::Temp(temp) => {
-                self.mark_assigned(temp.index());
-                if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) {
-                    self.mark_as_lvalue(temp.index());
-                }
-            }
-            _ => {
-                self.visit_lvalue(lvalue, LvalueContext::Store);
-            }
+        if let Some(index) = self.mir.local_index(lvalue) {
+            self.mark_assigned(index);
+            if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) {
+                self.mark_as_lvalue(index);
+            }
+        } else {
+            self.visit_lvalue(lvalue, LvalueContext::Store);
         }
 
         self.visit_rvalue(rvalue);
     }
 
+    fn visit_terminator_kind(&mut self,
+                             block: mir::BasicBlock,
+                             kind: &mir::TerminatorKind<'tcx>) {
+        match *kind {
+            mir::TerminatorKind::Call {
+                func: mir::Operand::Constant(mir::Constant {
+                    literal: mir::Literal::Item { def_id, .. }, ..
+                }),
+                ref args, ..
+            } if Some(def_id) == self.bcx.tcx().lang_items.box_free_fn() => {
+                // box_free(x) shares with `drop x` the property that it
+                // is not guaranteed to be statically dominated by the
+                // definition of x, so x must always be in an alloca.
+                if let mir::Operand::Consume(ref lvalue) = args[0] {
+                    self.visit_lvalue(lvalue, LvalueContext::Drop);
+                }
+            }
+            _ => {}
+        }
+        self.super_terminator_kind(block, kind);
+    }
+
     fn visit_lvalue(&mut self,
                     lvalue: &mir::Lvalue<'tcx>,
                     context: LvalueContext) {
@@ -117,9 +142,9 @@ fn visit_lvalue(&mut self,
 
         // Allow uses of projections of immediate pair fields.
         if let mir::Lvalue::Projection(ref proj) = *lvalue {
-            if let mir::Lvalue::Temp(temp) = proj.base {
-                let ty = self.mir.temp_decls[temp].ty;
-                let ty = self.bcx.monomorphize(&ty);
+            if self.mir.local_index(&proj.base).is_some() {
+                let ty = self.mir.lvalue_ty(self.bcx.tcx(), &proj.base);
+                let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
                 if common::type_is_imm_pair(self.bcx.ccx(), ty) {
                     if let mir::ProjectionElem::Field(..) = proj.elem {
                         if let LvalueContext::Consume = context {
@@ -130,34 +155,30 @@ fn visit_lvalue(&mut self,
             }
         }
 
-        match *lvalue {
-            mir::Lvalue::Temp(temp) => {
-                match context {
-                    LvalueContext::Call => {
-                        self.mark_assigned(temp.index());
-                    }
-                    LvalueContext::Consume => {
-                    }
-                    LvalueContext::Store |
-                    LvalueContext::Inspect |
-                    LvalueContext::Borrow { .. } |
-                    LvalueContext::Slice { .. } |
-                    LvalueContext::Projection => {
-                        self.mark_as_lvalue(temp.index());
-                    }
-                    LvalueContext::Drop => {
-                        let ty = self.mir.temp_decls[index as usize].ty;
-                        let ty = self.bcx.monomorphize(&ty);
-
-                        // Only need the lvalue if we're actually dropping it.
-                        if glue::type_needs_drop(self.bcx.tcx(), ty) {
-                            self.mark_as_lvalue(index as usize);
-                        }
-                    }
-                }
-            }
-            _ => {
-            }
+        if let Some(index) = self.mir.local_index(lvalue) {
+            match context {
+                LvalueContext::Call => {
+                    self.mark_assigned(index);
+                }
+                LvalueContext::Consume => {
+                }
+                LvalueContext::Store |
+                LvalueContext::Inspect |
+                LvalueContext::Borrow { .. } |
+                LvalueContext::Slice { .. } |
+                LvalueContext::Projection => {
+                    self.mark_as_lvalue(index);
+                }
+                LvalueContext::Drop => {
+                    let ty = self.mir.lvalue_ty(self.bcx.tcx(), lvalue);
+                    let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
+
+                    // Only need the lvalue if we're actually dropping it.
+                    if glue::type_needs_drop(self.bcx.tcx(), ty) {
+                        self.mark_as_lvalue(index);
+                    }
+                }
+            }
         }
 
         // A deref projection only reads the pointer, never needs the lvalue.
...
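The analysis itself is small: every local starts out as a candidate for an SSA-style operand, and is demoted to a memory-backed lvalue if it is assigned more than once, or used in a way that needs an address (borrow, projection, inspection, or a drop of a type that actually needs dropping). A simplified, self-contained sketch of that bookkeeping, using `Vec<bool>` in place of the compiler's `BitVector` and an invented `Use` enum for the contexts:

// Simplified model of LocalAnalyzer's two bit sets; not compiler code.
#[derive(Clone, Copy)]
enum Use {
    Assign,  // local is written
    Consume, // local is read as a plain operand
    Borrow,  // address taken, projection, inspection, ...
}

struct Analyzer {
    seen_assigned: Vec<bool>,
    lvalue_locals: Vec<bool>,
}

impl Analyzer {
    fn new(local_count: usize) -> Analyzer {
        Analyzer {
            seen_assigned: vec![false; local_count],
            lvalue_locals: vec![false; local_count],
        }
    }

    fn mark_as_lvalue(&mut self, local: usize) {
        self.lvalue_locals[local] = true;
    }

    fn visit(&mut self, local: usize, context: Use) {
        match context {
            Use::Assign => {
                // A second assignment forces the local into memory.
                if self.seen_assigned[local] {
                    self.mark_as_lvalue(local);
                } else {
                    self.seen_assigned[local] = true;
                }
            }
            Use::Consume => {}
            Use::Borrow => self.mark_as_lvalue(local),
        }
    }
}

fn main() {
    let mut a = Analyzer::new(3);
    a.visit(0, Use::Assign);  // single assignment: can stay an operand
    a.visit(0, Use::Consume); // plain reads change nothing
    a.visit(1, Use::Assign);
    a.visit(1, Use::Assign);  // reassigned: needs an alloca
    a.visit(2, Use::Assign);
    a.visit(2, Use::Borrow);  // address-like use: needs an alloca
    assert_eq!(a.lvalue_locals, [false, true, true]);
}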
@@ -32,7 +32,7 @@
 use rustc_data_structures::fnv::FnvHashMap;
 use syntax::parse::token;
 
-use super::{MirContext, TempRef};
+use super::{MirContext, LocalRef};
 use super::analyze::CleanupKind;
 use super::constant::Const;
 use super::lvalue::{LvalueRef, load_fat_ptr};
@@ -186,9 +186,43 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
             }
 
             mir::TerminatorKind::Return => {
-                bcx.with_block(|bcx| {
-                    self.fcx.build_return_block(bcx, debug_loc);
-                })
+                let ret = bcx.fcx().fn_ty.ret;
+                if ret.is_ignore() || ret.is_indirect() {
+                    bcx.ret_void();
+                    return;
+                }
+
+                let llval = if let Some(cast_ty) = ret.cast {
+                    let index = mir.local_index(&mir::Lvalue::ReturnPointer).unwrap();
+                    let op = match self.locals[index] {
+                        LocalRef::Operand(Some(op)) => op,
+                        LocalRef::Operand(None) => bug!("use of return before def"),
+                        LocalRef::Lvalue(tr_lvalue) => {
+                            OperandRef {
+                                val: Ref(tr_lvalue.llval),
+                                ty: tr_lvalue.ty.to_ty(bcx.tcx())
+                            }
+                        }
+                    };
+                    let llslot = match op.val {
+                        Immediate(_) | Pair(..) => {
+                            let llscratch = build::AllocaFcx(bcx.fcx(), ret.original_ty, "ret");
+                            self.store_operand(&bcx, llscratch, op);
+                            llscratch
+                        }
+                        Ref(llval) => llval
+                    };
+                    let load = bcx.load(bcx.pointercast(llslot, cast_ty.ptr_to()));
+                    let llalign = llalign_of_min(bcx.ccx(), ret.ty);
+                    unsafe {
+                        llvm::LLVMSetAlignment(load, llalign);
+                    }
+                    load
+                } else {
+                    let op = self.trans_consume(&bcx, &mir::Lvalue::ReturnPointer);
+                    op.pack_if_pair(&bcx).immediate()
+                };
+                bcx.ret(llval);
             }
 
             mir::TerminatorKind::Unreachable => {
@@ -537,7 +571,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
     fn trans_argument(&mut self,
                       bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                      mut op: OperandRef<'tcx>,
+                      op: OperandRef<'tcx>,
                       llargs: &mut Vec<ValueRef>,
                       fn_ty: &FnType,
                       next_idx: &mut usize,
@@ -565,8 +599,6 @@ fn trans_argument(&mut self,
                 self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee);
                 return;
             }
-
-            op = op.pack_if_pair(bcx);
         }
 
         let arg = &fn_ty.args[*next_idx];
@@ -583,14 +615,16 @@ fn trans_argument(&mut self,
         // Force by-ref if we have to load through a cast pointer.
         let (mut llval, by_ref) = match op.val {
-            Immediate(llval) if arg.is_indirect() || arg.cast.is_some() => {
-                let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
-                bcx.store(llval, llscratch);
-                (llscratch, true)
+            Immediate(_) | Pair(..) => {
+                if arg.is_indirect() || arg.cast.is_some() {
+                    let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
+                    self.store_operand(bcx, llscratch, op);
+                    (llscratch, true)
+                } else {
+                    (op.pack_if_pair(bcx).immediate(), false)
+                }
             }
-            Immediate(llval) => (llval, false),
-            Ref(llval) => (llval, true),
-            Pair(..) => bug!("pairs handled above")
+            Ref(llval) => (llval, true)
         };
 
         if by_ref && !arg.is_indirect() {
@@ -776,40 +810,39 @@ fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
        if fn_ret_ty.is_ignore() {
            return ReturnDest::Nothing;
        }
-        let dest = match *dest {
-            mir::Lvalue::Temp(idx) => {
-                let ret_ty = self.lvalue_ty(dest);
-                match self.temps[idx] {
-                    TempRef::Lvalue(dest) => dest,
-                    TempRef::Operand(None) => {
-                        // Handle temporary lvalues, specifically Operand ones, as
-                        // they don't have allocas
-                        return if fn_ret_ty.is_indirect() {
-                            // Odd, but possible, case, we have an operand temporary,
-                            // but the calling convention has an indirect return.
-                            let tmp = bcx.with_block(|bcx| {
-                                base::alloc_ty(bcx, ret_ty, "tmp_ret")
-                            });
-                            llargs.push(tmp);
-                            ReturnDest::IndirectOperand(tmp, idx)
-                        } else if is_intrinsic {
-                            // Currently, intrinsics always need a location to store
-                            // the result. so we create a temporary alloca for the
-                            // result
-                            let tmp = bcx.with_block(|bcx| {
-                                base::alloc_ty(bcx, ret_ty, "tmp_ret")
-                            });
-                            ReturnDest::IndirectOperand(tmp, idx)
-                        } else {
-                            ReturnDest::DirectOperand(idx)
-                        };
-                    }
-                    TempRef::Operand(Some(_)) => {
-                        bug!("lvalue temp already assigned to");
-                    }
-                }
-            }
-            _ => self.trans_lvalue(bcx, dest)
+        let dest = if let Some(index) = self.mir.local_index(dest) {
+            let ret_ty = self.lvalue_ty(dest);
+            match self.locals[index] {
+                LocalRef::Lvalue(dest) => dest,
+                LocalRef::Operand(None) => {
+                    // Handle temporary lvalues, specifically Operand ones, as
+                    // they don't have allocas
+                    return if fn_ret_ty.is_indirect() {
+                        // Odd, but possible, case, we have an operand temporary,
+                        // but the calling convention has an indirect return.
+                        let tmp = bcx.with_block(|bcx| {
+                            base::alloc_ty(bcx, ret_ty, "tmp_ret")
+                        });
+                        llargs.push(tmp);
+                        ReturnDest::IndirectOperand(tmp, index)
+                    } else if is_intrinsic {
+                        // Currently, intrinsics always need a location to store
+                        // the result. so we create a temporary alloca for the
+                        // result
+                        let tmp = bcx.with_block(|bcx| {
+                            base::alloc_ty(bcx, ret_ty, "tmp_ret")
+                        });
+                        ReturnDest::IndirectOperand(tmp, index)
+                    } else {
+                        ReturnDest::DirectOperand(index)
+                    };
+                }
+                LocalRef::Operand(Some(_)) => {
+                    bug!("lvalue local already assigned to");
+                }
+            }
+        } else {
+            self.trans_lvalue(bcx, dest)
        };
 
        if fn_ret_ty.is_indirect() {
            llargs.push(dest.llval);
@@ -853,11 +886,11 @@ fn store_return(&mut self,
         match dest {
             Nothing => (),
             Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
-            IndirectOperand(tmp, idx) => {
+            IndirectOperand(tmp, index) => {
                 let op = self.trans_load(bcx, tmp, op.ty);
-                self.temps[idx] = TempRef::Operand(Some(op));
+                self.locals[index] = LocalRef::Operand(Some(op));
             }
-            DirectOperand(idx) => {
+            DirectOperand(index) => {
                 // If there is a cast, we have to store and reload.
                 let op = if ret_ty.cast.is_some() {
                     let tmp = bcx.with_block(|bcx| {
@@ -868,7 +901,7 @@ fn store_return(&mut self,
                 } else {
                     op.unpack_if_pair(bcx)
                 };
-                self.temps[idx] = TempRef::Operand(Some(op));
+                self.locals[index] = LocalRef::Operand(Some(op));
             }
         }
     }
@@ -879,8 +912,8 @@ enum ReturnDest {
     Nothing,
     // Store the return value to the pointer
     Store(ValueRef),
-    // Stores an indirect return value to an operand temporary lvalue
-    IndirectOperand(ValueRef, mir::Temp),
-    // Stores a direct return value to an operand temporary lvalue
-    DirectOperand(mir::Temp)
+    // Stores an indirect return value to an operand local lvalue
+    IndirectOperand(ValueRef, mir::Local),
+    // Stores a direct return value to an operand local lvalue
+    DirectOperand(mir::Local)
 }
@@ -203,17 +203,8 @@ struct MirConstContext<'a, 'tcx: 'a> {
     /// Type parameters for const fn and associated constants.
     substs: &'tcx Substs<'tcx>,
 
-    /// Arguments passed to a const fn.
-    args: IndexVec<mir::Arg, Const<'tcx>>,
-
-    /// Variable values - specifically, argument bindings of a const fn.
-    vars: IndexVec<mir::Var, Option<Const<'tcx>>>,
-
-    /// Temp values.
-    temps: IndexVec<mir::Temp, Option<Const<'tcx>>>,
-
-    /// Value assigned to Return, which is the resulting constant.
-    return_value: Option<Const<'tcx>>
+    /// Values of locals in a constant or const fn.
+    locals: IndexVec<mir::Local, Option<Const<'tcx>>>
 }
@@ -223,15 +214,17 @@ fn new(ccx: &'a CrateContext<'a, 'tcx>,
            substs: &'tcx Substs<'tcx>,
            args: IndexVec<mir::Arg, Const<'tcx>>)
            -> MirConstContext<'a, 'tcx> {
-        MirConstContext {
+        let mut context = MirConstContext {
             ccx: ccx,
             mir: mir,
             substs: substs,
-            args: args,
-            vars: IndexVec::from_elem(None, &mir.var_decls),
-            temps: IndexVec::from_elem(None, &mir.temp_decls),
-            return_value: None
+            locals: (0..mir.count_locals()).map(|_| None).collect(),
+        };
+        for (i, arg) in args.into_iter().enumerate() {
+            let index = mir.local_index(&mir::Lvalue::Arg(mir::Arg::new(i))).unwrap();
+            context.locals[index] = Some(arg);
         }
+        context
     }
 
     fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
@@ -302,9 +295,10 @@ fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalFailure> {
                 mir::TerminatorKind::Goto { target } => target,
                 mir::TerminatorKind::Return => {
                     failure?;
-                    return Ok(self.return_value.unwrap_or_else(|| {
+                    let index = self.mir.local_index(&mir::Lvalue::ReturnPointer).unwrap();
+                    return Ok(self.locals[index].unwrap_or_else(|| {
                         span_bug!(span, "no returned value in constant");
-                    }))
+                    }));
                 }
 
                 mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. } => {
@@ -366,30 +360,28 @@ fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalFailure> {
     }
 
     fn store(&mut self, dest: &mir::Lvalue<'tcx>, value: Const<'tcx>, span: Span) {
-        let dest = match *dest {
-            mir::Lvalue::Var(var) => &mut self.vars[var],
-            mir::Lvalue::Temp(temp) => &mut self.temps[temp],
-            mir::Lvalue::ReturnPointer => &mut self.return_value,
-            _ => span_bug!(span, "assignment to {:?} in constant", dest)
-        };
-        *dest = Some(value);
+        if let Some(index) = self.mir.local_index(dest) {
+            self.locals[index] = Some(value);
+        } else {
+            span_bug!(span, "assignment to {:?} in constant", dest);
+        }
     }
 
     fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
                     -> Result<ConstLvalue<'tcx>, ConstEvalFailure> {
         let tcx = self.ccx.tcx();
+
+        if let Some(index) = self.mir.local_index(lvalue) {
+            return Ok(self.locals[index].unwrap_or_else(|| {
+                span_bug!(span, "{:?} not initialized", lvalue)
+            }).as_lvalue());
+        }
+
         let lvalue = match *lvalue {
-            mir::Lvalue::Var(var) => {
-                self.vars[var].unwrap_or_else(|| {
-                    span_bug!(span, "{:?} not initialized", var)
-                }).as_lvalue()
-            }
-            mir::Lvalue::Temp(temp) => {
-                self.temps[temp].unwrap_or_else(|| {
-                    span_bug!(span, "{:?} not initialized", temp)
-                }).as_lvalue()
-            }
-            mir::Lvalue::Arg(arg) => self.args[arg].as_lvalue(),
+            mir::Lvalue::Var(_) |
+            mir::Lvalue::Temp(_) |
+            mir::Lvalue::Arg(_) |
+            mir::Lvalue::ReturnPointer => bug!(), // handled above
             mir::Lvalue::Static(def_id) => {
                 ConstLvalue {
                     base: Base::Static(consts::get_static(self.ccx, def_id).val),
@@ -397,9 +389,6 @@ fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
                     ty: self.mir.lvalue_ty(tcx, lvalue).to_ty(tcx)
                 }
             }
-            mir::Lvalue::ReturnPointer => {
-                span_bug!(span, "accessing Lvalue::ReturnPointer in constant")
-            }
             mir::Lvalue::Projection(ref projection) => {
                 let tr_base = self.const_lvalue(&projection.base, span)?;
                 let projected_ty = LvalueTy::Ty { ty: tr_base.ty }
...
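In the constant evaluator the same flattening replaces three tables (`args`, `vars`, `temps`) plus a separate `return_value` with a single `IndexVec<Local, Option<Const>>`: arguments are the only slots seeded before evaluation, and the result is read back from the return-pointer slot. A toy version of that storage pattern, with plain integers standing in for `Const` and invented helper names:

// Toy const-fn evaluator state: one Option slot per local, args pre-seeded,
// the result read back from the return-pointer slot. Not the real MirConstContext.
struct ConstLocals {
    locals: Vec<Option<i64>>, // args ++ vars ++ temps ++ return pointer
    return_index: usize,
}

impl ConstLocals {
    fn new(args: Vec<i64>, var_count: usize, temp_count: usize) -> ConstLocals {
        let count = args.len() + var_count + temp_count + 1;
        let mut locals = vec![None; count];
        // Arguments are the only locals with a value before evaluation starts.
        for (i, arg) in args.into_iter().enumerate() {
            locals[i] = Some(arg);
        }
        ConstLocals { locals, return_index: count - 1 }
    }

    fn store(&mut self, index: usize, value: i64) {
        self.locals[index] = Some(value);
    }

    fn returned(&self) -> i64 {
        self.locals[self.return_index]
            .expect("no returned value in constant")
    }
}

fn main() {
    // const fn add(a: i64, b: i64) -> i64 { a + b } evaluated for (2, 3):
    let mut ctx = ConstLocals::new(vec![2, 3], 0, 1);
    let tmp = ctx.locals[0].unwrap() + ctx.locals[1].unwrap();
    ctx.store(2, tmp);                // temp0 = a + b
    ctx.store(ctx.return_index, tmp); // return pointer = temp0
    assert_eq!(ctx.returned(), 5);
}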
@@ -26,7 +26,7 @@
 use std::ptr;
 
-use super::{MirContext, TempRef};
+use super::{MirContext, LocalRef};
 use super::operand::OperandValue;
 
 #[derive(Copy, Clone, Debug)]
@@ -88,40 +88,30 @@ pub fn trans_lvalue(&mut self,
                         -> LvalueRef<'tcx> {
         debug!("trans_lvalue(lvalue={:?})", lvalue);
 
-        let fcx = bcx.fcx();
         let ccx = bcx.ccx();
         let tcx = bcx.tcx();
+
+        if let Some(index) = self.mir.local_index(lvalue) {
+            match self.locals[index] {
+                LocalRef::Lvalue(lvalue) => {
+                    return lvalue;
+                }
+                LocalRef::Operand(..) => {
+                    bug!("using operand local {:?} as lvalue", lvalue);
+                }
+            }
+        }
+
         let result = match *lvalue {
-            mir::Lvalue::Var(var) => self.vars[var],
-            mir::Lvalue::Temp(temp) => match self.temps[temp] {
-                TempRef::Lvalue(lvalue) =>
-                    lvalue,
-                TempRef::Operand(..) =>
-                    bug!("using operand temp {:?} as lvalue", lvalue),
-            },
-            mir::Lvalue::Arg(arg) => self.args[arg],
+            mir::Lvalue::Var(_) |
+            mir::Lvalue::Temp(_) |
+            mir::Lvalue::Arg(_) |
+            mir::Lvalue::ReturnPointer => bug!(), // handled above
             mir::Lvalue::Static(def_id) => {
                 let const_ty = self.lvalue_ty(lvalue);
                 LvalueRef::new_sized(consts::get_static(ccx, def_id).val,
                                      LvalueTy::from_ty(const_ty))
             },
-            mir::Lvalue::ReturnPointer => {
-                let llval = if !fcx.fn_ty.ret.is_ignore() {
-                    bcx.with_block(|bcx| {
-                        fcx.get_ret_slot(bcx, "")
-                    })
-                } else {
-                    // This is a void return; that is, there’s no place to store the value and
-                    // there cannot really be one (or storing into it doesn’t make sense, anyway).
-                    // Ergo, we return an undef ValueRef, so we do not have to special-case every
-                    // place using lvalues, and could use it the same way you use a regular
-                    // ReturnPointer LValue (i.e. store into it, load from it etc).
-                    C_undef(fcx.fn_ty.ret.original_ty.ptr_to())
-                };
-                let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
-                let return_ty = fn_return_ty.unwrap();
-                LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty))
-            },
             mir::Lvalue::Projection(box mir::Projection {
                 ref base,
                 elem: mir::ProjectionElem::Deref
@@ -240,44 +230,41 @@ pub fn trans_lvalue(&mut self,
     }
 
     // Perform an action using the given Lvalue.
-    // If the Lvalue is an empty TempRef::Operand, then a temporary stack slot
+    // If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot
     // is created first, then used as an operand to update the Lvalue.
     pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                  lvalue: &mir::Lvalue<'tcx>, f: F) -> U
         where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
     {
-        match *lvalue {
-            mir::Lvalue::Temp(temp) => {
-                match self.temps[temp] {
-                    TempRef::Lvalue(lvalue) => f(self, lvalue),
-                    TempRef::Operand(None) => {
-                        let lvalue_ty = self.lvalue_ty(lvalue);
-                        let lvalue = LvalueRef::alloca(bcx,
-                                                       lvalue_ty,
-                                                       "lvalue_temp");
-                        let ret = f(self, lvalue);
-                        let op = self.trans_load(bcx, lvalue.llval, lvalue_ty);
-                        self.temps[temp] = TempRef::Operand(Some(op));
-                        ret
-                    }
-                    TempRef::Operand(Some(_)) => {
-                        // See comments in TempRef::new_operand as to why
-                        // we always have Some in a ZST TempRef::Operand.
-                        let ty = self.lvalue_ty(lvalue);
-                        if common::type_is_zero_size(bcx.ccx(), ty) {
-                            // Pass an undef pointer as no stores can actually occur.
-                            let llptr = C_undef(type_of(bcx.ccx(), ty).ptr_to());
-                            f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty)))
-                        } else {
-                            bug!("Lvalue temp already set");
-                        }
-                    }
-                }
-            }
-            _ => {
-                let lvalue = self.trans_lvalue(bcx, lvalue);
-                f(self, lvalue)
-            }
+        if let Some(index) = self.mir.local_index(lvalue) {
+            match self.locals[index] {
+                LocalRef::Lvalue(lvalue) => f(self, lvalue),
+                LocalRef::Operand(None) => {
+                    let lvalue_ty = self.lvalue_ty(lvalue);
+                    let lvalue = LvalueRef::alloca(bcx,
+                                                   lvalue_ty,
+                                                   "lvalue_temp");
+                    let ret = f(self, lvalue);
+                    let op = self.trans_load(bcx, lvalue.llval, lvalue_ty);
+                    self.locals[index] = LocalRef::Operand(Some(op));
+                    ret
+                }
+                LocalRef::Operand(Some(_)) => {
+                    // See comments in LocalRef::new_operand as to why
+                    // we always have Some in a ZST LocalRef::Operand.
+                    let ty = self.lvalue_ty(lvalue);
+                    if common::type_is_zero_size(bcx.ccx(), ty) {
+                        // Pass an undef pointer as no stores can actually occur.
+                        let llptr = C_undef(type_of(bcx.ccx(), ty).ptr_to());
+                        f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty)))
+                    } else {
+                        bug!("Lvalue local already set");
+                    }
+                }
+            }
+        } else {
+            let lvalue = self.trans_lvalue(bcx, lvalue);
+            f(self, lvalue)
         }
     }
 
...
@@ -84,16 +84,13 @@ pub struct MirContext<'bcx, 'tcx:'bcx> {
     /// Cached unreachable block
     unreachable_block: Option<Block<'bcx, 'tcx>>,
 
-    /// An LLVM alloca for each MIR `VarDecl`
-    vars: IndexVec<mir::Var, LvalueRef<'tcx>>,
-
-    /// The location where each MIR `TempDecl` is stored. This is
+    /// The location where each MIR arg/var/tmp/ret is stored. This is
     /// usually an `LvalueRef` representing an alloca, but not always:
     /// sometimes we can skip the alloca and just store the value
     /// directly using an `OperandRef`, which makes for tighter LLVM
     /// IR. The conditions for using an `OperandRef` are as follows:
     ///
-    /// - the type of the temporary must be judged "immediate" by `type_is_immediate`
+    /// - the type of the local must be judged "immediate" by `type_is_immediate`
     /// - the operand must never be referenced indirectly
    /// - we should not take its address using the `&` operator
    /// - nor should it appear in an lvalue path like `tmp.a`
@@ -102,12 +99,7 @@ pub struct MirContext<'bcx, 'tcx:'bcx> {
     ///
     /// Avoiding allocs can also be important for certain intrinsics,
     /// notably `expect`.
-    temps: IndexVec<mir::Temp, TempRef<'tcx>>,
-
-    /// The arguments to the function; as args are lvalues, these are
-    /// always indirect, though we try to avoid creating an alloca
-    /// when we can (and just reuse the pointer the caller provided).
-    args: IndexVec<mir::Arg, LvalueRef<'tcx>>,
+    locals: IndexVec<mir::Local, LocalRef<'tcx>>,
 
     /// Debug information for MIR scopes.
     scopes: IndexVec<mir::VisibilityScope, DIScope>
@@ -119,14 +111,14 @@ pub fn debug_loc(&self, source_info: mir::SourceInfo) -> DebugLoc {
     }
 }
 
-enum TempRef<'tcx> {
+enum LocalRef<'tcx> {
     Lvalue(LvalueRef<'tcx>),
     Operand(Option<OperandRef<'tcx>>),
 }
 
-impl<'tcx> TempRef<'tcx> {
+impl<'tcx> LocalRef<'tcx> {
     fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>,
-                         ty: ty::Ty<'tcx>) -> TempRef<'tcx> {
+                         ty: ty::Ty<'tcx>) -> LocalRef<'tcx> {
         if common::type_is_zero_size(ccx, ty) {
             // Zero-size temporaries aren't always initialized, which
             // doesn't matter because they don't contain data, but
@@ -142,9 +134,9 @@ fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>,
                 val: val,
                 ty: ty
             };
-            TempRef::Operand(Some(op))
+            LocalRef::Operand(Some(op))
         } else {
-            TempRef::Operand(None)
+            LocalRef::Operand(None)
         }
     }
 }
@@ -157,8 +149,8 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
     // Analyze the temps to determine which must be lvalues
     // FIXME
-    let (lvalue_temps, cleanup_kinds) = bcx.with_block(|bcx| {
-        (analyze::lvalue_temps(bcx, &mir),
+    let (lvalue_locals, cleanup_kinds) = bcx.with_block(|bcx| {
+        (analyze::lvalue_locals(bcx, &mir),
          analyze::cleanup_kinds(bcx, &mir))
     });
 
@@ -166,37 +158,49 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
     let scopes = debuginfo::create_mir_scopes(fcx);
 
     // Allocate variable and temp allocas
-    let args = arg_value_refs(&bcx, &mir, &scopes);
-    let vars = mir.var_decls.iter()
-                            .map(|decl| (bcx.monomorphize(&decl.ty), decl))
-                            .map(|(mty, decl)| {
-        let lvalue = LvalueRef::alloca(&bcx, mty, &decl.name.as_str());
-
-        let scope = scopes[decl.source_info.scope];
-        if !scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo {
-            bcx.with_block(|bcx| {
-                declare_local(bcx, decl.name, mty, scope,
-                              VariableAccess::DirectVariable { alloca: lvalue.llval },
-                              VariableKind::LocalVariable, decl.source_info.span);
-            });
-        }
-
-        lvalue
-    }).collect();
-    let temps = mir.temp_decls.iter()
-                              .map(|decl| bcx.monomorphize(&decl.ty))
-                              .enumerate()
-                              .map(|(i, mty)| if lvalue_temps.contains(i) {
-                                  TempRef::Lvalue(LvalueRef::alloca(&bcx,
-                                                                    mty,
-                                                                    &format!("temp{:?}", i)))
-                              } else {
-                                  // If this is an immediate temp, we do not create an
-                                  // alloca in advance. Instead we wait until we see the
-                                  // definition and update the operand there.
-                                  TempRef::new_operand(bcx.ccx(), mty)
-                              })
-                              .collect();
+    let locals = {
+        let args = arg_local_refs(&bcx, &mir, &scopes, &lvalue_locals);
+        let vars = mir.var_decls.iter().enumerate().map(|(i, decl)| {
+            let ty = bcx.monomorphize(&decl.ty);
+            let scope = scopes[decl.source_info.scope];
+            let dbg = !scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo;
+
+            let local = mir.local_index(&mir::Lvalue::Var(mir::Var::new(i))).unwrap();
+            if !lvalue_locals.contains(local.index()) && !dbg {
+                return LocalRef::new_operand(bcx.ccx(), ty);
+            }
+
+            let lvalue = LvalueRef::alloca(&bcx, ty, &decl.name.as_str());
+            if dbg {
+                bcx.with_block(|bcx| {
+                    declare_local(bcx, decl.name, ty, scope,
+                                  VariableAccess::DirectVariable { alloca: lvalue.llval },
+                                  VariableKind::LocalVariable, decl.source_info.span);
+                });
+            }
+            LocalRef::Lvalue(lvalue)
+        });
+
+        let locals = mir.temp_decls.iter().enumerate().map(|(i, decl)| {
+            (mir::Lvalue::Temp(mir::Temp::new(i)), decl.ty)
+        }).chain(mir.return_ty.maybe_converging().map(|ty| (mir::Lvalue::ReturnPointer, ty)));
+
+        args.into_iter().chain(vars).chain(locals.map(|(lvalue, ty)| {
+            let ty = bcx.monomorphize(&ty);
+            let local = mir.local_index(&lvalue).unwrap();
+            if lvalue == mir::Lvalue::ReturnPointer && fcx.fn_ty.ret.is_indirect() {
+                let llretptr = llvm::get_param(fcx.llfn, 0);
+                LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
+            } else if lvalue_locals.contains(local.index()) {
+                LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", lvalue)))
+            } else {
+                // If this is an immediate local, we do not create an
+                // alloca in advance. Instead we wait until we see the
+                // definition and update the operand there.
+                LocalRef::new_operand(bcx.ccx(), ty)
+            }
+        })).collect()
+    };
 
     // Allocate a `Block` for every basic block
     let block_bcxs: IndexVec<mir::BasicBlock, Block<'blk,'tcx>> =
@@ -225,9 +229,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
         unreachable_block: None,
         cleanup_kinds: cleanup_kinds,
         landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
-        vars: vars,
-        temps: temps,
-        args: args,
+        locals: locals,
         scopes: scopes
     };
 
@@ -266,10 +268,11 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
 /// Produce, for each argument, a `ValueRef` pointing at the
 /// argument's value. As arguments are lvalues, these are always
 /// indirect.
-fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                              mir: &mir::Mir<'tcx>,
-                              scopes: &IndexVec<mir::VisibilityScope, DIScope>)
-                              -> IndexVec<mir::Arg, LvalueRef<'tcx>> {
+fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                              mir: &mir::Mir<'tcx>,
+                              scopes: &IndexVec<mir::VisibilityScope, DIScope>,
+                              lvalue_locals: &BitVector)
+                              -> Vec<LocalRef<'tcx>> {
     let fcx = bcx.fcx();
     let tcx = bcx.tcx();
     let mut idx = 0;
@@ -285,6 +288,7 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
     mir.arg_decls.iter().enumerate().map(|(arg_index, arg_decl)| {
         let arg_ty = bcx.monomorphize(&arg_decl.ty);
+        let local = mir.local_index(&mir::Lvalue::Arg(mir::Arg::new(arg_index))).unwrap();
         if arg_decl.spread {
             // This argument (e.g. the last argument in the "rust-call" ABI)
             // is a tuple that was spread at the ABI level and now we have
@@ -305,8 +309,8 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
                 let arg = &fcx.fn_ty.args[idx];
                 idx += 1;
                 if common::type_is_fat_ptr(tcx, tupled_arg_ty) {
                     // We pass fat pointers as two words, but inside the tuple
                     // they are the two sub-fields of a single aggregate field.
                     let meta = &fcx.fn_ty.args[idx];
                     idx += 1;
                     arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, dst));
@@ -335,7 +339,7 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                   bcx.fcx().span.unwrap_or(DUMMY_SP));
                 }));
             }
-            return LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty));
+            return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty)));
         }
 
         let arg = &fcx.fn_ty.args[idx];
@@ -345,9 +349,42 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
             // already put it in a temporary alloca and gave it up, unless
            // we emit extra-debug-info, which requires local allocas :(.
            // FIXME: lifetimes
+            if arg.pad.is_some() {
+                llarg_idx += 1;
+            }
             let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
             llarg_idx += 1;
             llarg
+        } else if !lvalue_locals.contains(local.index()) &&
+                  !arg.is_indirect() && arg.cast.is_none() &&
+                  arg_scope.is_none() {
+            if arg.is_ignore() {
+                return LocalRef::new_operand(bcx.ccx(), arg_ty);
+            }
+
+            // We don't have to cast or keep the argument in the alloca.
+            // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
+            // of putting everything in allocas just so we can use llvm.dbg.declare.
+            if arg.pad.is_some() {
+                llarg_idx += 1;
+            }
+            let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
+            llarg_idx += 1;
+            let val = if common::type_is_fat_ptr(tcx, arg_ty) {
+                let meta = &fcx.fn_ty.args[idx];
+                idx += 1;
+                assert_eq!((meta.cast, meta.pad), (None, None));
+                let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
+                llarg_idx += 1;
+                OperandValue::Pair(llarg, llmeta)
+            } else {
+                OperandValue::Immediate(llarg)
+            };
+            let operand = OperandRef {
+                val: val,
+                ty: arg_ty
+            };
+            return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
         } else {
             let lltemp = bcx.with_block(|bcx| {
                 base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
@@ -441,7 +478,7 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
                               bcx.fcx().span.unwrap_or(DUMMY_SP));
             }
         }));
-        LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))
+        LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)))
     }).collect()
 }
...
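`LocalRef` is the pivot of the whole commit: a local is either backed by memory (`Lvalue`, i.e. an alloca or the caller-provided return slot) or is an immediate whose `Operand` stays `None` until its defining assignment is translated. A small standalone model of that state machine, with strings standing in for LLVM values (illustrative only, not the real `LvalueRef`/`OperandRef` types):

// Illustrative model of LocalRef and the "define on first assignment" rule.
#[derive(Debug, PartialEq)]
enum LocalRef {
    Lvalue(String),          // backed by an alloca (or the return slot)
    Operand(Option<String>), // immediate; None until its definition is seen
}

fn allocate_locals(types: &[&str], lvalue_locals: &[bool]) -> Vec<LocalRef> {
    types.iter().zip(lvalue_locals).map(|(ty, &needs_alloca)| {
        if needs_alloca {
            LocalRef::Lvalue(format!("alloca {}", ty))
        } else {
            // No alloca in advance: wait for the assignment.
            LocalRef::Operand(None)
        }
    }).collect()
}

fn assign(locals: &mut [LocalRef], index: usize, value: &str) {
    match &mut locals[index] {
        // Memory-backed local: the real code emits a store into the alloca.
        LocalRef::Lvalue(slot) => println!("store {} into {}", value, slot),
        // Immediate local: its single assignment *defines* the operand.
        LocalRef::Operand(op) if op.is_none() => *op = Some(value.to_string()),
        LocalRef::Operand(_) => panic!("operand local already assigned"),
    }
}

fn main() {
    // Local 0 was judged "immediate" by the analysis; local 1 needs memory.
    let mut locals = allocate_locals(&["i32", "[u8; 64]"], &[false, true]);
    assign(&mut locals, 0, "%sum");
    assign(&mut locals, 1, "%buf");
    assert_eq!(locals[0], LocalRef::Operand(Some("%sum".to_string())));
    assert!(matches!(locals[1], LocalRef::Lvalue(_)));
}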
@@ -21,7 +21,7 @@
 use std::fmt;
 
-use super::{MirContext, TempRef};
+use super::{MirContext, LocalRef};
 
 /// The representation of a Rust value. The enum variant is in fact
 /// uniquely determined by the value's type, but is kept as a
@@ -112,6 +112,8 @@ pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>)
         if let OperandValue::Immediate(llval) = self.val {
             // Deconstruct the immediate aggregate.
             if common::type_is_imm_pair(bcx.ccx(), self.ty) {
+                debug!("Operand::unpack_if_pair: unpacking {:?}", self);
+
                 let mut a = bcx.extract_value(llval, 0);
                 let mut b = bcx.extract_value(llval, 1);
@@ -171,17 +173,17 @@ pub fn trans_consume(&mut self,
     {
         debug!("trans_consume(lvalue={:?})", lvalue);
 
-        // watch out for temporaries that do not have an
+        // watch out for locals that do not have an
         // alloca; they are handled somewhat differently
-        if let &mir::Lvalue::Temp(index) = lvalue {
-            match self.temps[index] {
-                TempRef::Operand(Some(o)) => {
+        if let Some(index) = self.mir.local_index(lvalue) {
+            match self.locals[index] {
+                LocalRef::Operand(Some(o)) => {
                     return o;
                 }
-                TempRef::Operand(None) => {
+                LocalRef::Operand(None) => {
                     bug!("use of {:?} before def", lvalue);
                 }
-                TempRef::Lvalue(..) => {
+                LocalRef::Lvalue(..) => {
                     // use path below
                 }
             }
@@ -189,9 +191,8 @@ pub fn trans_consume(&mut self,
 
         // Moves out of pair fields are trivial.
         if let &mir::Lvalue::Projection(ref proj) = lvalue {
-            if let mir::Lvalue::Temp(index) = proj.base {
-                let temp_ref = &self.temps[index];
-                if let &TempRef::Operand(Some(o)) = temp_ref {
+            if let Some(index) = self.mir.local_index(&proj.base) {
+                if let LocalRef::Operand(Some(o)) = self.locals[index] {
                     match (o.val, &proj.elem) {
                         (OperandValue::Pair(a, b),
                          &mir::ProjectionElem::Field(ref f, ty)) => {
...
@@ -13,7 +13,7 @@
 use common::{self, BlockAndBuilder};
 
 use super::MirContext;
-use super::TempRef;
+use super::LocalRef;
 
 impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
     pub fn trans_statement(&mut self,
@@ -27,37 +27,34 @@ pub fn trans_statement(&mut self,
         debug_loc.apply(bcx.fcx());
         match statement.kind {
             mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
-                match *lvalue {
-                    mir::Lvalue::Temp(index) => {
-                        match self.temps[index] {
-                            TempRef::Lvalue(tr_dest) => {
-                                self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
-                            }
-                            TempRef::Operand(None) => {
-                                let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue,
-                                                                               debug_loc);
-                                self.temps[index] = TempRef::Operand(Some(operand));
-                                bcx
-                            }
-                            TempRef::Operand(Some(_)) => {
-                                let ty = self.lvalue_ty(lvalue);
-
-                                if !common::type_is_zero_size(bcx.ccx(), ty) {
-                                    span_bug!(statement.source_info.span,
-                                              "operand {:?} already assigned",
-                                              rvalue);
-                                } else {
-                                    // If the type is zero-sized, it's already been set here,
-                                    // but we still need to make sure we translate the operand
-                                    self.trans_rvalue_operand(bcx, rvalue, debug_loc).0
-                                }
-                            }
-                        }
-                    }
-                    _ => {
-                        let tr_dest = self.trans_lvalue(&bcx, lvalue);
-                        self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
-                    }
+                if let Some(index) = self.mir.local_index(lvalue) {
+                    match self.locals[index] {
+                        LocalRef::Lvalue(tr_dest) => {
+                            self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
+                        }
+                        LocalRef::Operand(None) => {
+                            let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue,
+                                                                           debug_loc);
+                            self.locals[index] = LocalRef::Operand(Some(operand));
+                            bcx
+                        }
+                        LocalRef::Operand(Some(_)) => {
+                            let ty = self.lvalue_ty(lvalue);
+
+                            if !common::type_is_zero_size(bcx.ccx(), ty) {
+                                span_bug!(statement.source_info.span,
+                                          "operand {:?} already assigned",
+                                          rvalue);
+                            } else {
+                                // If the type is zero-sized, it's already been set here,
+                                // but we still need to make sure we translate the operand
+                                self.trans_rvalue_operand(bcx, rvalue, debug_loc).0
+                            }
+                        }
+                    }
+                } else {
+                    let tr_dest = self.trans_lvalue(&bcx, lvalue);
+                    self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
                 }
             }
...
@@ -11,6 +11,7 @@
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
+#![feature(rustc_attrs)]
 
 pub struct Bytes {
     a: u8,
@@ -21,6 +22,7 @@ pub struct Bytes {
 
 // CHECK-LABEL: @borrow
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn borrow(x: &i32) -> &i32 {
 // CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull
     x
@@ -28,6 +30,7 @@ pub fn borrow(x: &i32) -> &i32 {
 
 // CHECK-LABEL: @_box
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn _box(x: Box<i32>) -> i32 {
 // CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull
     *x
...
@@ -13,7 +13,7 @@
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
-#![feature(naked_functions)]
+#![feature(naked_functions, rustc_attrs)]
 
 // CHECK: Function Attrs: naked uwtable
 // CHECK-NEXT: define internal void @naked_empty()
@@ -26,6 +26,7 @@ fn naked_empty() {
 // CHECK: Function Attrs: naked uwtable
 #[no_mangle]
 #[naked]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 // CHECK-NEXT: define internal void @naked_with_args(i{{[0-9]+}})
 fn naked_with_args(a: isize) {
     // CHECK: %a = alloca i{{[0-9]+}}
@@ -45,6 +46,7 @@ fn naked_with_return() -> isize {
 // CHECK-NEXT: define internal i{{[0-9]+}} @naked_with_args_and_return(i{{[0-9]+}})
 #[no_mangle]
 #[naked]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 fn naked_with_args_and_return(a: isize) -> isize {
     // CHECK: %a = alloca i{{[0-9]+}}
     // CHECK: ret i{{[0-9]+}} %{{[0-9]+}}
...
@@ -10,10 +10,13 @@
 
 // error-pattern: overflow representing the type `S`
 
+#![feature(rustc_attrs)]
+
 trait Mirror { type It: ?Sized; }
 impl<T: ?Sized> Mirror for T { type It = Self; }
 struct S(Option<<S as Mirror>::It>);
 
+#[rustc_no_mir] // FIXME #27840 MIR tries to represent `std::option::Option<S>` first.
 fn main() {
     let _s = S(None);
 }