Commit 18ecc564 authored by Eduard-Mihai Burtescu

rustc_trans: support scalar pairs directly in the Rust ABI.

Parent 7a361414
@@ -74,22 +74,19 @@ pub fn AddFunctionAttrStringValue(llfn: ValueRef,
     }
 }
 
 #[repr(C)]
 #[derive(Copy, Clone)]
 pub enum AttributePlace {
+    ReturnValue,
     Argument(u32),
     Function,
 }
 
 impl AttributePlace {
-    pub fn ReturnValue() -> Self {
-        AttributePlace::Argument(0)
-    }
-
     pub fn as_uint(self) -> c_uint {
         match self {
+            AttributePlace::ReturnValue => 0,
+            AttributePlace::Argument(i) => 1 + i,
             AttributePlace::Function => !0,
-            AttributePlace::Argument(i) => i,
         }
     }
 }
......
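With `ReturnValue` promoted to a real variant, the index math matches LLVM's attribute-index convention: 0 is the return value, `1 + i` is the i-th parameter, and `!0` is the function itself. A quick illustration of the new mapping, assuming the definitions in the hunk above:

```rust
// Assumes the AttributePlace definition from the hunk above.
assert_eq!(AttributePlace::ReturnValue.as_uint(), 0);
assert_eq!(AttributePlace::Argument(0).as_uint(), 1); // first parameter
assert_eq!(AttributePlace::Function.as_uint(), !0);   // c_uint::MAX
```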
This diff is collapsed.
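The collapsed diff is where the core of the change lands: `ArgType`'s separate `cast`/`nested` bookkeeping is replaced by a single `mode` discriminant, which the visible hunks import from the `abi` module. The sketch below is reconstructed from the match arms used throughout this commit; the payload types are assumptions, since the defining hunk is collapsed here.

```rust
// Sketch only: shape inferred from the PassMode uses in the visible hunks.
pub enum PassMode {
    /// The argument is ignored entirely (e.g. zero-sized types).
    Ignore,
    /// Passed as a single immediate value.
    Direct(ArgAttributes),
    /// Passed as two immediates, e.g. the data/metadata halves of a fat pointer.
    Pair(ArgAttributes, ArgAttributes),
    /// Spilled and reloaded through an ABI-mandated cast type.
    Cast(CastTarget),
    /// Passed by pointer to a temporary (`byval`/`sret` where applicable).
    Indirect(ArgAttributes),
}
```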
@@ -116,7 +116,7 @@ pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRe
         naked(llfn, true);
     } else if attr.check_name("allocator") {
         Attribute::NoAlias.apply_llfn(
-            llvm::AttributePlace::ReturnValue(), llfn);
+            llvm::AttributePlace::ReturnValue, llfn);
     } else if attr.check_name("unwind") {
         unwind(llfn, true);
     } else if attr.check_name("rustc_allocator_nounwind") {
......
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{FnType, ArgType, ArgAttribute, LayoutExt, Uniform};
+use abi::{FnType, ArgType, LayoutExt, Uniform};
 use context::CrateContext;
 
 // Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
@@ -35,8 +35,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
 
 fn classify_arg_ty(arg: &mut ArgType) {
     if arg.layout.is_aggregate() {
-        arg.make_indirect();
-        arg.attrs.set(ArgAttribute::ByVal);
+        arg.make_indirect_byval();
     }
 }
......
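Every C-ABI backend used to spell out the same two steps; the new `make_indirect_byval` helper folds them into one call. A plausible sketch of the helper, assuming it simply combines the calls it replaces at each call site (its real definition is in the collapsed `abi` diff above):

```rust
// Sketch, not the verbatim definition.
impl<'tcx> ArgType<'tcx> {
    pub fn make_indirect_byval(&mut self) {
        self.make_indirect();
        match self.mode {
            // After make_indirect() the argument is passed through a pointer,
            // so the ByVal attribute can be attached to its attributes.
            PassMode::Indirect(ref mut attrs) => attrs.set(ArgAttribute::ByVal),
            _ => bug!("ArgType::make_indirect_byval: argument is not indirect"),
        }
    }
}
```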
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind};
+use abi::{ArgAttribute, FnType, LayoutExt, PassMode, Reg, RegKind};
 use common::CrateContext;
 use rustc::ty::layout::{self, TyLayout};
@@ -82,8 +82,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
         if arg.layout.is_aggregate() {
-            arg.make_indirect();
-            arg.attrs.set(ArgAttribute::ByVal);
+            arg.make_indirect_byval();
         } else {
             arg.extend_integer_width_to(32);
         }
@@ -102,7 +101,15 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     let mut free_regs = 2;
 
     for arg in &mut fty.args {
-        if arg.is_ignore() || arg.is_indirect() { continue; }
+        let attrs = match arg.mode {
+            PassMode::Ignore |
+            PassMode::Indirect(_) => continue,
+            PassMode::Direct(ref mut attrs) => attrs,
+            PassMode::Pair(..) |
+            PassMode::Cast(_) => {
+                bug!("x86 shouldn't be passing arguments by {:?}", arg.mode)
+            }
+        };
 
         // At this point we know this must be a primitive of sorts.
         let unit = arg.layout.homogeneous_aggregate(ccx).unwrap();
@@ -124,7 +131,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
         free_regs -= size_in_regs;
 
         if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer {
-            arg.attrs.set(ArgAttribute::InReg);
+            attrs.set(ArgAttribute::InReg);
         }
 
         if free_regs == 0 {
......
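Because attributes now live inside the `PassMode` payload, the register-allocation loop borrows them mutably up front (`PassMode::Direct(ref mut attrs)`) instead of writing through the removed `arg.attrs` field. For intuition about the bookkeeping:

```rust
// Worked example (x86 with 2 free integer registers, e.g. fastcall-style):
//   fn f(a: u32, b: u32, c: u32)
//   a: needs 1 register -> tagged InReg, free_regs 2 -> 1
//   b: needs 1 register -> tagged InReg, free_regs 1 -> 0
//   c: no registers left -> stays on the stack, loop stops assigning registers
```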
@@ -11,7 +11,7 @@
 // The classification code for the x86_64 ABI is taken from the clay language
 // https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
 
-use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind};
+use abi::{ArgType, CastTarget, FnType, LayoutExt, Reg, RegKind};
 use context::CrateContext;
 
 use rustc::ty::layout::{self, TyLayout, Size};
@@ -214,11 +214,11 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType
         };
 
         if in_mem {
-            arg.make_indirect();
             if is_arg {
-                arg.attrs.set(ArgAttribute::ByVal);
+                arg.make_indirect_byval();
             } else {
                 // `sret` parameter thus one less integer register available
+                arg.make_indirect();
                 int_regs -= 1;
             }
         } else {
......
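The restructuring also makes the x86_64 asymmetry explicit: only true arguments get `byval`, while an indirect return consumes an integer register for the hidden out-pointer. Illustratively (signatures are hypothetical, not from the diff):

```rust
// x86_64 SysV intuition for the branch above:
//   fn ret_big() -> [u64; 4]   // hidden &out pointer occupies %rdi -> int_regs -= 1
//   fn take_big(x: [u64; 4])   // passed indirectly with `byval` on the stack;
//                              // no integer register is debited for it
```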
@@ -13,7 +13,7 @@
 use intrinsics::{self, Intrinsic};
 use llvm;
 use llvm::{ValueRef};
-use abi::{Abi, FnType};
+use abi::{Abi, FnType, PassMode};
 use mir::lvalue::{LvalueRef, Alignment};
 use mir::operand::{OperandRef, OperandValue};
 use base::*;
@@ -237,7 +237,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         "volatile_load" => {
             let tp_ty = substs.type_at(0);
             let mut ptr = args[0].immediate();
-            if let Some(ty) = fn_ty.ret.cast {
+            if let PassMode::Cast(ty) = fn_ty.ret.mode {
                 ptr = bcx.pointercast(ptr, ty.llvm_type(ccx).ptr_to());
             }
             let load = bcx.volatile_load(ptr);
@@ -671,7 +671,7 @@ fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
     };
 
     if !fn_ty.ret.is_ignore() {
-        if let Some(ty) = fn_ty.ret.cast {
+        if let PassMode::Cast(ty) = fn_ty.ret.mode {
             let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to());
             bcx.store(llval, ptr, Some(ccx.align_of(ret_ty)));
         } else {
......
@@ -15,7 +15,7 @@
 use rustc::ty::layout::{self, LayoutOf};
 use rustc::traits;
 use rustc::mir;
-use abi::{Abi, FnType, ArgType};
+use abi::{Abi, FnType, ArgType, PassMode};
 use base;
 use callee;
 use builder::Builder;
@@ -207,44 +207,47 @@ fn trans_terminator(&mut self,
             }
 
             mir::TerminatorKind::Return => {
-                if self.fn_ty.ret.is_ignore() || self.fn_ty.ret.is_indirect() {
-                    bcx.ret_void();
-                    return;
-                }
+                let llval = match self.fn_ty.ret.mode {
+                    PassMode::Ignore | PassMode::Indirect(_) => {
+                        bcx.ret_void();
+                        return;
+                    }
 
-                let llval = if let Some(cast_ty) = self.fn_ty.ret.cast {
-                    let op = match self.locals[mir::RETURN_POINTER] {
-                        LocalRef::Operand(Some(op)) => op,
-                        LocalRef::Operand(None) => bug!("use of return before def"),
-                        LocalRef::Lvalue(tr_lvalue) => {
-                            OperandRef {
-                                val: Ref(tr_lvalue.llval, tr_lvalue.alignment),
-                                layout: tr_lvalue.layout
-                            }
-                        }
-                    };
-                    let llslot = match op.val {
-                        Immediate(_) | Pair(..) => {
-                            let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout, "ret");
-                            op.val.store(&bcx, scratch);
-                            scratch.llval
-                        }
-                        Ref(llval, align) => {
-                            assert_eq!(align, Alignment::AbiAligned,
-                                       "return pointer is unaligned!");
-                            llval
+                    PassMode::Direct(_) | PassMode::Pair(..) => {
+                        let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));
+                        if let Ref(llval, align) = op.val {
+                            bcx.load(llval, align.non_abi())
+                        } else {
+                            op.immediate_or_packed_pair(&bcx)
                         }
-                    };
-                    let load = bcx.load(
-                        bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()),
-                        Some(self.fn_ty.ret.layout.align));
-                    load
-                } else {
-                    let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));
-                    if let Ref(llval, align) = op.val {
-                        bcx.load(llval, align.non_abi())
-                    } else {
-                        op.immediate_or_packed_pair(&bcx)
                     }
+
+                    PassMode::Cast(cast_ty) => {
+                        let op = match self.locals[mir::RETURN_POINTER] {
+                            LocalRef::Operand(Some(op)) => op,
+                            LocalRef::Operand(None) => bug!("use of return before def"),
+                            LocalRef::Lvalue(tr_lvalue) => {
+                                OperandRef {
+                                    val: Ref(tr_lvalue.llval, tr_lvalue.alignment),
+                                    layout: tr_lvalue.layout
+                                }
+                            }
+                        };
+                        let llslot = match op.val {
+                            Immediate(_) | Pair(..) => {
+                                let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout, "ret");
+                                op.val.store(&bcx, scratch);
+                                scratch.llval
+                            }
+                            Ref(llval, align) => {
+                                assert_eq!(align, Alignment::AbiAligned,
+                                           "return pointer is unaligned!");
+                                llval
+                            }
+                        };
+                        bcx.load(
+                            bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()),
+                            Some(self.fn_ty.ret.layout.align))
+                    }
                 };
                 bcx.ret(llval);
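Direct and Pair returns lean on `OperandRef::immediate_or_packed_pair`, whose definition is not visible on this page (it presumably sits alongside `OperandRef` in the imported `mir::operand` module). A sketch consistent with its uses here: for a scalar-pair operand it packs both halves into the two fields of the layout's LLVM aggregate type, otherwise it yields the plain immediate.

```rust
// Sketch under the stated assumption, not the verbatim method.
pub fn immediate_or_packed_pair(self, bcx: &Builder<'a, 'tcx>) -> ValueRef {
    if let OperandValue::Pair(a, b) = self.val {
        // `ret` needs a single first-class value, so build the pair aggregate.
        let llty = self.layout.llvm_type(bcx.ccx);
        let mut llpair = C_undef(llty);
        llpair = bcx.insert_value(llpair, a, 0);
        llpair = bcx.insert_value(llpair, b, 1);
        llpair
    } else {
        self.immediate()
    }
}
```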
@@ -559,12 +562,12 @@ fn trans_terminator(&mut self,
                 for (i, arg) in first_args.iter().enumerate() {
                     let mut op = self.trans_operand(&bcx, arg);
-                    if i == 0 {
-                        if let Pair(_, meta) = op.val {
-                            if let Some(ty::InstanceDef::Virtual(_, idx)) = def {
-                                llfn = Some(meth::VirtualIndex::from_index(idx)
-                                    .get_fn(&bcx, meta, &fn_ty));
-                            }
+
+                    if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
+                        if let Pair(data_ptr, meta) = op.val {
+                            llfn = Some(meth::VirtualIndex::from_index(idx)
+                                .get_fn(&bcx, meta, &fn_ty));
+                            llargs.push(data_ptr);
+                            continue;
                         }
                     }
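The rewritten condition treats the receiver of a virtual call as a scalar pair from the start: the metadata half drives the vtable lookup while the data half is pushed directly as the thin self pointer, and `continue` skips the generic argument path for it.

```rust
// Conceptually, for a call through `obj: &dyn Trait` (illustrative only):
//   op.val == Pair(data_ptr, meta)
//   llfn    = meta[idx]          // vtable slot via meth::VirtualIndex::from_index(idx)
//   llargs  = [data_ptr, ...]    // thin pointer becomes the first argument
```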
@@ -604,21 +607,6 @@ fn trans_argument(&mut self,
                       op: OperandRef<'tcx>,
                       llargs: &mut Vec<ValueRef>,
                       arg: &ArgType<'tcx>) {
-        if let Pair(a, b) = op.val {
-            // Treat the values in a fat pointer separately.
-            if !arg.nested.is_empty() {
-                assert_eq!(arg.nested.len(), 2);
-                let imm_op = |x| OperandRef {
-                    val: Immediate(x),
-                    // We won't be checking the type again.
-                    layout: bcx.ccx.layout_of(bcx.tcx().types.never)
-                };
-                self.trans_argument(bcx, imm_op(a), llargs, &arg.nested[0]);
-                self.trans_argument(bcx, imm_op(b), llargs, &arg.nested[1]);
-                return;
-            }
-        }
-
         // Fill padding with undef value, where applicable.
         if let Some(ty) = arg.pad {
             llargs.push(C_undef(ty.llvm_type(bcx.ccx)));
@@ -628,15 +616,29 @@ fn trans_argument(&mut self,
             return;
         }
 
+        if let PassMode::Pair(..) = arg.mode {
+            match op.val {
+                Pair(a, b) => {
+                    llargs.push(a);
+                    llargs.push(b);
+                    return;
+                }
+                _ => bug!("trans_argument: {:?} invalid for pair argument", op)
+            }
+        }
+
         // Force by-ref if we have to load through a cast pointer.
         let (mut llval, align, by_ref) = match op.val {
             Immediate(_) | Pair(..) => {
-                if arg.is_indirect() || arg.cast.is_some() {
-                    let scratch = LvalueRef::alloca(bcx, arg.layout, "arg");
-                    op.val.store(bcx, scratch);
-                    (scratch.llval, Alignment::AbiAligned, true)
-                } else {
-                    (op.immediate_or_packed_pair(bcx), Alignment::AbiAligned, false)
+                match arg.mode {
+                    PassMode::Indirect(_) | PassMode::Cast(_) => {
+                        let scratch = LvalueRef::alloca(bcx, arg.layout, "arg");
+                        op.val.store(bcx, scratch);
+                        (scratch.llval, Alignment::AbiAligned, true)
+                    }
+                    _ => {
+                        (op.immediate_or_packed_pair(bcx), Alignment::AbiAligned, false)
+                    }
                 }
             }
             Ref(llval, align @ Alignment::Packed(_)) if arg.is_indirect() => {
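This is the caller-side payoff of the new Pair mode: both halves of a fat pointer go straight into the argument list, where the old code routed them through the `arg.nested` special case removed above. Illustratively (function and types assumed, not taken from the diff):

```rust
// A Rust-ABI function taking a slice now lowers to two scalar parameters:
//   fn sum(xs: &[u8]) -> u8
//   ; before: the pair was split via the recursive `arg.nested` hack
//   ; after:  sum(i8* %xs.0, i64 %xs.1)
// The ".0"/".1" parameter names match those set in the mir/mod.rs hunk below.
```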
@@ -653,7 +655,7 @@ fn trans_argument(&mut self,
 
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
-            if let Some(ty) = arg.cast {
+            if let PassMode::Cast(ty) = arg.mode {
                 llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()),
                                  (align | Alignment::Packed(arg.layout.align))
                                      .non_abi());
@@ -890,7 +892,7 @@ fn store_return(&mut self,
             }
             DirectOperand(index) => {
                 // If there is a cast, we have to store and reload.
-                let op = if ret_ty.cast.is_some() {
+                let op = if let PassMode::Cast(_) = ret_ty.mode {
                     let tmp = LvalueRef::alloca(bcx, ret_ty.layout, "tmp_ret");
                     tmp.storage_live(bcx);
                     ret_ty.store(bcx, llval, tmp);
......
@@ -22,7 +22,7 @@
 use common::{CrateContext, Funclet};
 use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
 use monomorphize::Instance;
-use abi::{ArgAttribute, FnType};
+use abi::{ArgAttribute, FnType, PassMode};
 use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
 use syntax::symbol::keywords;
@@ -429,55 +429,52 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         let arg = &mircx.fn_ty.args[idx];
         idx += 1;
-        let lvalue = if arg.is_indirect() {
-            // Don't copy an indirect argument to an alloca, the caller
-            // already put it in a temporary alloca and gave it up
-            // FIXME: lifetimes
-            if arg.pad.is_some() {
-                llarg_idx += 1;
-            }
-            let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-            bcx.set_value_name(llarg, &name);
-            llarg_idx += 1;
-            LvalueRef::new_sized(llarg, arg.layout, Alignment::AbiAligned)
-        } else if !lvalue_locals.contains(local.index()) &&
-                  !arg.nested.is_empty() {
-            assert_eq!(arg.nested.len(), 2);
-            let (a, b) = (&arg.nested[0], &arg.nested[1]);
-            assert!(!a.is_ignore() && a.cast.is_none() && a.pad.is_none());
-            assert!(!b.is_ignore() && b.cast.is_none() && b.pad.is_none());
-
-            let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-            bcx.set_value_name(a, &(name.clone() + ".0"));
-            llarg_idx += 1;
-
-            let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-            bcx.set_value_name(b, &(name + ".1"));
-            llarg_idx += 1;
-
-            return LocalRef::Operand(Some(OperandRef {
-                val: OperandValue::Pair(a, b),
-                layout: arg.layout
-            }));
-        } else if !lvalue_locals.contains(local.index()) &&
-                  !arg.is_indirect() && arg.cast.is_none() &&
-                  arg_scope.is_none() {
-            if arg.is_ignore() {
-                return LocalRef::new_operand(bcx.ccx, arg.layout);
-            }
+        if arg.pad.is_some() {
+            llarg_idx += 1;
+        }
+
+        if arg_scope.is_none() && !lvalue_locals.contains(local.index()) {
             // We don't have to cast or keep the argument in the alloca.
             // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
             // of putting everything in allocas just so we can use llvm.dbg.declare.
-            if arg.pad.is_some() {
-                llarg_idx += 1;
-            }
-            let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-            bcx.set_value_name(llarg, &name);
-            llarg_idx += 1;
-            return LocalRef::Operand(Some(
-                OperandRef::from_immediate_or_packed_pair(bcx, llarg, arg.layout)
-            ));
+            let local = |op| LocalRef::Operand(Some(op));
+            match arg.mode {
+                PassMode::Ignore => {
+                    return local(OperandRef::new_zst(bcx.ccx, arg.layout));
+                }
+                PassMode::Direct(_) => {
+                    let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
+                    bcx.set_value_name(llarg, &name);
+                    llarg_idx += 1;
+                    return local(
+                        OperandRef::from_immediate_or_packed_pair(bcx, llarg, arg.layout));
+                }
+                PassMode::Pair(..) => {
+                    let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
+                    bcx.set_value_name(a, &(name.clone() + ".0"));
+                    llarg_idx += 1;
+
+                    let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
+                    bcx.set_value_name(b, &(name + ".1"));
+                    llarg_idx += 1;
+
+                    return local(OperandRef {
+                        val: OperandValue::Pair(a, b),
+                        layout: arg.layout
+                    });
+                }
+                _ => {}
+            }
+        }
+
+        let lvalue = if arg.is_indirect() {
+            // Don't copy an indirect argument to an alloca, the caller
+            // already put it in a temporary alloca and gave it up.
+            // FIXME: lifetimes
+            let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
+            bcx.set_value_name(llarg, &name);
+            llarg_idx += 1;
+            LvalueRef::new_sized(llarg, arg.layout, Alignment::AbiAligned)
         } else {
             let tmp = LvalueRef::alloca(bcx, arg.layout, &name);
             arg.store_fn_arg(bcx, &mut llarg_idx, tmp);
@@ -489,16 +486,19 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             // The Rust ABI passes indirect variables using a pointer and a manual copy, so we
             // need to insert a deref here, but the C ABI uses a pointer and a copy using the
             // byval attribute, for which LLVM does the deref itself, so we must not add it.
-            let variable_access = if arg.is_indirect() &&
-                !arg.attrs.contains(ArgAttribute::ByVal) {
-                VariableAccess::IndirectVariable {
-                    alloca: lvalue.llval,
-                    address_operations: &deref_op,
-                }
-            } else {
-                VariableAccess::DirectVariable { alloca: lvalue.llval }
+            let mut variable_access = VariableAccess::DirectVariable {
+                alloca: lvalue.llval
             };
+            if let PassMode::Indirect(ref attrs) = arg.mode {
+                if !attrs.contains(ArgAttribute::ByVal) {
+                    variable_access = VariableAccess::IndirectVariable {
+                        alloca: lvalue.llval,
+                        address_operations: &deref_op,
+                    };
+                }
+            }
             declare_local(
                 bcx,
                 &mircx.debug_context,
......