Commit 030244cd authored by Irina Popa

rustc_target: move in cabi_* from rustc_trans.

Parent fb15d447
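This commit moves the cabi_* modules out of rustc_trans and into rustc_target as abi::call::* submodules, and replaces the LLVM-specific llvm::CallConv field on FnType with a new backend-independent Conv enum. Most hunks below are the mechanical fallout: inside the new crate, import paths drop the rustc_target:: prefix, and cabi_foo::compute_abi_info becomes foo::compute_abi_info. A minimal, self-contained sketch of the resulting per-architecture dispatch (simplified stand-in types, not the real rustc_target items; the actual entry point is FnType::adjust_for_cabi, shown later in this diff):

// Sketch only: FnType here is a hypothetical stand-in with its fields elided.
struct FnType { /* argument/return classification elided */ }

// Each architecture module exposes a compute_abi_info entry point.
mod x86 { pub fn compute_abi_info(_fty: &mut super::FnType) { /* classify per the x86 C ABI */ } }
mod arm { pub fn compute_abi_info(_fty: &mut super::FnType) { /* classify per AAPCS */ } }

// Dispatch on the target's arch string, as abi::call::mod now does.
fn adjust_for_cabi(fty: &mut FnType, arch: &str) -> Result<(), String> {
    match arch {
        "x86" => x86::compute_abi_info(fty),
        "arm" => arm::compute_abi_info(fty),
        a => return Err(format!("unrecognized arch \"{}\" in target specification", a)),
    }
    Ok(())
}

fn main() {
    let mut fty = FnType {};
    assert!(adjust_for_cabi(&mut fty, "arm").is_ok());
    assert!(adjust_for_cabi(&mut fty, "avr").is_err());
}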
......@@ -8,8 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
......
......@@ -8,10 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use rustc_target::spec::HasTargetSpec;
use llvm::CallConv;
use abi::call::{Conv, FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
......@@ -109,7 +108,7 @@ pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
// If this is a target with a hard-float ABI, and the function is not explicitly
// `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates.
let vfp = cx.target_spec().llvm_target.ends_with("hf")
&& fty.cconv != CallConv::ArmAapcsCallConv
&& fty.conv != Conv::ArmAapcs
&& !fty.variadic;
if !fty.ret.is_ignore() {
......
......@@ -8,8 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{FnType, ArgType, LayoutExt, Uniform};
use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use abi::call::{FnType, ArgType, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
// Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
......
......@@ -10,7 +10,7 @@
#![allow(non_upper_case_globals)]
use abi::{FnType, ArgType, LayoutExt};
use abi::call::{FnType, ArgType};
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
......
......@@ -8,9 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
use abi::call::{ArgType, FnType, Reg, Uniform};
use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
......
......@@ -8,8 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{ArgAttribute, ArgType, CastTarget, FnType, LayoutExt, PassMode, Reg, RegKind, Uniform};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
use abi::call::{ArgAttribute, ArgType, CastTarget, FnType, PassMode, Reg, RegKind, Uniform};
use abi::{self, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
fn extend_integer_width_mips<Ty>(arg: &mut ArgType<Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips
......
......@@ -8,7 +8,28 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{Align, HasDataLayout, Size};
use abi::{self, Abi, Align, FieldPlacement, Size};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
mod aarch64;
mod arm;
mod asmjs;
mod hexagon;
mod mips;
mod mips64;
mod msp430;
mod nvptx;
mod nvptx64;
mod powerpc;
mod powerpc64;
mod s390x;
mod sparc;
mod sparc64;
mod x86;
mod x86_64;
mod x86_win64;
mod wasm32;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PassMode {
......@@ -211,4 +232,280 @@ pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
.fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)),
|acc, align| acc.max(align))
}
}
\ No newline at end of file
}
impl<'a, Ty> TyLayout<'a, Ty> {
fn is_aggregate(&self) -> bool {
match self.abi {
Abi::Uninhabited |
Abi::Scalar(_) |
Abi::Vector { .. } => false,
Abi::ScalarPair(..) |
Abi::Aggregate { .. } => true
}
}
fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> + Copy
{
match self.abi {
Abi::Uninhabited => None,
// The primitive for this algorithm.
Abi::Scalar(ref scalar) => {
let kind = match scalar.value {
abi::Int(..) |
abi::Pointer => RegKind::Integer,
abi::F32 |
abi::F64 => RegKind::Float
};
Some(Reg {
kind,
size: self.size
})
}
Abi::Vector { .. } => {
Some(Reg {
kind: RegKind::Vector,
size: self.size
})
}
Abi::ScalarPair(..) |
Abi::Aggregate { .. } => {
let mut total = Size::from_bytes(0);
let mut result = None;
let is_union = match self.fields {
FieldPlacement::Array { count, .. } => {
if count > 0 {
return self.field(cx, 0).homogeneous_aggregate(cx);
} else {
return None;
}
}
FieldPlacement::Union(_) => true,
FieldPlacement::Arbitrary { .. } => false
};
for i in 0..self.fields.count() {
if !is_union && total != self.fields.offset(i) {
return None;
}
let field = self.field(cx, i);
match (result, field.homogeneous_aggregate(cx)) {
// The field itself must be a homogeneous aggregate.
(_, None) => return None,
// If this is the first field, record the unit.
(None, Some(unit)) => {
result = Some(unit);
}
// For all following fields, the unit must be the same.
(Some(prev_unit), Some(unit)) => {
if prev_unit != unit {
return None;
}
}
}
// Keep track of the offset (without padding).
let size = field.size;
if is_union {
total = total.max(size);
} else {
total += size;
}
}
// There needs to be no padding.
if total != self.size {
None
} else {
result
}
}
}
}
}
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Debug)]
pub struct ArgType<'a, Ty> {
pub layout: TyLayout<'a, Ty>,
/// Dummy argument, which is emitted before the real argument.
pub pad: Option<Reg>,
pub mode: PassMode,
}
impl<'a, Ty> ArgType<'a, Ty> {
pub fn new(layout: TyLayout<'a, Ty>) -> Self {
ArgType {
layout,
pad: None,
mode: PassMode::Direct(ArgAttributes::new()),
}
}
pub fn make_indirect(&mut self) {
assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
// Start with fresh attributes for the pointer.
let mut attrs = ArgAttributes::new();
// For non-immediate arguments the callee gets its own copy of
// the value on the stack, so there are no aliases. It's also
// program-invisible so can't possibly capture
attrs.set(ArgAttribute::NoAlias)
.set(ArgAttribute::NoCapture)
.set(ArgAttribute::NonNull);
attrs.pointee_size = self.layout.size;
// FIXME(eddyb) We should be doing this, but at least on
// i686-pc-windows-msvc, it results in wrong stack offsets.
// attrs.pointee_align = Some(self.layout.align);
self.mode = PassMode::Indirect(attrs);
}
pub fn make_indirect_byval(&mut self) {
self.make_indirect();
match self.mode {
PassMode::Indirect(ref mut attrs) => {
attrs.set(ArgAttribute::ByVal);
}
_ => unreachable!()
}
}
pub fn extend_integer_width_to(&mut self, bits: u64) {
// Only integers have signedness
if let Abi::Scalar(ref scalar) = self.layout.abi {
if let abi::Int(i, signed) = scalar.value {
if i.size().bits() < bits {
if let PassMode::Direct(ref mut attrs) = self.mode {
attrs.set(if signed {
ArgAttribute::SExt
} else {
ArgAttribute::ZExt
});
}
}
}
}
}
pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
self.mode = PassMode::Cast(target.into());
}
pub fn pad_with(&mut self, reg: Reg) {
self.pad = Some(reg);
}
pub fn is_indirect(&self) -> bool {
match self.mode {
PassMode::Indirect(_) => true,
_ => false
}
}
pub fn is_ignore(&self) -> bool {
self.mode == PassMode::Ignore
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Conv {
C,
ArmAapcs,
Msp430Intr,
PtxKernel,
X86Fastcall,
X86Intr,
X86Stdcall,
X86ThisCall,
X86VectorCall,
X86_64SysV,
X86_64Win64,
}
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Debug)]
pub struct FnType<'a, Ty> {
/// The LLVM types of each argument.
pub args: Vec<ArgType<'a, Ty>>,
/// LLVM return type.
pub ret: ArgType<'a, Ty>,
pub variadic: bool,
pub conv: Conv,
}
impl<'a, Ty> FnType<'a, Ty> {
pub fn adjust_for_cabi<C>(&mut self, cx: C, abi: ::syntax::abi::Abi) -> Result<(), String>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
match &cx.target_spec().arch[..] {
"x86" => {
let flavor = if abi == ::syntax::abi::Abi::Fastcall {
x86::Flavor::Fastcall
} else {
x86::Flavor::General
};
x86::compute_abi_info(cx, self, flavor);
},
"x86_64" => if abi == ::syntax::abi::Abi::SysV64 {
x86_64::compute_abi_info(cx, self);
} else if abi == ::syntax::abi::Abi::Win64 || cx.target_spec().options.is_like_windows {
x86_win64::compute_abi_info(self);
} else {
x86_64::compute_abi_info(cx, self);
},
"aarch64" => aarch64::compute_abi_info(cx, self),
"arm" => arm::compute_abi_info(cx, self),
"mips" => mips::compute_abi_info(cx, self),
"mips64" => mips64::compute_abi_info(cx, self),
"powerpc" => powerpc::compute_abi_info(cx, self),
"powerpc64" => powerpc64::compute_abi_info(cx, self),
"s390x" => s390x::compute_abi_info(cx, self),
"asmjs" => asmjs::compute_abi_info(cx, self),
"wasm32" => {
if cx.target_spec().llvm_target.contains("emscripten") {
asmjs::compute_abi_info(cx, self)
} else {
wasm32::compute_abi_info(self)
}
}
"msp430" => msp430::compute_abi_info(self),
"sparc" => sparc::compute_abi_info(cx, self),
"sparc64" => sparc64::compute_abi_info(cx, self),
"nvptx" => nvptx::compute_abi_info(self),
"nvptx64" => nvptx64::compute_abi_info(self),
"hexagon" => hexagon::compute_abi_info(self),
a => return Err(format!("unrecognized arch \"{}\" in target specification", a))
}
if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
attrs.set(ArgAttribute::StructRet);
}
Ok(())
}
}
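The centerpiece of the moved code is the homogeneous-aggregate classification above, now an inherent method on rustc_target's TyLayout instead of the old LayoutExt trait in rustc_trans. A self-contained sketch of the same idea under simplified, hypothetical types (no unions and no TyLayout plumbing): an aggregate is homogeneous when every leaf reduces to one identical register unit, with no padding between or after fields.

// Sketch only: toy layout types standing in for TyLayout/FieldPlacement.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum RegKind { Integer, Float, Vector }

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Reg { kind: RegKind, size: u64 } // size in bytes

enum Layout {
    Scalar(Reg),
    Struct { size: u64, fields: Vec<(u64, Layout)> }, // (offset, field layout)
}

fn homogeneous_aggregate(layout: &Layout) -> Option<Reg> {
    match layout {
        // The primitive for the algorithm: a scalar is its own unit.
        Layout::Scalar(reg) => Some(*reg),
        Layout::Struct { size, fields } => {
            let mut total = 0;
            let mut result: Option<Reg> = None;
            for (offset, field) in fields {
                // No padding allowed between fields.
                if total != *offset {
                    return None;
                }
                // Each field must itself be a homogeneous aggregate...
                let unit = homogeneous_aggregate(field)?;
                // ...and all fields must share the same unit.
                match result {
                    None => result = Some(unit),
                    Some(prev) if prev == unit => {}
                    Some(_) => return None,
                }
                total += match field {
                    Layout::Scalar(r) => r.size,
                    Layout::Struct { size, .. } => *size,
                };
            }
            // No trailing padding either.
            if total == *size { result } else { None }
        }
    }
}

fn main() {
    let f32_reg = Reg { kind: RegKind::Float, size: 4 };
    // struct { f32, f32 } -> a homogeneous float aggregate.
    let pair = Layout::Struct {
        size: 8,
        fields: vec![(0, Layout::Scalar(f32_reg)), (4, Layout::Scalar(f32_reg))],
    };
    assert_eq!(homogeneous_aggregate(&pair), Some(f32_reg));
    // struct { f32, u32 } -> mixed units, not homogeneous.
    let mixed = Layout::Struct {
        size: 8,
        fields: vec![
            (0, Layout::Scalar(f32_reg)),
            (4, Layout::Scalar(Reg { kind: RegKind::Integer, size: 4 })),
        ],
    };
    assert_eq!(homogeneous_aggregate(&mixed), None);
}

This is what ABIs such as AArch64's and ARM's VFP variant need: a homogeneous aggregate can be passed directly in a uniform block of registers rather than indirectly.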
......@@ -11,7 +11,7 @@
// Reference: MSP430 Embedded Application Binary Interface
// http://www.ti.com/lit/an/slaa534/slaa534.pdf
use abi::{ArgType, FnType, LayoutExt};
use abi::call::{ArgType, FnType};
// 3.5 Structures or Unions Passed and Returned by Reference
//
......
......@@ -11,7 +11,7 @@
// Reference: PTX Writer's Guide to Interoperability
// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
use abi::{ArgType, FnType, LayoutExt};
use abi::call::{ArgType, FnType};
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
......
......@@ -11,7 +11,7 @@
// Reference: PTX Writer's Guide to Interoperability
// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
use abi::{ArgType, FnType, LayoutExt};
use abi::call::{ArgType, FnType};
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
......
......@@ -8,9 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
use abi::call::{ArgType, FnType, Reg, Uniform};
use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
......
......@@ -12,9 +12,8 @@
// Alignment of 128 bit types is not currently handled, this will
// need to be fixed when PowerPC vector support is added.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{Align, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
#[derive(Debug, Clone, Copy, PartialEq)]
enum ABI {
......
......@@ -11,9 +11,8 @@
// FIXME: This assumes we're using the non-vector ABI, i.e. compiling
// for a pre-z13 machine or using -mno-vx.
use abi::{FnType, ArgType, LayoutExt, Reg};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use abi::call::{FnType, ArgType, Reg};
use abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(ret: &mut ArgType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
......
......@@ -8,9 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
use abi::call::{ArgType, FnType, Reg, Uniform};
use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
......
......@@ -10,8 +10,8 @@
// FIXME: This needs an audit for correctness and completeness.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
......
......@@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{FnType, ArgType};
use abi::call::{FnType, ArgType};
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
ret.extend_integer_width_to(32);
......
......@@ -8,9 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{ArgAttribute, FnType, LayoutExt, PassMode, Reg, RegKind};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use rustc_target::spec::HasTargetSpec;
use abi::call::{ArgAttribute, FnType, PassMode, Reg, RegKind};
use abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
#[derive(PartialEq)]
pub enum Flavor {
......@@ -108,7 +108,7 @@ pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>, flavor: Flav
PassMode::Direct(ref mut attrs) => attrs,
PassMode::Pair(..) |
PassMode::Cast(_) => {
bug!("x86 shouldn't be passing arguments by {:?}", arg.mode)
unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode)
}
};
......
......@@ -11,8 +11,8 @@
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
use abi::{ArgType, CastTarget, FnType, LayoutExt, Reg, RegKind};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
use abi::call::{ArgType, CastTarget, FnType, Reg, RegKind};
use abi::{self, Abi, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
/// Classification of "eightbyte" components.
// NB: the order of the variants is from general to specific,
......@@ -49,9 +49,9 @@ fn classify<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>,
}
let mut c = match layout.abi {
abi::Abi::Uninhabited => return Ok(()),
Abi::Uninhabited => return Ok(()),
abi::Abi::Scalar(ref scalar) => {
Abi::Scalar(ref scalar) => {
match scalar.value {
abi::Int(..) |
abi::Pointer => Class::Int,
......@@ -60,10 +60,10 @@ fn classify<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>,
}
}
abi::Abi::Vector { .. } => Class::Sse,
Abi::Vector { .. } => Class::Sse,
abi::Abi::ScalarPair(..) |
abi::Abi::Aggregate { .. } => {
Abi::ScalarPair(..) |
Abi::Aggregate { .. } => {
match layout.variants {
abi::Variants::Single { .. } => {
for i in 0..layout.fields.count() {
......@@ -161,7 +161,7 @@ fn reg_component(cls: &[Option<Class>], i: &mut usize, size: Size) -> Option<Reg
}
})
}
Some(c) => bug!("reg_component: unhandled class {:?}", c)
Some(c) => unreachable!("reg_component: unhandled class {:?}", c)
}
}
......
......@@ -8,18 +8,17 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{ArgType, FnType, Reg};
use rustc_target::abi;
use abi::call::{ArgType, FnType, Reg};
use abi::Abi;
// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
let fixup = |a: &mut ArgType<Ty>| {
match a.layout.abi {
abi::Abi::Uninhabited => {}
abi::Abi::ScalarPair(..) |
abi::Abi::Aggregate { .. } => {
Abi::Uninhabited => {}
Abi::ScalarPair(..) |
Abi::Aggregate { .. } => {
match a.layout.size.bits() {
8 => a.cast_to(Reg::i8()),
16 => a.cast_to(Reg::i16()),
......@@ -28,11 +27,11 @@ pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
_ => a.make_indirect()
}
}
abi::Abi::Vector { .. } => {
Abi::Vector { .. } => {
// FIXME(eddyb) there should be a size cap here
// (probably what clang calls "illegal vectors").
}
abi::Abi::Scalar(_) => {
Abi::Scalar(_) => {
if a.layout.size.bytes() > 8 {
a.make_indirect();
} else {
......
......@@ -13,36 +13,16 @@
use builder::Builder;
use common::{ty_fn_sig, C_usize};
use context::CodegenCx;
use cabi_x86;
use cabi_x86_64;
use cabi_x86_win64;
use cabi_arm;
use cabi_aarch64;
use cabi_powerpc;
use cabi_powerpc64;
use cabi_s390x;
use cabi_mips;
use cabi_mips64;
use cabi_asmjs;
use cabi_msp430;
use cabi_sparc;
use cabi_sparc64;
use cabi_nvptx;
use cabi_nvptx64;
use cabi_hexagon;
use cabi_wasm32;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
use rustc_target::spec::HasTargetSpec;
use rustc_target::abi::{LayoutOf, Size, TyLayout};
use rustc::ty::{self, Ty};
use rustc::ty::layout;
use libc::c_uint;
use std::cmp;
pub use syntax::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
......@@ -144,108 +124,6 @@ fn llvm_type(&self, cx: &CodegenCx) -> Type {
}
}
pub trait LayoutExt<'a, Ty>: Sized {
fn is_aggregate(&self) -> bool;
fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> + Copy;
}
impl<'a, Ty> LayoutExt<'a, Ty> for TyLayout<'a, Ty> {
fn is_aggregate(&self) -> bool {
match self.abi {
layout::Abi::Uninhabited |
layout::Abi::Scalar(_) |
layout::Abi::Vector { .. } => false,
layout::Abi::ScalarPair(..) |
layout::Abi::Aggregate { .. } => true
}
}
fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> + Copy
{
match self.abi {
layout::Abi::Uninhabited => None,
// The primitive for this algorithm.
layout::Abi::Scalar(ref scalar) => {
let kind = match scalar.value {
layout::Int(..) |
layout::Pointer => RegKind::Integer,
layout::F32 |
layout::F64 => RegKind::Float
};
Some(Reg {
kind,
size: self.size
})
}
layout::Abi::Vector { .. } => {
Some(Reg {
kind: RegKind::Vector,
size: self.size
})
}
layout::Abi::ScalarPair(..) |
layout::Abi::Aggregate { .. } => {
let mut total = Size::from_bytes(0);
let mut result = None;
let is_union = match self.fields {
layout::FieldPlacement::Array { count, .. } => {
if count > 0 {
return self.field(cx, 0).homogeneous_aggregate(cx);
} else {
return None;
}
}
layout::FieldPlacement::Union(_) => true,
layout::FieldPlacement::Arbitrary { .. } => false
};
for i in 0..self.fields.count() {
if !is_union && total != self.fields.offset(i) {
return None;
}
let field = self.field(cx, i);
match (result, field.homogeneous_aggregate(cx)) {
// The field itself must be a homogeneous aggregate.
(_, None) => return None,
// If this is the first field, record the unit.
(None, Some(unit)) => {
result = Some(unit);
}
// For all following fields, the unit must be the same.
(Some(prev_unit), Some(unit)) => {
if prev_unit != unit {
return None;
}
}
}
// Keep track of the offset (without padding).
let size = field.size;
if is_union {
total = cmp::max(total, size);
} else {
total += size;
}
}
// There needs to be no padding.
if total != self.size {
None
} else {
result
}
}
}
}
}
impl LlvmType for CastTarget {
fn llvm_type(&self, cx: &CodegenCx) -> Type {
let rest_ll_unit = self.rest.unit.llvm_type(cx);
......@@ -282,99 +160,16 @@ fn llvm_type(&self, cx: &CodegenCx) -> Type {
}
}
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Debug)]
pub struct ArgType<'tcx, Ty = ty::Ty<'tcx>> {
pub layout: TyLayout<'tcx, Ty>,
/// Dummy argument, which is emitted before the real argument.
pub pad: Option<Reg>,
pub mode: PassMode,
pub trait ArgTypeExt<'a, 'tcx> {
fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type;
fn store(&self, bx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>);
fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>);
}
impl<'a, 'tcx, Ty> ArgType<'tcx, Ty> {
fn new(layout: TyLayout<'tcx, Ty>) -> Self {
ArgType {
layout,
pad: None,
mode: PassMode::Direct(ArgAttributes::new()),
}
}
pub fn make_indirect(&mut self) {
assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
// Start with fresh attributes for the pointer.
let mut attrs = ArgAttributes::new();
// For non-immediate arguments the callee gets its own copy of
// the value on the stack, so there are no aliases. It's also
// program-invisible so can't possibly capture
attrs.set(ArgAttribute::NoAlias)
.set(ArgAttribute::NoCapture)
.set(ArgAttribute::NonNull);
attrs.pointee_size = self.layout.size;
// FIXME(eddyb) We should be doing this, but at least on
// i686-pc-windows-msvc, it results in wrong stack offsets.
// attrs.pointee_align = Some(self.layout.align);
self.mode = PassMode::Indirect(attrs);
}
pub fn make_indirect_byval(&mut self) {
self.make_indirect();
match self.mode {
PassMode::Indirect(ref mut attrs) => {
attrs.set(ArgAttribute::ByVal);
}
_ => bug!()
}
}
pub fn extend_integer_width_to(&mut self, bits: u64) {
// Only integers have signedness
if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
if let layout::Int(i, signed) = scalar.value {
if i.size().bits() < bits {
if let PassMode::Direct(ref mut attrs) = self.mode {
attrs.set(if signed {
ArgAttribute::SExt
} else {
ArgAttribute::ZExt
});
}
}
}
}
}
pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
self.mode = PassMode::Cast(target.into());
}
pub fn pad_with(&mut self, reg: Reg) {
self.pad = Some(reg);
}
pub fn is_indirect(&self) -> bool {
match self.mode {
PassMode::Indirect(_) => true,
_ => false
}
}
pub fn is_ignore(&self) -> bool {
self.mode == PassMode::Ignore
}
}
impl<'a, 'tcx> ArgType<'tcx> {
impl<'a, 'tcx> ArgTypeExt<'a, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
/// Get the LLVM type for a place of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
self.layout.llvm_type(cx)
}
......@@ -382,7 +177,7 @@ pub fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
/// place for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
/// or results of call/invoke instructions into their destinations.
pub fn store(&self, bx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
fn store(&self, bx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
if self.is_ignore() {
return;
}
......@@ -434,7 +229,7 @@ pub fn store(&self, bx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>)
}
}
pub fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
let mut next = || {
let val = llvm::get_param(bx.llfn(), *idx as c_uint);
*idx += 1;
......@@ -452,26 +247,29 @@ pub fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRe
}
}
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Debug)]
pub struct FnType<'tcx, Ty = ty::Ty<'tcx>> {
/// The LLVM types of each argument.
pub args: Vec<ArgType<'tcx, Ty>>,
/// LLVM return type.
pub ret: ArgType<'tcx, Ty>,
pub variadic: bool,
pub cconv: llvm::CallConv
pub trait FnTypeExt<'a, 'tcx> {
fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
-> Self;
fn new(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self;
fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self;
fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self;
fn adjust_for_abi(&mut self,
cx: &CodegenCx<'a, 'tcx>,
abi: Abi);
fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type;
fn llvm_cconv(&self) -> llvm::CallConv;
fn apply_attrs_llfn(&self, llfn: ValueRef);
fn apply_attrs_callsite(&self, callsite: ValueRef);
}
impl<'a, 'tcx> FnType<'tcx> {
pub fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
-> Self {
let fn_ty = instance.ty(cx.tcx);
let sig = ty_fn_sig(cx, fn_ty);
......@@ -479,7 +277,7 @@ pub fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
FnType::new(cx, sig, &[])
}
pub fn new(cx: &CodegenCx<'a, 'tcx>,
fn new(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self {
let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
......@@ -487,7 +285,7 @@ pub fn new(cx: &CodegenCx<'a, 'tcx>,
fn_ty
}
pub fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self {
let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
......@@ -512,34 +310,34 @@ pub fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
fn_ty
}
pub fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self {
debug!("FnType::unadjusted({:?}, {:?})", sig, extra_args);
use self::Abi::*;
let cconv = match cx.sess().target.target.adjust_abi(sig.abi) {
let conv = match cx.sess().target.target.adjust_abi(sig.abi) {
RustIntrinsic | PlatformIntrinsic |
Rust | RustCall => llvm::CCallConv,
Rust | RustCall => Conv::C,
// It's the ABI's job to select this, not us.
System => bug!("system abi should be selected elsewhere"),
Stdcall => llvm::X86StdcallCallConv,
Fastcall => llvm::X86FastcallCallConv,
Vectorcall => llvm::X86_VectorCall,
Thiscall => llvm::X86_ThisCall,
C => llvm::CCallConv,
Unadjusted => llvm::CCallConv,
Win64 => llvm::X86_64_Win64,
SysV64 => llvm::X86_64_SysV,
Aapcs => llvm::ArmAapcsCallConv,
PtxKernel => llvm::PtxKernel,
Msp430Interrupt => llvm::Msp430Intr,
X86Interrupt => llvm::X86_Intr,
Stdcall => Conv::X86Stdcall,
Fastcall => Conv::X86Fastcall,
Vectorcall => Conv::X86VectorCall,
Thiscall => Conv::X86ThisCall,
C => Conv::C,
Unadjusted => Conv::C,
Win64 => Conv::X86_64Win64,
SysV64 => Conv::X86_64SysV,
Aapcs => Conv::ArmAapcs,
PtxKernel => Conv::PtxKernel,
Msp430Interrupt => Conv::Msp430Intr,
X86Interrupt => Conv::X86Intr,
// These API constants ought to be more specific...
Cdecl => llvm::CCallConv,
Cdecl => Conv::C,
};
let mut inputs = sig.inputs();
......@@ -682,7 +480,7 @@ pub fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
arg_of(ty, false)
}).collect(),
variadic: sig.variadic,
cconv,
conv,
}
}
......@@ -693,7 +491,7 @@ fn adjust_for_abi(&mut self,
if abi == Abi::Rust || abi == Abi::RustCall ||
abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
let fixup = |arg: &mut ArgType<'tcx>| {
let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
if arg.is_ignore() { return; }
match arg.layout.abi {
......@@ -753,63 +551,8 @@ fn adjust_for_abi(&mut self,
cx.sess().fatal(&msg);
}
}
}
impl<'a, Ty> FnType<'a, Ty> {
fn adjust_for_cabi<C>(&mut self, cx: C, abi: Abi) -> Result<(), String>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
match &cx.target_spec().arch[..] {
"x86" => {
let flavor = if abi == Abi::Fastcall {
cabi_x86::Flavor::Fastcall
} else {
cabi_x86::Flavor::General
};
cabi_x86::compute_abi_info(cx, self, flavor);
},
"x86_64" => if abi == Abi::SysV64 {
cabi_x86_64::compute_abi_info(cx, self);
} else if abi == Abi::Win64 || cx.target_spec().options.is_like_windows {
cabi_x86_win64::compute_abi_info(self);
} else {
cabi_x86_64::compute_abi_info(cx, self);
},
"aarch64" => cabi_aarch64::compute_abi_info(cx, self),
"arm" => cabi_arm::compute_abi_info(cx, self),
"mips" => cabi_mips::compute_abi_info(cx, self),
"mips64" => cabi_mips64::compute_abi_info(cx, self),
"powerpc" => cabi_powerpc::compute_abi_info(cx, self),
"powerpc64" => cabi_powerpc64::compute_abi_info(cx, self),
"s390x" => cabi_s390x::compute_abi_info(cx, self),
"asmjs" => cabi_asmjs::compute_abi_info(cx, self),
"wasm32" => {
if cx.target_spec().llvm_target.contains("emscripten") {
cabi_asmjs::compute_abi_info(cx, self)
} else {
cabi_wasm32::compute_abi_info(self)
}
}
"msp430" => cabi_msp430::compute_abi_info(self),
"sparc" => cabi_sparc::compute_abi_info(cx, self),
"sparc64" => cabi_sparc64::compute_abi_info(cx, self),
"nvptx" => cabi_nvptx::compute_abi_info(self),
"nvptx64" => cabi_nvptx64::compute_abi_info(self),
"hexagon" => cabi_hexagon::compute_abi_info(self),
a => return Err(format!("unrecognized arch \"{}\" in target specification", a))
}
if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
attrs.set(ArgAttribute::StructRet);
}
Ok(())
}
}
impl<'a, 'tcx> FnType<'tcx> {
pub fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
let mut llargument_tys = Vec::new();
let llreturn_ty = match self.ret.mode {
......@@ -851,7 +594,23 @@ pub fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
}
}
pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
fn llvm_cconv(&self) -> llvm::CallConv {
match self.conv {
Conv::C => llvm::CCallConv,
Conv::ArmAapcs => llvm::ArmAapcsCallConv,
Conv::Msp430Intr => llvm::Msp430Intr,
Conv::PtxKernel => llvm::PtxKernel,
Conv::X86Fastcall => llvm::X86FastcallCallConv,
Conv::X86Intr => llvm::X86_Intr,
Conv::X86Stdcall => llvm::X86StdcallCallConv,
Conv::X86ThisCall => llvm::X86_ThisCall,
Conv::X86VectorCall => llvm::X86_VectorCall,
Conv::X86_64SysV => llvm::X86_64_SysV,
Conv::X86_64Win64 => llvm::X86_64_Win64,
}
}
fn apply_attrs_llfn(&self, llfn: ValueRef) {
let mut i = 0;
let mut apply = |attrs: &ArgAttributes| {
attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
......@@ -881,7 +640,7 @@ pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
}
}
pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
fn apply_attrs_callsite(&self, callsite: ValueRef) {
let mut i = 0;
let mut apply = |attrs: &ArgAttributes| {
attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
......@@ -910,8 +669,9 @@ pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
}
}
if self.cconv != llvm::CCallConv {
llvm::SetInstructionCallConv(callsite, self.cconv);
let cconv = self.llvm_cconv();
if cconv != llvm::CCallConv {
llvm::SetInstructionCallConv(callsite, cconv);
}
}
}
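Since ArgType and FnType are now defined in rustc_target, rustc_trans can no longer add inherent methods to them; the hunks above reattach the LLVM-specific behavior (memory_ty, store, llvm_type, llvm_cconv, attribute application) through the new ArgTypeExt and FnTypeExt extension traits. A minimal sketch of that pattern with stand-in types (Conv and FnType are simplified; LlvmCallConv is a hypothetical placeholder for llvm::CallConv):

// The type lives in the "target" crate and knows nothing about LLVM.
mod target {
    #[derive(Clone, Copy, PartialEq, Debug)]
    pub enum Conv { C, ArmAapcs, X86Stdcall }

    pub struct FnType { pub conv: Conv }
}

// The backend crate reattaches LLVM-specific methods via an extension trait.
mod backend {
    use super::target::{Conv, FnType};

    // Stand-in for llvm::CallConv.
    #[derive(Debug, PartialEq)]
    pub enum LlvmCallConv { CCallConv, ArmAapcsCallConv, X86StdcallCallConv }

    pub trait FnTypeExt {
        fn llvm_cconv(&self) -> LlvmCallConv;
    }

    impl FnTypeExt for FnType {
        fn llvm_cconv(&self) -> LlvmCallConv {
            match self.conv {
                Conv::C => LlvmCallConv::CCallConv,
                Conv::ArmAapcs => LlvmCallConv::ArmAapcsCallConv,
                Conv::X86Stdcall => LlvmCallConv::X86StdcallCallConv,
            }
        }
    }
}

fn main() {
    // Callers must bring the trait into scope alongside the type.
    use backend::FnTypeExt;
    let fty = target::FnType { conv: target::Conv::ArmAapcs };
    assert_eq!(fty.llvm_cconv(), backend::LlvmCallConv::ArmAapcsCallConv);
}

This is why the remaining hunks in this diff change imports like use abi::FnType to use abi::{FnType, FnTypeExt}: the methods still exist, but only where the extension trait is in scope.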
......@@ -25,7 +25,7 @@
use rustc::ty::{self, Ty};
use rustc::session::config::Sanitizer;
use rustc_target::spec::PanicStrategy;
use abi::{Abi, FnType};
use abi::{Abi, FnType, FnTypeExt};
use attributes;
use context::CodegenCx;
use common;
......@@ -131,7 +131,7 @@ pub fn declare_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, name: &str,
debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
let fty = FnType::new(cx, sig, &[]);
let llfn = declare_raw_fn(cx, name, fty.cconv, fty.llvm_type(cx));
let llfn = declare_raw_fn(cx, name, fty.llvm_cconv(), fty.llvm_type(cx));
// FIXME(canndrew): This is_never should really be an is_uninhabited
if sig.output().is_never() {
......
......@@ -87,7 +87,7 @@ fn get_simple_intrinsic(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType<'tcx>,
fn_ty: &FnType<'tcx, Ty<'tcx>>,
args: &[OperandRef<'tcx>],
llresult: ValueRef,
span: Span) {
......
......@@ -102,24 +102,6 @@ mod back {
mod attributes;
mod base;
mod builder;
mod cabi_aarch64;
mod cabi_arm;
mod cabi_asmjs;
mod cabi_hexagon;
mod cabi_mips;
mod cabi_mips64;
mod cabi_msp430;
mod cabi_nvptx;
mod cabi_nvptx64;
mod cabi_powerpc;
mod cabi_powerpc64;
mod cabi_s390x;
mod cabi_sparc;
mod cabi_sparc64;
mod cabi_x86;
mod cabi_x86_64;
mod cabi_x86_win64;
mod cabi_wasm32;
mod callee;
mod common;
mod consts;
......
......@@ -9,7 +9,7 @@
// except according to those terms.
use llvm::ValueRef;
use abi::FnType;
use abi::{FnType, FnTypeExt};
use callee;
use common::*;
use builder::Builder;
......@@ -35,7 +35,7 @@ pub fn from_index(index: usize) -> Self {
pub fn get_fn(self, bx: &Builder<'a, 'tcx>,
llvtable: ValueRef,
fn_ty: &FnType<'tcx>) -> ValueRef {
fn_ty: &FnType<'tcx, Ty<'tcx>>) -> ValueRef {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", Value(llvtable), self);
......
......@@ -10,10 +10,10 @@
use llvm::{self, ValueRef, BasicBlockRef};
use rustc::middle::lang_items;
use rustc::ty::{self, TypeFoldable};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, LayoutOf};
use rustc::mir;
use abi::{Abi, FnType, ArgType, LlvmType, PassMode};
use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode};
use base;
use callee;
use builder::Builder;
......@@ -110,7 +110,7 @@ fn trans_terminator(&mut self,
let do_call = |
this: &mut Self,
bx: Builder<'a, 'tcx>,
fn_ty: FnType<'tcx>,
fn_ty: FnType<'tcx, Ty<'tcx>>,
fn_ptr: ValueRef,
llargs: &[ValueRef],
destination: Option<(ReturnDest<'tcx>, mir::BasicBlock)>,
......@@ -604,7 +604,7 @@ fn trans_argument(&mut self,
bx: &Builder<'a, 'tcx>,
op: OperandRef<'tcx>,
llargs: &mut Vec<ValueRef>,
arg: &ArgType<'tcx>) {
arg: &ArgType<'tcx, Ty<'tcx>>) {
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
llargs.push(C_undef(ty.llvm_type(bx.cx)));
......@@ -683,7 +683,7 @@ fn trans_arguments_untupled(&mut self,
bx: &Builder<'a, 'tcx>,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<ValueRef>,
args: &[ArgType<'tcx>]) {
args: &[ArgType<'tcx, Ty<'tcx>>]) {
let tuple = self.trans_operand(bx, operand);
// Handle both by-ref and immediate tuples.
......@@ -776,7 +776,7 @@ pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> {
}
fn make_return_dest(&mut self, bx: &Builder<'a, 'tcx>,
dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx>,
dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>,
llargs: &mut Vec<ValueRef>, is_intrinsic: bool)
-> ReturnDest<'tcx> {
// If the return is ignored, we can just return a do-nothing ReturnDest
......@@ -873,7 +873,7 @@ fn trans_transmute_into(&mut self, bx: &Builder<'a, 'tcx>,
fn store_return(&mut self,
bx: &Builder<'a, 'tcx>,
dest: ReturnDest<'tcx>,
ret_ty: &ArgType<'tcx>,
ret_ty: &ArgType<'tcx, Ty<'tcx>>,
llval: ValueRef) {
use self::ReturnDest::*;
......
......@@ -12,7 +12,7 @@
use libc::c_uint;
use llvm::{self, ValueRef, BasicBlockRef};
use llvm::debuginfo::DIScope;
use rustc::ty::{self, TypeFoldable};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc::mir::{self, Mir};
use rustc::ty::subst::Substs;
......@@ -22,7 +22,7 @@
use common::{CodegenCx, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::Instance;
use abi::{ArgAttribute, FnType, PassMode};
use abi::{ArgAttribute, ArgTypeExt, FnType, FnTypeExt, PassMode};
use type_::Type;
use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
......@@ -53,7 +53,7 @@ pub struct FunctionCx<'a, 'tcx:'a> {
cx: &'a CodegenCx<'a, 'tcx>,
fn_ty: FnType<'tcx>,
fn_ty: FnType<'tcx, Ty<'tcx>>,
/// When unwinding is initiated, we have to store this personality
/// value somewhere so that we can load it and re-use it in the
......
......@@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::FnType;
use abi::{FnType, FnTypeExt};
use common::*;
use rustc::hir;
use rustc::ty::{self, Ty, TypeFoldable};
......