Commit fb15d447 authored by Irina Popa

rustc_trans: generalize cabi_* to any context type.

Parent c45dda92
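
This commit decouples the per-architecture ABI classification code (the cabi_* modules) from the LLVM-specific CodegenCx by rewriting it against trait bounds: any context C that implements LayoutOf (with matching Ty and TyLayout associated types) plus HasDataLayout, and HasTargetSpec where needed, can now drive the classification. What follows is a minimal, self-contained sketch of that pattern using simplified stand-in traits and toy types; these are not the real rustc_target definitions.

// Stand-in traits and types, simplified for illustration only.
trait HasDataLayout {
    // Pointer size in bytes, as the target data layout would report it.
    fn pointer_size(&self) -> u64;
}

trait LayoutOf: Copy {
    type Ty;
    type TyLayout;
    fn layout_of(self, ty: Self::Ty) -> Self::TyLayout;
}

#[derive(Clone, Copy)]
struct Layout { size: u64 }

#[derive(Clone, Copy)]
struct Cx;

impl HasDataLayout for Cx {
    fn pointer_size(&self) -> u64 { 8 }
}

impl LayoutOf for Cx {
    type Ty = &'static str;
    type TyLayout = Layout;
    fn layout_of(self, _ty: &'static str) -> Layout { Layout { size: 32 } }
}

// Before: fn classify_ret_ty(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>, ...)
// After: generic over any context C that provides the needed capabilities.
fn classify_ret_ty<C>(cx: C, ret: Layout, offset: &mut u64)
    where C: LayoutOf<TyLayout = Layout> + HasDataLayout
{
    if ret.size > 16 {
        // Indirect return: reserve a pointer slot, sized through the
        // context's data layout instead of reaching into cx.tcx.
        *offset += cx.pointer_size();
    }
}

fn main() {
    let cx = Cx;
    let ret = cx.layout_of("some_ty");
    let mut offset = 0;
    classify_ret_ty(cx, ret, &mut offset);
    assert_eq!(offset, 8);
}

The payoff is that ABI computation no longer requires a full codegen context; anything that knows the data layout and can produce layouts for its type representation qualifies, which is what lets this code migrate toward rustc_target.
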
......@@ -784,18 +784,18 @@ pub trait LayoutOf {
fn layout_of(self, ty: Self::Ty) -> Self::TyLayout;
}
pub trait TyLayoutMethods<'a, C: LayoutOf>: Sized {
pub trait TyLayoutMethods<'a, C: LayoutOf<Ty = Self>>: Sized {
fn for_variant(this: TyLayout<'a, Self>, cx: C, variant_index: usize) -> TyLayout<'a, Self>;
fn field(this: TyLayout<'a, Self>, cx: C, i: usize) -> C::TyLayout;
}
impl<'a, Ty> TyLayout<'a, Ty> {
pub fn for_variant<C>(self, cx: C, variant_index: usize) -> Self
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf {
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
Ty::for_variant(self, cx, variant_index)
}
pub fn field<C>(self, cx: C, i: usize) -> C::TyLayout
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf {
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
Ty::field(self, cx, i)
}
}
......
......@@ -397,6 +397,16 @@ pub struct Target {
pub options: TargetOptions,
}
pub trait HasTargetSpec: Copy {
fn target_spec(&self) -> &Target;
}
impl<'a> HasTargetSpec for &'a Target {
fn target_spec(&self) -> &Target {
self
}
}
/// Optional aspects of a target specification.
///
/// This has an implementation of `Default`, see each field for what the default is. In general,
......
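
The blanket impl of HasTargetSpec for &'a Target above lets a bare target specification act as its own context: code that only needs target_spec() can be driven by a &Target directly, with no codegen machinery. A tiny sketch of why that is convenient (toy Target struct, not the real one):

struct Target { arch: String }

trait HasTargetSpec: Copy {
    fn target_spec(&self) -> &Target;
}

// A &Target is trivially its own target spec.
impl<'a> HasTargetSpec for &'a Target {
    fn target_spec(&self) -> &Target {
        self
    }
}

// Generic consumer: needs only the spec, not a full context.
fn arch_of<C: HasTargetSpec>(cx: C) -> String {
    cx.target_spec().arch.clone()
}

fn main() {
    let target = Target { arch: "x86_64".to_string() };
    assert_eq!(arch_of(&target), "x86_64");
}
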
......@@ -36,8 +36,10 @@
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
use rustc_target::spec::HasTargetSpec;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, LayoutOf, Size, TyLayout};
use rustc::ty::layout;
use libc::c_uint;
use std::cmp;
......@@ -142,12 +144,13 @@ fn llvm_type(&self, cx: &CodegenCx) -> Type {
}
}
pub trait LayoutExt<'tcx> {
pub trait LayoutExt<'a, Ty>: Sized {
fn is_aggregate(&self) -> bool;
fn homogeneous_aggregate<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<Reg>;
fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> + Copy;
}
impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
impl<'a, Ty> LayoutExt<'a, Ty> for TyLayout<'a, Ty> {
fn is_aggregate(&self) -> bool {
match self.abi {
layout::Abi::Uninhabited |
......@@ -158,7 +161,9 @@ fn is_aggregate(&self) -> bool {
}
}
fn homogeneous_aggregate<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<Reg> {
fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> + Copy
{
match self.abi {
layout::Abi::Uninhabited => None,
......@@ -280,8 +285,8 @@ fn llvm_type(&self, cx: &CodegenCx) -> Type {
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Debug)]
pub struct ArgType<'tcx> {
pub layout: TyLayout<'tcx>,
pub struct ArgType<'tcx, Ty = ty::Ty<'tcx>> {
pub layout: TyLayout<'tcx, Ty>,
/// Dummy argument, which is emitted before the real argument.
pub pad: Option<Reg>,
......@@ -289,8 +294,8 @@ pub struct ArgType<'tcx> {
pub mode: PassMode,
}
impl<'a, 'tcx> ArgType<'tcx> {
fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
impl<'a, 'tcx, Ty> ArgType<'tcx, Ty> {
fn new(layout: TyLayout<'tcx, Ty>) -> Self {
ArgType {
layout,
pad: None,
......@@ -364,7 +369,9 @@ pub fn is_indirect(&self) -> bool {
pub fn is_ignore(&self) -> bool {
self.mode == PassMode::Ignore
}
}
impl<'a, 'tcx> ArgType<'tcx> {
/// Get the LLVM type for a place of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
......@@ -451,12 +458,12 @@ pub fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRe
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Debug)]
pub struct FnType<'tcx> {
pub struct FnType<'tcx, Ty = ty::Ty<'tcx>> {
/// The LLVM types of each argument.
pub args: Vec<ArgType<'tcx>>,
pub args: Vec<ArgType<'tcx, Ty>>,
/// LLVM return type.
pub ret: ArgType<'tcx>,
pub ret: ArgType<'tcx, Ty>,
pub variadic: bool,
......@@ -474,7 +481,7 @@ pub fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
pub fn new(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
extra_args: &[Ty<'tcx>]) -> Self {
let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
fn_ty.adjust_for_abi(cx, sig.abi);
fn_ty
......@@ -482,7 +489,7 @@ pub fn new(cx: &CodegenCx<'a, 'tcx>,
pub fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
extra_args: &[Ty<'tcx>]) -> Self {
let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
// Don't pass the vtable, it's not an argument of the virtual fn.
{
......@@ -507,7 +514,7 @@ pub fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
pub fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
extra_args: &[Ty<'tcx>]) -> Self {
debug!("FnType::unadjusted({:?}, {:?})", sig, extra_args);
use self::Abi::*;
......@@ -569,7 +576,7 @@ pub fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
// Handle safe Rust thin and fat pointers.
let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
scalar: &layout::Scalar,
layout: TyLayout<'tcx>,
layout: TyLayout<'tcx, Ty<'tcx>>,
offset: Size,
is_return: bool| {
// Booleans are always an i1 that needs to be zero-extended.
......@@ -742,7 +749,18 @@ fn adjust_for_abi(&mut self,
return;
}
match &cx.sess().target.target.arch[..] {
if let Err(msg) = self.adjust_for_cabi(cx, abi) {
cx.sess().fatal(&msg);
}
}
}
impl<'a, Ty> FnType<'a, Ty> {
fn adjust_for_cabi<C>(&mut self, cx: C, abi: Abi) -> Result<(), String>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
match &cx.target_spec().arch[..] {
"x86" => {
let flavor = if abi == Abi::Fastcall {
cabi_x86::Flavor::Fastcall
......@@ -753,7 +771,7 @@ fn adjust_for_abi(&mut self,
},
"x86_64" => if abi == Abi::SysV64 {
cabi_x86_64::compute_abi_info(cx, self);
} else if abi == Abi::Win64 || cx.sess().target.target.options.is_like_windows {
} else if abi == Abi::Win64 || cx.target_spec().options.is_like_windows {
cabi_x86_win64::compute_abi_info(self);
} else {
cabi_x86_64::compute_abi_info(cx, self);
......@@ -767,10 +785,10 @@ fn adjust_for_abi(&mut self,
"s390x" => cabi_s390x::compute_abi_info(cx, self),
"asmjs" => cabi_asmjs::compute_abi_info(cx, self),
"wasm32" => {
if cx.sess().opts.target_triple.triple().contains("emscripten") {
if cx.target_spec().llvm_target.contains("emscripten") {
cabi_asmjs::compute_abi_info(cx, self)
} else {
cabi_wasm32::compute_abi_info(cx, self)
cabi_wasm32::compute_abi_info(self)
}
}
"msp430" => cabi_msp430::compute_abi_info(self),
......@@ -779,14 +797,18 @@ fn adjust_for_abi(&mut self,
"nvptx" => cabi_nvptx::compute_abi_info(self),
"nvptx64" => cabi_nvptx64::compute_abi_info(self),
"hexagon" => cabi_hexagon::compute_abi_info(self),
a => cx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
a => return Err(format!("unrecognized arch \"{}\" in target specification", a))
}
if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
attrs.set(ArgAttribute::StructRet);
}
Ok(())
}
}
impl<'a, 'tcx> FnType<'tcx> {
pub fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
let mut llargument_tys = Vec::new();
......
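
Note the control-flow change in adjust_for_abi above: the architecture dispatch moves into a generic adjust_for_cabi that returns Result<(), String>, because a generic context has no Session to call fatal() on; the LLVM-specific caller converts the error into a fatal diagnostic. A rough sketch of that flow, with toy types rather than the real signatures:

struct FnTy;

impl FnTy {
    // Generic layer: report failure as data instead of aborting.
    fn adjust_for_cabi(&mut self, arch: &str) -> Result<(), String> {
        match arch {
            "x86" | "x86_64" | "arm" => Ok(()),
            a => Err(format!("unrecognized arch \"{}\" in target specification", a)),
        }
    }
}

fn main() {
    let mut fn_ty = FnTy;
    if let Err(msg) = fn_ty.adjust_for_cabi("m68k") {
        // Stand-in for cx.sess().fatal(&msg) in the LLVM-specific caller.
        eprintln!("fatal: {}", msg);
    }
}
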
......@@ -9,10 +9,13 @@
// except according to those terms.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
use context::CodegenCx;
use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
-> Option<Uniform> {
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
let size = arg.layout.size;
......@@ -38,7 +41,10 @@ fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgTyp
})
}
fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
return;
......@@ -69,7 +75,10 @@ fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>)
ret.make_indirect();
}
fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(32);
return;
......@@ -100,7 +109,10 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
arg.make_indirect();
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
......
......@@ -9,11 +9,15 @@
// except according to those terms.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
use context::CodegenCx;
use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use rustc_target::spec::HasTargetSpec;
use llvm::CallConv;
fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
-> Option<Uniform> {
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
let size = arg.layout.size;
......@@ -39,7 +43,10 @@ fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgTyp
})
}
fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>, vfp: bool) {
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, vfp: bool)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
return;
......@@ -71,7 +78,10 @@ fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>,
ret.make_indirect();
}
fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) {
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, vfp: bool)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(32);
return;
......@@ -92,10 +102,13 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>,
});
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
// If this is a target with a hard-float ABI, and the function is not explicitly
// `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates.
let vfp = cx.sess().target.target.llvm_target.ends_with("hf")
let vfp = cx.target_spec().llvm_target.ends_with("hf")
&& fty.cconv != CallConv::ArmAapcsCallConv
&& !fty.variadic;
......
......@@ -9,14 +9,17 @@
// except according to those terms.
use abi::{FnType, ArgType, LayoutExt, Uniform};
use context::CodegenCx;
use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
// Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
// See the https://github.com/kripken/emscripten-fastcomp-clang repository.
// The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions.
fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if ret.layout.is_aggregate() {
if let Some(unit) = ret.layout.homogeneous_aggregate(cx) {
let size = ret.layout.size;
......@@ -33,13 +36,16 @@ fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>)
}
}
fn classify_arg_ty(arg: &mut ArgType) {
fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() {
arg.make_indirect_byval();
}
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
......
......@@ -12,7 +12,7 @@
use abi::{FnType, ArgType, LayoutExt};
fn classify_ret_ty(ret: &mut ArgType) {
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
ret.make_indirect();
} else {
......@@ -20,7 +20,7 @@ fn classify_ret_ty(ret: &mut ArgType) {
}
}
fn classify_arg_ty(arg: &mut ArgType) {
fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
arg.make_indirect();
} else {
......@@ -28,7 +28,7 @@ fn classify_arg_ty(arg: &mut ArgType) {
}
}
pub fn compute_abi_info(fty: &mut FnType) {
pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
......
......@@ -9,23 +9,24 @@
// except according to those terms.
use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
use context::CodegenCx;
use rustc::ty::layout::Size;
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
ret: &mut ArgType<'tcx>,
offset: &mut Size) {
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
} else {
ret.make_indirect();
*offset += cx.tcx.data_layout.pointer_size;
*offset += cx.data_layout().pointer_size;
}
}
fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
let dl = &cx.tcx.data_layout;
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
......@@ -44,7 +45,9 @@ fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
*offset = offset.abi_align(align) + size.abi_align(align);
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::from_bytes(0);
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
......
......@@ -9,13 +9,12 @@
// except according to those terms.
use abi::{ArgAttribute, ArgType, CastTarget, FnType, LayoutExt, PassMode, Reg, RegKind, Uniform};
use context::CodegenCx;
use rustc::ty::layout::{self, Size};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
fn extend_integer_width_mips(arg: &mut ArgType, bits: u64) {
fn extend_integer_width_mips<Ty>(arg: &mut ArgType<Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips
if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
if let layout::Int(i, signed) = scalar.value {
if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
if let abi::Int(i, signed) = scalar.value {
if !signed && i.size().bits() == 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode {
attrs.set(ArgAttribute::SExt);
......@@ -28,18 +27,24 @@ fn extend_integer_width_mips(arg: &mut ArgType, bits: u64) {
arg.extend_integer_width_to(bits);
}
fn float_reg<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &ArgType<'tcx>, i: usize) -> Option<Reg> {
fn float_reg<'a, Ty, C>(cx: C, ret: &ArgType<'a, Ty>, i: usize) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
match ret.layout.field(cx, i).abi {
layout::Abi::Scalar(ref scalar) => match scalar.value {
layout::F32 => Some(Reg::f32()),
layout::F64 => Some(Reg::f64()),
abi::Abi::Scalar(ref scalar) => match scalar.value {
abi::F32 => Some(Reg::f32()),
abi::F64 => Some(Reg::f64()),
_ => None
},
_ => None
}
}
fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !ret.layout.is_aggregate() {
extend_integer_width_mips(ret, 64);
return;
......@@ -52,7 +57,7 @@ fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>)
// use of float registers to structures (not unions) containing exactly one or two
// float fields.
if let layout::FieldPlacement::Arbitrary { .. } = ret.layout.fields {
if let abi::FieldPlacement::Arbitrary { .. } = ret.layout.fields {
if ret.layout.fields.count() == 1 {
if let Some(reg) = float_reg(cx, ret, 0) {
ret.cast_to(reg);
......@@ -78,27 +83,30 @@ fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>)
}
}
fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !arg.layout.is_aggregate() {
extend_integer_width_mips(arg, 64);
return;
}
let dl = &cx.tcx.data_layout;
let dl = cx.data_layout();
let size = arg.layout.size;
let mut prefix = [None; 8];
let mut prefix_index = 0;
match arg.layout.fields {
layout::FieldPlacement::Array { .. } => {
abi::FieldPlacement::Array { .. } => {
// Arrays are passed indirectly
arg.make_indirect();
return;
}
layout::FieldPlacement::Union(_) => {
abi::FieldPlacement::Union(_) => {
// Unions are always treated as a series of 64-bit integer chunks
},
layout::FieldPlacement::Arbitrary { .. } => {
abi::FieldPlacement::Arbitrary { .. } => {
// Structures are split up into a series of 64-bit integer chunks, but any aligned
// doubles not part of another aggregate are passed as floats.
let mut last_offset = Size::from_bytes(0);
......@@ -108,8 +116,8 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
let offset = arg.layout.fields.offset(i);
// We only care about aligned doubles
if let layout::Abi::Scalar(ref scalar) = field.abi {
if let layout::F64 = scalar.value {
if let abi::Abi::Scalar(ref scalar) = field.abi {
if let abi::F64 = scalar.value {
if offset.is_abi_aligned(dl.f64_align) {
// Insert enough integers to cover [last_offset, offset)
assert!(last_offset.is_abi_aligned(dl.f64_align));
......@@ -143,7 +151,10 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
});
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
......
......@@ -19,7 +19,7 @@
// returned by reference. To pass a structure or union by reference, the caller
// places its address in the appropriate location: either in a register or on
// the stack, according to its position in the argument list. (..)"
fn classify_ret_ty(ret: &mut ArgType) {
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
ret.make_indirect();
} else {
......@@ -27,7 +27,7 @@ fn classify_ret_ty(ret: &mut ArgType) {
}
}
fn classify_arg_ty(arg: &mut ArgType) {
fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
arg.make_indirect();
} else {
......@@ -35,7 +35,7 @@ fn classify_arg_ty(arg: &mut ArgType) {
}
}
pub fn compute_abi_info(fty: &mut FnType) {
pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
......
......@@ -13,7 +13,7 @@
use abi::{ArgType, FnType, LayoutExt};
fn classify_ret_ty(ret: &mut ArgType) {
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
ret.make_indirect();
} else {
......@@ -21,7 +21,7 @@ fn classify_ret_ty(ret: &mut ArgType) {
}
}
fn classify_arg_ty(arg: &mut ArgType) {
fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
arg.make_indirect();
} else {
......@@ -29,7 +29,7 @@ fn classify_arg_ty(arg: &mut ArgType) {
}
}
pub fn compute_abi_info(fty: &mut FnType) {
pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
......
......@@ -13,7 +13,7 @@
use abi::{ArgType, FnType, LayoutExt};
fn classify_ret_ty(ret: &mut ArgType) {
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
ret.make_indirect();
} else {
......@@ -21,7 +21,7 @@ fn classify_ret_ty(ret: &mut ArgType) {
}
}
fn classify_arg_ty(arg: &mut ArgType) {
fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
arg.make_indirect();
} else {
......@@ -29,7 +29,7 @@ fn classify_arg_ty(arg: &mut ArgType) {
}
}
pub fn compute_abi_info(fty: &mut FnType) {
pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
......
......@@ -9,23 +9,24 @@
// except according to those terms.
use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
use context::CodegenCx;
use rustc::ty::layout::Size;
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
ret: &mut ArgType<'tcx>,
offset: &mut Size) {
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
} else {
ret.make_indirect();
*offset += cx.tcx.data_layout.pointer_size;
*offset += cx.data_layout().pointer_size;
}
}
fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
let dl = &cx.tcx.data_layout;
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
......@@ -44,7 +45,9 @@ fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
*offset = offset.abi_align(align) + size.abi_align(align);
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::from_bytes(0);
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
......
......@@ -13,8 +13,8 @@
// need to be fixed when PowerPC vector support is added.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
use context::CodegenCx;
use rustc::ty::layout;
use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
#[derive(Debug, Clone, Copy, PartialEq)]
enum ABI {
......@@ -23,10 +23,11 @@ enum ABI {
}
use self::ABI::*;
fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
arg: &mut ArgType<'tcx>,
abi: ABI)
-> Option<Uniform> {
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
// ELFv1 only passes one-member aggregates transparently.
// ELFv2 passes up to eight uniquely addressable members.
......@@ -52,7 +53,10 @@ fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
})
}
fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>, abi: ABI) {
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(64);
return;
......@@ -92,7 +96,10 @@ fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>,
ret.make_indirect();
}
fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>, abi: ABI) {
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(64);
return;
......@@ -112,7 +119,7 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>,
if size.bits() <= 64 {
(Reg { kind: RegKind::Integer, size }, size)
} else {
let align = layout::Align::from_bits(64, 64).unwrap();
let align = Align::from_bits(64, 64).unwrap();
(Reg::i64(), size.abi_align(align))
}
},
......@@ -128,11 +135,13 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>,
});
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
let abi = match cx.sess().target.target.target_endian.as_str() {
"big" => ELFv1,
"little" => ELFv2,
_ => unimplemented!(),
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
let abi = match cx.data_layout().endian {
Endian::Big => ELFv1,
Endian::Little => ELFv2,
};
if !fty.ret.is_ignore() {
......
......@@ -12,11 +12,12 @@
// for a pre-z13 machine or using -mno-vx.
use abi::{FnType, ArgType, LayoutExt, Reg};
use context::CodegenCx;
use rustc::ty::layout::{self, TyLayout};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn classify_ret_ty(ret: &mut ArgType) {
fn classify_ret_ty<'a, Ty, C>(ret: &mut ArgType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
ret.extend_integer_width_to(64);
} else {
......@@ -24,16 +25,18 @@ fn classify_ret_ty(ret: &mut ArgType) {
}
}
fn is_single_fp_element<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
layout: TyLayout<'tcx>) -> bool {
fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
where Ty: TyLayoutMethods<'a, C>,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
match layout.abi {
layout::Abi::Scalar(ref scalar) => {
abi::Abi::Scalar(ref scalar) => {
match scalar.value {
layout::F32 | layout::F64 => true,
abi::F32 | abi::F64 => true,
_ => false
}
}
layout::Abi::Aggregate { .. } => {
abi::Abi::Aggregate { .. } => {
if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
is_single_fp_element(cx, layout.field(cx, 0))
} else {
......@@ -44,7 +47,10 @@ fn is_single_fp_element<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
}
fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
arg.extend_integer_width_to(64);
return;
......@@ -67,7 +73,10 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
}
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
......
......@@ -9,23 +9,24 @@
// except according to those terms.
use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
use context::CodegenCx;
use rustc::ty::layout::Size;
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
ret: &mut ArgType<'tcx>,
offset: &mut Size) {
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
} else {
ret.make_indirect();
*offset += cx.tcx.data_layout.pointer_size;
*offset += cx.data_layout().pointer_size;
}
}
fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
let dl = &cx.tcx.data_layout;
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
......@@ -44,7 +45,9 @@ fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
*offset = offset.abi_align(align) + size.abi_align(align);
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::from_bytes(0);
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
......
......@@ -11,10 +11,13 @@
// FIXME: This needs an audit for correctness and completeness.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
use context::CodegenCx;
use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
-> Option<Uniform> {
fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
// Ensure we have at most eight uniquely addressable members.
if arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
......@@ -38,7 +41,10 @@ fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgTyp
})
}
fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(64);
return;
......@@ -72,7 +78,10 @@ fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>)
ret.make_indirect();
}
fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(64);
return;
......@@ -95,7 +104,10 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
});
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
......
......@@ -9,19 +9,18 @@
// except according to those terms.
use abi::{FnType, ArgType};
use context::CodegenCx;
fn classify_ret_ty<'a, 'tcx>(_cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
ret.extend_integer_width_to(32);
}
fn classify_arg_ty(arg: &mut ArgType) {
fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
arg.extend_integer_width_to(32);
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
classify_ret_ty(&mut fty.ret);
}
for arg in &mut fty.args {
......
......@@ -9,9 +9,8 @@
// except according to those terms.
use abi::{ArgAttribute, FnType, LayoutExt, PassMode, Reg, RegKind};
use common::CodegenCx;
use rustc::ty::layout::{self, TyLayout};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use rustc_target::spec::HasTargetSpec;
#[derive(PartialEq)]
pub enum Flavor {
......@@ -19,16 +18,18 @@ pub enum Flavor {
Fastcall
}
fn is_single_fp_element<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
layout: TyLayout<'tcx>) -> bool {
fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
match layout.abi {
layout::Abi::Scalar(ref scalar) => {
abi::Abi::Scalar(ref scalar) => {
match scalar.value {
layout::F32 | layout::F64 => true,
abi::F32 | abi::F64 => true,
_ => false
}
}
layout::Abi::Aggregate { .. } => {
abi::Abi::Aggregate { .. } => {
if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
is_single_fp_element(cx, layout.field(cx, 0))
} else {
......@@ -39,9 +40,10 @@ fn is_single_fp_element<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
fty: &mut FnType<'tcx>,
flavor: Flavor) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>, flavor: Flavor)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
if !fty.ret.is_ignore() {
if fty.ret.layout.is_aggregate() {
// Returning a structure. Most often, this will use
......@@ -51,7 +53,7 @@ pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
// Some links:
// http://www.angelcode.com/dev/callconv/callconv.html
// Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
let t = &cx.sess().target.target;
let t = cx.target_spec();
if t.options.abi_return_struct_as_int {
// According to Clang, everyone but MSVC returns single-element
// float aggregates directly in a floating-point register.
......
......@@ -12,9 +12,7 @@
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
use abi::{ArgType, CastTarget, FnType, LayoutExt, Reg, RegKind};
use context::CodegenCx;
use rustc::ty::layout::{self, TyLayout, Size};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
/// Classification of "eightbyte" components.
// NB: the order of the variants is from general to specific,
......@@ -33,13 +31,16 @@ enum Class {
const LARGEST_VECTOR_SIZE: usize = 512;
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
fn classify_arg<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &ArgType<'tcx>)
-> Result<[Option<Class>; MAX_EIGHTBYTES], Memory> {
fn classify<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
layout: TyLayout<'tcx>,
cls: &mut [Option<Class>],
off: Size)
-> Result<(), Memory> {
fn classify_arg<'a, Ty, C>(cx: C, arg: &ArgType<'a, Ty>)
-> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
fn classify<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>,
cls: &mut [Option<Class>], off: Size) -> Result<(), Memory>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !off.is_abi_aligned(layout.align) {
if !layout.is_zst() {
return Err(Memory);
......@@ -48,31 +49,31 @@ fn classify<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
let mut c = match layout.abi {
layout::Abi::Uninhabited => return Ok(()),
abi::Abi::Uninhabited => return Ok(()),
layout::Abi::Scalar(ref scalar) => {
abi::Abi::Scalar(ref scalar) => {
match scalar.value {
layout::Int(..) |
layout::Pointer => Class::Int,
layout::F32 |
layout::F64 => Class::Sse
abi::Int(..) |
abi::Pointer => Class::Int,
abi::F32 |
abi::F64 => Class::Sse
}
}
layout::Abi::Vector { .. } => Class::Sse,
abi::Abi::Vector { .. } => Class::Sse,
layout::Abi::ScalarPair(..) |
layout::Abi::Aggregate { .. } => {
abi::Abi::ScalarPair(..) |
abi::Abi::Aggregate { .. } => {
match layout.variants {
layout::Variants::Single { .. } => {
abi::Variants::Single { .. } => {
for i in 0..layout.fields.count() {
let field_off = off + layout.fields.offset(i);
classify(cx, layout.field(cx, i), cls, field_off)?;
}
return Ok(());
}
layout::Variants::Tagged { .. } |
layout::Variants::NicheFilling { .. } => return Err(Memory),
abi::Variants::Tagged { .. } |
abi::Variants::NicheFilling { .. } => return Err(Memory),
}
}
......@@ -178,11 +179,14 @@ fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
target
}
pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9
let mut sse_regs = 8; // XMM0-7
let mut x86_64_ty = |arg: &mut ArgType<'tcx>, is_arg: bool| {
let mut x86_64_ty = |arg: &mut ArgType<'a, Ty>, is_arg: bool| {
let mut cls_or_mem = classify_arg(cx, arg);
let mut needed_int = 0;
......
......@@ -10,16 +10,16 @@
use abi::{ArgType, FnType, Reg};
use rustc::ty::layout;
use rustc_target::abi;
// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
pub fn compute_abi_info(fty: &mut FnType) {
let fixup = |a: &mut ArgType| {
pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
let fixup = |a: &mut ArgType<Ty>| {
match a.layout.abi {
layout::Abi::Uninhabited => {}
layout::Abi::ScalarPair(..) |
layout::Abi::Aggregate { .. } => {
abi::Abi::Uninhabited => {}
abi::Abi::ScalarPair(..) |
abi::Abi::Aggregate { .. } => {
match a.layout.size.bits() {
8 => a.cast_to(Reg::i8()),
16 => a.cast_to(Reg::i16()),
......@@ -28,11 +28,11 @@ pub fn compute_abi_info(fty: &mut FnType) {
_ => a.make_indirect()
}
}
layout::Abi::Vector { .. } => {
abi::Abi::Vector { .. } => {
// FIXME(eddyb) there should be a size cap here
// (probably what clang calls "illegal vectors").
}
layout::Abi::Scalar(_) => {
abi::Abi::Scalar(_) => {
if a.layout.size.bytes() > 8 {
a.make_indirect();
} else {
......
......@@ -31,6 +31,7 @@
use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::FxHashMap;
use rustc_target::spec::{HasTargetSpec, Target};
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
......@@ -453,6 +454,12 @@ fn data_layout(&self) -> &ty::layout::TargetDataLayout {
}
}
impl<'a, 'tcx> HasTargetSpec for &'a CodegenCx<'a, 'tcx> {
fn target_spec(&self) -> &Target {
&self.tcx.sess.target.target
}
}
impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'a, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
self.tcx
......