// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, AttributePlace};
use base;
use builder::{Builder, MemFlags};
use common::{ty_fn_sig, C_usize};
use context::CodegenCx;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;

use rustc_target::abi::{LayoutOf, Size, TyLayout};
use rustc::ty::{self, Ty};
use rustc::ty::layout;

use libc::c_uint;

pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;

macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}

pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value);
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value);
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn,
                                                         idx.as_uint(),
                                                         deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
                                                               idx.as_uint(),
                                                               deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn,
                                               idx.as_uint(),
                                               align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }

    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
                                                                 idx.as_uint(),
                                                                 deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
                                                                       idx.as_uint(),
                                                                       deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
                                                       idx.as_uint(),
                                                       align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}

pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
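        // Illustrative examples (added, not in the original source): an 8-byte
        // `RegKind::Integer` lowers to `i64`, a 4-byte `RegKind::Float` to
        // `float`, and a 16-byte `RegKind::Vector` to `<16 x i8>`.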
        match self.kind {
            RegKind::Integer => Type::ix(cx, self.size.bits()),
            RegKind::Float => {
                match self.size.bits() {
                    32 => Type::f32(cx),
                    64 => Type::f64(cx),
                    _ => bug!("unsupported float: {:?}", self)
                }
            }
            RegKind::Vector => {
                Type::vector(Type::i8(cx), self.size.bytes())
            }
        }
    }
}

impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (self.rest.total.bytes() / self.rest.unit.size.bytes(),
            self.rest.total.bytes() % self.rest.unit.size.bytes())
        };
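        // Worked example (added for clarity; values are illustrative): a 20-byte
        // aggregate with `rest.unit` an 8-byte integer and an empty prefix gives
        // `rest_count = 2` and `rem_bytes = 4`, so the code below produces the
        // LLVM type `{ i64, i64, i32 }`.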

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return Type::array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> =
            self.prefix.iter().flat_map(|option_kind| option_kind.map(
                    |kind| Reg { kind: kind, size: self.prefix_chunk }.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(Type::ix(cx, rem_bytes * 8));
        }

        Type::struct_(cx, &args, false)
    }
}

pub trait ArgTypeExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>);
    fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>);
}

impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
    /// Get the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e. the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Store a direct/indirect value described by this ArgType into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their destinations.
    fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>) {
        if self.is_ignore() {
            return;
        }
        let cx = bx.cx;
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized ArgType must be handled through store_fn_arg");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to());
                bx.store(val, cast_dst, self.layout.align);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type.  The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
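                // For example (illustrative, not from the original comment): a
                // Rust `(u32, u32)` returned from an `extern "C"` fn on x86_64
                // SysV is cast to a single `i64`; that `i64` is stored to the
                // scratch slot and then memcpy'd into the `{ i32, i32 }` place.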
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(cx);
                let scratch_align = cast.align(cx);
                let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ...and then memcpy it to the intended destination.
                base::call_memcpy(bx,
                                  bx.pointercast(dst.llval, Type::i8p(cx)),
                                  bx.pointercast(llscratch, Type::i8p(cx)),
                                  C_usize(cx, self.layout.size.bytes()),
                                  self.layout.align.min(scratch_align),
                                  MemFlags::empty());

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

    fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {},
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
                OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                self.store(bx, next(), dst);
            }
        }
    }
}

pub trait FnTypeExt<'tcx> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>)
                   -> Self;
    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self;
    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self;
    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi);
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>)
                       -> Self {
        let fn_ty = instance.ty(cx.tcx);
        let sig = ty_fn_sig(cx, fn_ty);
        let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        FnType::new(cx, sig, &[])
    }

    fn new(cx: &CodegenCx<'ll, 'tcx>,
               sig: ty::FnSig<'tcx>,
               extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, _| {
            ArgType::new(cx.layout_of(ty))
        })
    }

    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                      sig: ty::FnSig<'tcx>,
                      extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable; it's not an argument of the virtual fn.
            // Instead, pass just the (thin pointer) first field of `*dyn Trait`.
            if arg_idx == Some(0) {
                if layout.is_unsized() {
                    unimplemented!("by-value trait object is not \
                                    yet implemented in #![feature(unsized_locals)]");
                }
                // FIXME(eddyb) `layout.field(cx, 0)` is not enough because e.g.
                // `Box<dyn Trait>` has a few newtype wrappers around the raw
                // pointer, so we'd have to "dig down" to find `*dyn Trait`.
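                // Illustrative example (added): for `self: &dyn Trait`, `layout`
                // here is the fat pointer `{ data, vtable }`; rebuilding it as
                // `*mut dyn Trait` below and taking field 0 leaves only the thin
                // data pointer.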
                let pointee = layout.ty.builtin_deref(true)
                    .unwrap_or_else(|| {
                        bug!("FnType::new_vtable: non-pointer self {:?}", layout)
                    }).ty;
                let fat_ptr_ty = cx.tcx.mk_mut_ptr(pointee);
                layout = cx.layout_of(fat_ptr_ty).field(cx, 0);
            }
            ArgType::new(layout)
        })
    }

    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);

        use self::Abi::*;
        let conv = match cx.sess().target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::TyTuple(ref tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.variadic || extra_args.is_empty());
            extra_args
        };
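        // Example (added for clarity): for `extern "rust-call" fn(self, args: (A, B))`,
        // `inputs` is reduced to just the `self` type and `extra_args` becomes `[A, B]`.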

        let target = &cx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
                       && target.arch == "x86_64"
                       && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
                       && target.arch == "s390x"
                       && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &layout::Scalar,
                                      layout: TyLayout<'tcx, Ty<'tcx>>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types handled below.
            if scalar.value != layout::Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions
                    // with align attributes, and those calls later block optimizations.
                    if !is_return && !cx.tcx.sess.opts.debugging_opts.arg_align_attributes {
                        attrs.pointee_align = None;
                    }

                    // `Box` pointer parameters never alias because ownership is transferred
                    //
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen |
                        PointerKind::UniqueBorrowed => !is_return
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
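                    // Net effect (summary added for clarity): `&T` without
                    // `UnsafeCell` gets `noalias` + `readonly` on arguments,
                    // `&mut T` gets `noalias` on arguments, `Box<T>` gets
                    // `noalias` even for returns, and shared pointers to
                    // interior-mutable data get neither.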
                }
            }
        };

        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs,
                                           a,
                                           arg.layout,
                                           Size::ZERO,
                                           false);
                    adjust_for_rust_scalar(&mut b_attrs,
                                           b,
                                           arg.layout,
                                           a.value.size(cx).abi_align(b.value.align(cx)),
                                           false);
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs,
                                           scalar,
                                           arg.layout,
                                           Size::ZERO,
                                           is_return);
                }
            }

            arg
        };

        let mut fn_ty = FnType {
            ret: arg_of(sig.output(), None),
            args: inputs.iter().chain(extra_args).enumerate().map(|(i, ty)| {
                arg_of(ty, Some(i))
            }).collect(),
            variadic: sig.variadic,
            conv,
        };
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }

    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi) {
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() { return; }

                match arg.layout.abi {
                    layout::Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway; we control all calls to it in libstd.
                    layout::Abi::Vector { .. } if abi != Abi::PlatformIntrinsic => {
                        arg.make_indirect();
                        return
                    }

                    _ => return
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > layout::Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
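                    // E.g. (illustrative): a 3-byte struct is passed as a single
                    // `i24` immediate here, while anything larger than a pointer
                    // is made indirect above.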
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.sess().fatal(&msg);
        }
    }

    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
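        // Illustrative example (not from the original source): `fn(bool, &u32) -> u64`
        // under the Rust ABI lowers here to the LLVM signature `i64 (i1, i32*)`.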
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => Type::void(cx),
            PassMode::Direct(_) | PassMode::Pair(..) => {
                self.ret.layout.immediate_llvm_type(cx)
            }
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(..) => {
                llargument_tys.push(self.ret.memory_ty(cx).ptr_to());
                Type::void(cx)
            }
        };

        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect(_, Some(_)) => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_, None) => arg.memory_ty(cx).ptr_to(),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            Type::variadic_func(&llargument_tys, llreturn_ty)
        } else {
            Type::func(&llargument_tys, llreturn_ty)
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, llfn: &'ll Value) {
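        // E.g. (illustrative): for `fn(x: &mut u32)` this ends up attaching
        // `noalias` and `dereferenceable(4)` to the first LLVM argument.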
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }
    }

    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            match scalar.value {
                layout::Int(..) if !scalar.is_bool() => {
                    let range = scalar.valid_range_exclusive(bx.cx);
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
                _ => {}
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}