// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector};
use base;
use build::AllocaFcx;
use common::{type_is_fat_ptr, BlockAndBuilder, C_uint};
use context::CrateContext;
use cabi_x86;
use cabi_x86_64;
use cabi_x86_win64;
use cabi_arm;
use cabi_aarch64;
use cabi_powerpc;
use cabi_powerpc64;
use cabi_s390x;
use cabi_mips;
use cabi_mips64;
use cabi_asmjs;
use machine::{llalign_of_min, llsize_of, llsize_of_alloc};
use type_::Type;
use type_of;

use rustc::hir;
use rustc::ty::{self, Ty};

use libc::c_uint;
use std::cmp;

pub use syntax::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc::ty::layout::Layout;

/// How a single argument (or return value) is passed at the LLVM level.
#[derive(Clone, Copy, PartialEq, Debug)]
enum ArgKind {
    /// Pass the argument directly using the normal converted
    /// LLVM type or by coercing to another specified type
    Direct,
    /// Pass the argument indirectly via a hidden pointer
    Indirect,
    /// Ignore the argument (useful for empty struct)
    Ignore,
}

/// Information about how a specific C type
/// should be passed to or returned from a function
///
/// This is borrowed from clang's ABIInfo.h
#[derive(Clone, Copy, Debug)]
pub struct ArgType {
    // Private: starts as Direct and is only changed through
    // `make_indirect()` / `ignore()`, which enforce valid transitions.
    kind: ArgKind,
    /// Original LLVM type
    pub original_ty: Type,
    /// Sizing LLVM type (pointers are opaque).
    /// Unlike original_ty, this is guaranteed to be complete.
    ///
    /// For example, while we're computing the function pointer type in
    /// `struct Foo(fn(Foo));`, `original_ty` is still LLVM's `%Foo = {}`.
    /// The field type will likely end up being `void(%Foo)*`, but we cannot
    /// use `%Foo` to compute properties (e.g. size and alignment) of `Foo`,
    /// until `%Foo` is completed by having all of its field types inserted,
    /// so `ty` holds the "sizing type" of `Foo`, which replaces all pointers
    /// with opaque ones, resulting in `{i8*}` for `Foo`.
    /// ABI-specific logic can then look at the size, alignment and fields of
    /// `{i8*}` in order to determine how the argument will be passed.
    /// Only later will `original_ty` aka `%Foo` be used in the LLVM function
    /// pointer type, without ever having introspected it.
    pub ty: Type,
    /// Signedness for integer types, None for other types
    pub signedness: Option<bool>,
    /// Coerced LLVM Type
    pub cast: Option<Type>,
    /// Dummy argument, which is emitted before the real argument
    pub pad: Option<Type>,
    /// LLVM attributes of argument
    pub attrs: llvm::Attributes
}

impl ArgType {
    /// Creates a directly-passed argument with no attributes and no cast.
    fn new(original_ty: Type, ty: Type) -> ArgType {
        ArgType {
            kind: ArgKind::Direct,
            original_ty: original_ty,
            ty: ty,
            signedness: None,
            cast: None,
            pad: None,
            attrs: llvm::Attributes::default()
        }
    }

    /// Switches this argument from direct to indirect passing (via a
    /// hidden pointer), replacing its attributes with the ones valid
    /// for a callee-owned stack copy. Panics if already adjusted.
    pub fn make_indirect(&mut self, ccx: &CrateContext) {
        assert_eq!(self.kind, ArgKind::Direct);

        // Wipe old attributes, likely not valid through indirection.
        self.attrs = llvm::Attributes::default();

        let llarg_sz = llsize_of_alloc(ccx, self.ty);

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. It's also
        // program-invisible so can't possibly capture
        self.attrs.set(llvm::Attribute::NoAlias)
                  .set(llvm::Attribute::NoCapture)
                  .set_dereferenceable(llarg_sz);

        self.kind = ArgKind::Indirect;
    }

    /// Marks this argument as ignored (omitted from the LLVM signature),
    /// e.g. for zero-sized types. Panics if already adjusted.
    pub fn ignore(&mut self) {
        assert_eq!(self.kind, ArgKind::Direct);
        self.kind = ArgKind::Ignore;
    }

    /// If this is an integer narrower than `bits`, requests zero- or
    /// sign-extension (per its signedness) from the caller/callee.
    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness
        if let Some(signed) = self.signedness {
            if self.ty.int_width() < bits {
                self.attrs.set(if signed {
                    llvm::Attribute::SExt
                } else {
                    llvm::Attribute::ZExt
                });
            }
        }
    }

    /// True if the argument is passed via a hidden pointer.
    pub fn is_indirect(&self) -> bool {
        self.kind == ArgKind::Indirect
    }

    /// True if the argument is omitted from the LLVM signature.
    pub fn is_ignore(&self) -> bool {
        self.kind == ArgKind::Ignore
    }

    /// Get the LLVM type for an lvalue of the original Rust type of
    /// this argument/return, i.e. the result of `type_of::type_of`.
    pub fn memory_ty(&self, ccx: &CrateContext) -> Type {
        // `bool` is stored in memory as i8, even though it's i1 as an
        // immediate (see the zext in `store` below).
        if self.original_ty == Type::i1(ccx) {
            Type::i8(ccx)
        } else {
            self.original_ty
        }
    }

    /// Store a direct/indirect value described by this ArgType into a
    /// lvalue for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
        if self.is_ignore() {
            return;
        }
        let ccx = bcx.ccx();
        if self.is_indirect() {
            // Indirect: `val` is a pointer to the value; copy the bytes.
            let llsz = llsize_of(ccx, self.ty);
            let llalign = llalign_of_min(ccx, self.ty);
            base::call_memcpy(bcx, dst, val, llsz, llalign as u32);
        } else if let Some(ty) = self.cast {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_dst = bcx.pointercast(dst, ty.ptr_to());
                let store = bcx.store(val, cast_dst);
                let llalign = llalign_of_min(ccx, self.ty);
                unsafe {
                    llvm::LLVMSetAlignment(store, llalign);
                }
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type.  The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let llscratch = AllocaFcx(bcx.fcx(), ty, "abi_cast");
                base::Lifetime::Start.call(bcx, llscratch);

                // ...where we first store the value...
                bcx.store(val, llscratch);

                // ...and then memcpy it to the intended destination.
                base::call_memcpy(bcx,
                                  bcx.pointercast(dst, Type::i8p(ccx)),
                                  bcx.pointercast(llscratch, Type::i8p(ccx)),
                                  C_uint(ccx, llsize_of_alloc(ccx, self.ty)),
                                  cmp::min(llalign_of_min(ccx, self.ty),
                                           llalign_of_min(ccx, ty)) as u32);

                base::Lifetime::End.call(bcx, llscratch);
            }
        } else {
            // Direct, uncast value: widen i1 (bool) to its i8 memory form.
            if self.original_ty == Type::i1(ccx) {
                val = bcx.zext(val, Type::i8(ccx));
            }
            bcx.store(val, dst);
        }
    }

    /// Stores the formal LLVM parameter at `*idx` into `dst`, advancing
    /// `*idx` past this argument (and past its padding arg, if any).
    pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) {
        // A pad argument occupies its own parameter slot; skip it.
        if self.pad.is_some() {
            *idx += 1;
        }
        if self.is_ignore() {
            return;
        }
        let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint);
        *idx += 1;
        self.store(bcx, val, dst);
    }
}

/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Clone)]
pub struct FnType {
    /// The LLVM types of each argument.
    pub args: Vec<ArgType>,

    /// LLVM return type.
    pub ret: ArgType,

    /// Whether the signature is C-variadic (affects the LLVM function
    /// type chosen in `llvm_type`).
    pub variadic: bool,

    /// Calling convention to use for the definition and at call sites.
    pub cconv: llvm::CallConv
}

impl FnType {
    /// Computes the fully ABI-adjusted `FnType` for a signature:
    /// the unadjusted layout followed by target/ABI-specific fixups.
    pub fn new<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                         abi: Abi,
                         sig: &ty::FnSig<'tcx>,
                         extra_args: &[Ty<'tcx>]) -> FnType {
        let mut fn_ty = FnType::unadjusted(ccx, abi, sig, extra_args);
        fn_ty.adjust_for_abi(ccx, abi, sig);
        fn_ty
    }

    /// Builds the `FnType` before any target-specific ABI adjustment:
    /// selects the calling convention, untuples "rust-call" arguments,
    /// ignores zero-sized arguments where permitted, and attaches
    /// Rust-specific pointer attributes (noalias/readonly/nocapture/
    /// dereferenceable/nonnull).
    pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                abi: Abi,
                                sig: &ty::FnSig<'tcx>,
                                extra_args: &[Ty<'tcx>]) -> FnType {
        use self::Abi::*;
        let cconv = match ccx.sess().target.target.adjust_abi(abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => llvm::CCallConv,

            // It's the ABI's job to select this, not us.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => llvm::X86StdcallCallConv,
            Fastcall => llvm::X86FastcallCallConv,
            Vectorcall => llvm::X86_VectorCall,
            C => llvm::CCallConv,
            Win64 => llvm::X86_64_Win64,
            SysV64 => llvm::X86_64_SysV,

            // These API constants ought to be more specific...
            Cdecl => llvm::CCallConv,
            Aapcs => llvm::CCallConv,
        };

        let mut inputs = &sig.inputs[..];
        let extra_args = if abi == RustCall {
            assert!(!sig.variadic && extra_args.is_empty());

            // "rust-call" passes its arguments as one trailing tuple;
            // flatten that tuple into the extra-argument list.
            match inputs[inputs.len() - 1].sty {
                ty::TyTuple(ref tupled_arguments) => {
                    inputs = &inputs[..inputs.len() - 1];
                    &tupled_arguments[..]
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.variadic || extra_args.is_empty());
            extra_args
        };

        let target = &ccx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
                       && target.arch == "x86_64"
                       && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
                       && target.arch == "s390x"
                       && target.target_env == "gnu";
        let rust_abi = match abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        // Builds the basic ArgType for one input or the return value.
        let arg_of = |ty: Ty<'tcx>, is_return: bool| {
            if ty.is_bool() {
                // bool is i1 immediate, zero-extended at the boundary.
                let llty = Type::i1(ccx);
                let mut arg = ArgType::new(llty, llty);
                arg.attrs.set(llvm::Attribute::ZExt);
                arg
            } else {
                let mut arg = ArgType::new(type_of::type_of(ccx, ty),
                                           type_of::sizing_type_of(ccx, ty));
                if ty.is_integral() {
                    arg.signedness = Some(ty.is_signed());
                }
                // Rust enum types that map onto C enums also need to follow
                // the target ABI zero-/sign-extension rules.
                if let Layout::CEnum { signed, .. } = *ccx.layout_of(ty) {
                    arg.signedness = Some(signed);
                }
                if llsize_of_alloc(ccx, arg.ty) == 0 {
                    // For some forsaken reason, x86_64-pc-windows-gnu
                    // doesn't ignore zero-sized struct arguments.
                    // The same is true for s390x-unknown-linux-gnu.
                    if is_return || rust_abi ||
                       (!win_x64_gnu && !linux_s390x) {
                        arg.ignore();
                    }
                }
                arg
            }
        };

        let ret_ty = sig.output;
        let mut ret = arg_of(ret_ty, true);

        if !type_is_fat_ptr(ccx.tcx(), ret_ty) {
            // The `noalias` attribute on the return value is useful to a
            // function ptr caller.
            if let ty::TyBox(_) = ret_ty.sty {
                // `Box` pointer return values never alias because ownership
                // is transferred
                ret.attrs.set(llvm::Attribute::NoAlias);
            }

            // We can also mark the return value as `dereferenceable` in certain cases
            match ret_ty.sty {
                // These are not really pointers but pairs, (pointer, len)
                ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
                ty::TyBox(ty) => {
                    let llty = type_of::sizing_type_of(ccx, ty);
                    let llsz = llsize_of_alloc(ccx, llty);
                    ret.attrs.set_dereferenceable(llsz);
                }
                _ => {}
            }
        }

        let mut args = Vec::with_capacity(inputs.len() + extra_args.len());

        // Handle safe Rust thin and fat pointers.
        // Returns the pointee type when attributes were applied.
        let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty {
            // `Box` pointer parameters never alias because ownership is transferred
            ty::TyBox(inner) => {
                arg.attrs.set(llvm::Attribute::NoAlias);
                Some(inner)
            }

            ty::TyRef(b, mt) => {
                use rustc::ty::{BrAnon, ReLateBound};

                // `&mut` pointer parameters never alias other parameters, or mutable global data
                //
                // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
                // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
                // on memory dependencies rather than pointer equality
                let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();

                if mt.mutbl != hir::MutMutable && !interior_unsafe {
                    arg.attrs.set(llvm::Attribute::NoAlias);
                }

                if mt.mutbl == hir::MutImmutable && !interior_unsafe {
                    arg.attrs.set(llvm::Attribute::ReadOnly);
                }

                // When a reference in an argument has no named lifetime, it's
                // impossible for that reference to escape this function
                // (returned or stored beyond the call by a closure).
                if let ReLateBound(_, BrAnon(_)) = *b {
                    arg.attrs.set(llvm::Attribute::NoCapture);
                }

                Some(mt.ty)
            }
            _ => None
        };

        for ty in inputs.iter().chain(extra_args.iter()) {
            let mut arg = arg_of(ty, false);

            if type_is_fat_ptr(ccx.tcx(), ty) {
                // Fat pointers are split into their (data, info) halves,
                // each passed as a separate LLVM argument.
                let original_tys = arg.original_ty.field_types();
                let sizing_tys = arg.ty.field_types();
                assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2));

                let mut data = ArgType::new(original_tys[0], sizing_tys[0]);
                let mut info = ArgType::new(original_tys[1], sizing_tys[1]);

                if let Some(inner) = rust_ptr_attrs(ty, &mut data) {
                    data.attrs.set(llvm::Attribute::NonNull);
                    if ccx.tcx().struct_tail(inner).is_trait() {
                        // vtable pointers are also guaranteed non-null.
                        info.attrs.set(llvm::Attribute::NonNull);
                    }
                }
                args.push(data);
                args.push(info);
            } else {
                if let Some(inner) = rust_ptr_attrs(ty, &mut arg) {
                    let llty = type_of::sizing_type_of(ccx, inner);
                    let llsz = llsize_of_alloc(ccx, llty);
                    arg.attrs.set_dereferenceable(llsz);
                }
                args.push(arg);
            }
        }

        FnType {
            args: args,
            ret: ret,
            variadic: sig.variadic,
            cconv: cconv
        }
    }

    /// Applies ABI-specific adjustments: Rust ABIs get the generic
    /// "unpack newtypes / small aggregates as integers / big aggregates
    /// indirect" treatment; foreign ABIs dispatch to the per-target
    /// `cabi_*` modules. Also sets `sret` on indirect returns.
    pub fn adjust_for_abi<'a, 'tcx>(&mut self,
                                    ccx: &CrateContext<'a, 'tcx>,
                                    abi: Abi,
                                    sig: &ty::FnSig<'tcx>) {
        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType| {
                let mut llty = arg.ty;

                // Replace newtypes with their inner-most type.
                while llty.kind() == llvm::TypeKind::Struct {
                    let inner = llty.field_types();
                    if inner.len() != 1 {
                        break;
                    }
                    llty = inner[0];
                }

                if !llty.is_aggregate() {
                    // Scalars and vectors, always immediate.
                    if llty != arg.ty {
                        // Needs a cast as we've unpacked a newtype.
                        arg.cast = Some(llty);
                    }
                    return;
                }

                let size = llsize_of_alloc(ccx, llty);
                if size > llsize_of_alloc(ccx, ccx.int_type()) {
                    // Larger than a pointer-sized int: pass indirectly.
                    arg.make_indirect(ccx);
                } else if size > 0 {
                    // We want to pass small aggregates as immediates, but using
                    // a LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast = Some(Type::ix(ccx, size * 8));
                }
            };
            // Fat pointers are returned by-value.
            if !self.ret.is_ignore() {
                if !type_is_fat_ptr(ccx.tcx(), sig.output) {
                    fixup(&mut self.ret);
                }
            }
            for arg in &mut self.args {
                if arg.is_ignore() { continue; }
                fixup(arg);
            }
            if self.ret.is_indirect() {
                self.ret.attrs.set(llvm::Attribute::StructRet);
            }
            return;
        }

        match &ccx.sess().target.target.arch[..] {
            "x86" => cabi_x86::compute_abi_info(ccx, self),
            "x86_64" => if abi == Abi::SysV64 {
                cabi_x86_64::compute_abi_info(ccx, self);
            } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows {
                cabi_x86_win64::compute_abi_info(ccx, self);
            } else {
                cabi_x86_64::compute_abi_info(ccx, self);
            },
            "aarch64" => cabi_aarch64::compute_abi_info(ccx, self),
            "arm" => {
                let flavor = if ccx.sess().target.target.target_os == "ios" {
                    cabi_arm::Flavor::Ios
                } else {
                    cabi_arm::Flavor::General
                };
                cabi_arm::compute_abi_info(ccx, self, flavor);
            },
            "mips" => cabi_mips::compute_abi_info(ccx, self),
            "mips64" => cabi_mips64::compute_abi_info(ccx, self),
            "powerpc" => cabi_powerpc::compute_abi_info(ccx, self),
            "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self),
            "s390x" => cabi_s390x::compute_abi_info(ccx, self),
            "asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
            "wasm32" => cabi_asmjs::compute_abi_info(ccx, self),
            a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
        }

        if self.ret.is_indirect() {
            self.ret.attrs.set(llvm::Attribute::StructRet);
        }
    }

    /// Produces the LLVM function type for this signature: indirect
    /// returns become a leading pointer argument with a void return,
    /// ignored arguments are omitted, pad arguments are inserted, and
    /// casts replace the original types where present.
    pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
        let mut llargument_tys = Vec::new();

        let llreturn_ty = if self.ret.is_ignore() {
            Type::void(ccx)
        } else if self.ret.is_indirect() {
            llargument_tys.push(self.ret.original_ty.ptr_to());
            Type::void(ccx)
        } else {
            self.ret.cast.unwrap_or(self.ret.original_ty)
        };

        for arg in &self.args {
            if arg.is_ignore() {
                continue;
            }
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty);
            }

            let llarg_ty = if arg.is_indirect() {
                arg.original_ty.ptr_to()
            } else {
                arg.cast.unwrap_or(arg.original_ty)
            };

            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            Type::variadic_func(&llargument_tys, &llreturn_ty)
        } else {
            Type::func(&llargument_tys, &llreturn_ty)
        }
    }

    /// Applies the stored attributes to a function definition/declaration.
    /// NOTE(review): `i` appears to be a raw LLVM attribute index
    /// (0 = return slot, 1.. = params), so an indirect return's attrs
    /// land on the sret parameter — confirm against `llvm::AttributePlace`.
    pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
        if !self.ret.is_ignore() {
            self.ret.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
        }
        i += 1;
        for arg in &self.args {
            if !arg.is_ignore() {
                // Padding args occupy an index but carry no attributes.
                if arg.pad.is_some() { i += 1; }
                arg.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
                i += 1;
            }
        }
    }

    /// Applies the stored attributes and calling convention to a
    /// call/invoke instruction (same index scheme as `apply_attrs_llfn`).
    pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
        if !self.ret.is_ignore() {
            self.ret.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
        }
        i += 1;
        for arg in &self.args {
            if !arg.is_ignore() {
                // Padding args occupy an index but carry no attributes.
                if arg.pad.is_some() { i += 1; }
                arg.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
                i += 1;
            }
        }

        if self.cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, self.cconv);
        }
    }
}

/// Rounds `off` up to the next multiple of `a` (`a` must be non-zero;
/// `off + a - 1` must not overflow).
pub fn align_up_to(off: usize, a: usize) -> usize {
    let rounded = off + a - 1;
    rounded - rounded % a
}

/// Rounds `off` up to the alignment of `ty`, where pointers are assumed
/// to be `pointer` bytes wide.
fn align(off: usize, ty: Type, pointer: usize) -> usize {
    align_up_to(off, ty_align(ty, pointer))
}

/// Computes the ABI alignment, in bytes, of an LLVM type, with pointer
/// alignment supplied by the caller as `pointer` (in bytes).
/// Used by the per-target `cabi_*` modules.
pub fn ty_align(ty: Type, pointer: usize) -> usize {
    match ty.kind() {
        // Integers align to their size in bytes, rounded up.
        Integer => ((ty.int_width() as usize) + 7) / 8,
        Pointer => pointer,
        Float => 4,
        Double => 8,
        Struct => {
            if ty.is_packed() {
                1
            } else {
                // Alignment of the most-aligned field (at least 1).
                let str_tys = ty.field_types();
                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t, pointer)))
            }
        }
        Array => {
            let elt = ty.element_type();
            ty_align(elt, pointer)
        }
        Vector => {
            // NOTE(review): this computes element alignment * length,
            // which equals the vector's total size — presumably
            // intentional (naturally-aligned vectors), but confirm
            // against the targets using this helper.
            let len = ty.vector_length();
            let elt = ty.element_type();
            ty_align(elt, pointer) * len
        }
        _ => bug!("ty_align: unhandled type")
    }
}

/// Computes the ABI size, in bytes, of an LLVM type, with pointer size
/// supplied by the caller as `pointer` (in bytes).
/// Used by the per-target `cabi_*` modules.
pub fn ty_size(ty: Type, pointer: usize) -> usize {
    match ty.kind() {
        // Integers occupy their bit width rounded up to whole bytes.
        Integer => ((ty.int_width() as usize) + 7) / 8,
        Pointer => pointer,
        Float => 4,
        Double => 8,
        Struct => {
            if ty.is_packed() {
                // Packed: plain sum of field sizes, no padding.
                let str_tys = ty.field_types();
                str_tys.iter().fold(0, |s, t| s + ty_size(*t, pointer))
            } else {
                // Unpacked: pad each field to its alignment, then pad
                // the total to the struct's own alignment.
                let str_tys = ty.field_types();
                let size = str_tys.iter().fold(0, |s, t| {
                    align(s, *t, pointer) + ty_size(*t, pointer)
                });
                align(size, ty, pointer)
            }
        }
        Array => {
            let len = ty.array_length();
            let elt = ty.element_type();
            let eltsz = ty_size(elt, pointer);
            len * eltsz
        }
        Vector => {
            let len = ty.vector_length();
            let elt = ty.element_type();
            let eltsz = ty_size(elt, pointer);
            len * eltsz
        },
        _ => bug!("ty_size: unhandled type")
    }
}