trans.rs 197.4 KB
Newer Older
1
import std._int;
2
import std._str;
3
import std._uint;
4 5 6
import std._vec;
import std._str.rustrt.sbuf;
import std._vec.rustrt.vbuf;
7
import std.map;
8
import std.map.hashmap;
9 10 11
import std.option;
import std.option.some;
import std.option.none;
12

13
import front.ast;
14
import driver.session;
15
import middle.ty;
16
import back.x86;
17 18
import back.abi;

19
import middle.ty.pat_ty;
20
import middle.ty.plain_ty;
P
Patrick Walton 已提交
21

22
import util.common;
23
import util.common.append;
24
import util.common.istr;
25
import util.common.new_def_hash;
26
import util.common.new_str_hash;
27 28 29

import lib.llvm.llvm;
import lib.llvm.builder;
30
import lib.llvm.target_data;
31
import lib.llvm.type_handle;
32
import lib.llvm.type_names;
33
import lib.llvm.mk_pass_manager;
34
import lib.llvm.mk_target_data;
35
import lib.llvm.mk_type_handle;
36
import lib.llvm.mk_type_names;
37 38 39
import lib.llvm.llvm.ModuleRef;
import lib.llvm.llvm.ValueRef;
import lib.llvm.llvm.TypeRef;
40
import lib.llvm.llvm.TypeHandleRef;
41 42
import lib.llvm.llvm.BuilderRef;
import lib.llvm.llvm.BasicBlockRef;
43

44 45
import lib.llvm.False;
import lib.llvm.True;
46

47 48 49 50 51 52 53
state obj namegen(mutable int i) {
    fn next(str prefix) -> str {
        i += 1;
        ret prefix + istr(i);
    }
}

54 55
type glue_fns = rec(ValueRef activate_glue,
                    ValueRef yield_glue,
56
                    ValueRef exit_task_glue,
57
                    vec[ValueRef] upcall_glues,
58
                    ValueRef no_op_type_glue,
59
                    ValueRef memcpy_glue,
60
                    ValueRef bzero_glue,
61
                    ValueRef vec_append_glue);
62

63 64 65 66
type tydesc_info = rec(ValueRef tydesc,
                       ValueRef take_glue,
                       ValueRef drop_glue);

67
state type crate_ctxt = rec(session.session sess,
68
                            ModuleRef llmod,
69
                            target_data td,
70
                            type_names tn,
71
                            ValueRef crate_ptr,
72
                            hashmap[str, ValueRef] upcalls,
73
                            hashmap[str, ValueRef] intrinsics,
74 75
                            hashmap[str, ValueRef] item_names,
                            hashmap[ast.def_id, ValueRef] item_ids,
76
                            hashmap[ast.def_id, @ast.item] items,
G
Graydon Hoare 已提交
77 78
                            hashmap[ast.def_id,
                                    @ast.native_item] native_items,
79 80
                            // TODO: hashmap[tup(tag_id,subtys), @tag_info]
                            hashmap[@ty.t, uint] tag_sizes,
81
                            hashmap[ast.def_id, ValueRef] discrims,
82
                            hashmap[ast.def_id, ValueRef] fn_pairs,
83
                            hashmap[ast.def_id, ValueRef] consts,
84
                            hashmap[ast.def_id,()] obj_methods,
85
                            hashmap[@ty.t, @tydesc_info] tydescs,
86
                            vec[ast.ty_param] obj_typarams,
87
                            vec[ast.obj_field] obj_fields,
88 89 90
                            @glue_fns glues,
                            namegen names,
                            str path);
91

92 93
state type fn_ctxt = rec(ValueRef llfn,
                         ValueRef lltaskptr,
94 95
                         ValueRef llenv,
                         ValueRef llretptr,
96
                         mutable option.t[ValueRef] llself,
97
                         mutable option.t[ValueRef] lliterbody,
98
                         hashmap[ast.def_id, ValueRef] llargs,
99
                         hashmap[ast.def_id, ValueRef] llobjfields,
100
                         hashmap[ast.def_id, ValueRef] lllocals,
101
                         hashmap[ast.def_id, ValueRef] lltydescs,
102
                         @crate_ctxt ccx);
103

104
tag cleanup {
105
    clean(fn(@block_ctxt cx) -> result);
106 107
}

108 109 110 111 112 113

tag block_kind {
    SCOPE_BLOCK;
    NON_SCOPE_BLOCK;
}

114 115
state type block_ctxt = rec(BasicBlockRef llbb,
                            builder build,
116
                            block_parent parent,
117
                            block_kind kind,
118 119 120
                            mutable vec[cleanup] cleanups,
                            @fn_ctxt fcx);

121 122 123 124 125 126 127 128
// FIXME: we should be able to use option.t[@block_parent] here but
// the infinite-tag check in rustboot gets upset.

tag block_parent {
    parent_none;
    parent_some(@block_ctxt);
}

129

130 131 132
state type result = rec(mutable @block_ctxt bcx,
                        mutable ValueRef val);

133 134 135 136
fn sep() -> str {
    ret "_";
}

137 138 139 140 141
fn res(@block_ctxt bcx, ValueRef val) -> result {
    ret rec(mutable bcx = bcx,
            mutable val = val);
}

142 143
fn ty_str(type_names tn, TypeRef t) -> str {
    ret lib.llvm.type_to_str(tn, t);
144 145 146 147 148 149
}

fn val_ty(ValueRef v) -> TypeRef {
    ret llvm.LLVMTypeOf(v);
}

150 151
fn val_str(type_names tn, ValueRef v) -> str {
    ret ty_str(tn, val_ty(v));
152
}
153 154 155 156


// LLVM type constructors.

157 158 159 160 161 162 163 164 165 166 167
fn T_void() -> TypeRef {
    // Note: For the time being llvm is kinda busted here, it has the notion
    // of a 'void' type that can only occur as part of the signature of a
    // function, but no general unit type of 0-sized value. This is, afaict,
    // vestigial from its C heritage, and we'll be attempting to submit a
    // patch upstream to fix it. In the mean time we only model function
    // outputs (Rust functions and C functions) using T_void, and model the
    // Rust general purpose nil type you can construct as 1-bit (always
    // zero). This makes the result incorrect for now -- things like a tuple
    // of 10 nil values will have 10-bit size -- but it doesn't seem like we
    // have any other options until it's fixed upstream.
168 169 170
    ret llvm.LLVMVoidType();
}

171 172 173 174 175
fn T_nil() -> TypeRef {
    // NB: See above in T_void().
    ret llvm.LLVMInt1Type();
}

176 177 178 179
fn T_i1() -> TypeRef {
    ret llvm.LLVMInt1Type();
}

180 181 182 183 184 185 186 187 188
fn T_i8() -> TypeRef {
    ret llvm.LLVMInt8Type();
}

fn T_i16() -> TypeRef {
    ret llvm.LLVMInt16Type();
}

fn T_i32() -> TypeRef {
189 190 191
    ret llvm.LLVMInt32Type();
}

192 193 194 195
fn T_i64() -> TypeRef {
    ret llvm.LLVMInt64Type();
}

196 197 198 199 200 201 202 203
fn T_f32() -> TypeRef {
    ret llvm.LLVMFloatType();
}

fn T_f64() -> TypeRef {
    ret llvm.LLVMDoubleType();
}

204 205 206 207
fn T_bool() -> TypeRef {
    ret T_i1();
}

208 209 210 211 212
fn T_int() -> TypeRef {
    // FIXME: switch on target type.
    ret T_i32();
}

213 214 215 216
fn T_char() -> TypeRef {
    ret T_i32();
}

217 218 219 220
fn T_fn(vec[TypeRef] inputs, TypeRef output) -> TypeRef {
    ret llvm.LLVMFunctionType(output,
                              _vec.buf[TypeRef](inputs),
                              _vec.len[TypeRef](inputs),
221 222 223
                              False);
}

224
fn T_fn_pair(type_names tn, TypeRef tfn) -> TypeRef {
225
    ret T_struct(vec(T_ptr(tfn),
226
                     T_opaque_closure_ptr(tn)));
227 228
}

229 230 231 232 233 234 235 236 237 238 239 240 241 242
fn T_ptr(TypeRef t) -> TypeRef {
    ret llvm.LLVMPointerType(t, 0u);
}

fn T_struct(vec[TypeRef] elts) -> TypeRef {
    ret llvm.LLVMStructType(_vec.buf[TypeRef](elts),
                            _vec.len[TypeRef](elts),
                            False);
}

fn T_opaque() -> TypeRef {
    ret llvm.LLVMOpaqueType();
}

243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259
fn T_task(type_names tn) -> TypeRef {
    auto s = "task";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }

    auto t = T_struct(vec(T_int(),      // Refcount
                          T_int(),      // Delegate pointer
                          T_int(),      // Stack segment pointer
                          T_int(),      // Runtime SP
                          T_int(),      // Rust SP
                          T_int(),      // GC chain
                          T_int(),      // Domain pointer
                          T_int()       // Crate cache pointer
                          ));
    tn.associate(s, t);
    ret t;
260 261
}

262 263 264 265 266 267
fn T_glue_fn(type_names tn) -> TypeRef {
    auto s = "glue_fn";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }

268 269
    // Bit of a kludge: pick the fn typeref out of the tydesc..
    let vec[TypeRef] tydesc_elts = _vec.init_elt[TypeRef](T_nil(), 10u);
270
    llvm.LLVMGetStructElementTypes(T_tydesc(tn),
271
                                   _vec.buf[TypeRef](tydesc_elts));
272 273 274 275 276
    auto t =
        llvm.LLVMGetElementType
        (tydesc_elts.(abi.tydesc_field_drop_glue_off));
    tn.associate(s, t);
    ret t;
277 278
}

279 280 281 282 283 284
fn T_tydesc(type_names tn) -> TypeRef {

    auto s = "tydesc";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }
285 286 287

    auto th = mk_type_handle();
    auto abs_tydesc = llvm.LLVMResolveTypeHandle(th.llth);
288
    auto tydescpp = T_ptr(T_ptr(abs_tydesc));
289
    auto pvoid = T_ptr(T_i8());
290
    auto glue_fn_ty = T_ptr(T_fn(vec(T_ptr(T_nil()),
291
                                     T_taskptr(tn),
292
                                     T_ptr(T_nil()),
293
                                     tydescpp,
294
                                     pvoid), T_void()));
295
    auto tydesc = T_struct(vec(tydescpp,          // first_param
296 297 298 299 300 301 302 303 304 305 306
                               T_int(),           // size
                               T_int(),           // align
                               glue_fn_ty,        // take_glue_off
                               glue_fn_ty,        // drop_glue_off
                               glue_fn_ty,        // free_glue_off
                               glue_fn_ty,        // sever_glue_off
                               glue_fn_ty,        // mark_glue_off
                               glue_fn_ty,        // obj_drop_glue_off
                               glue_fn_ty));      // is_stateful

    llvm.LLVMRefineType(abs_tydesc, tydesc);
307 308 309
    auto t = llvm.LLVMResolveTypeHandle(th.llth);
    tn.associate(s, t);
    ret t;
310 311
}

312 313 314 315
fn T_array(TypeRef t, uint n) -> TypeRef {
    ret llvm.LLVMArrayType(t, n);
}

316 317 318 319 320
fn T_vec(TypeRef t) -> TypeRef {
    ret T_struct(vec(T_int(),       // Refcount
                     T_int(),       // Alloc
                     T_int(),       // Fill
                     T_array(t, 0u) // Body elements
321 322 323
                     ));
}

324 325 326 327
fn T_opaque_vec_ptr() -> TypeRef {
    ret T_ptr(T_vec(T_int()));
}

328 329
fn T_str() -> TypeRef {
    ret T_vec(T_i8());
330 331
}

332 333 334 335
fn T_box(TypeRef t) -> TypeRef {
    ret T_struct(vec(T_int(), t));
}

336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354
fn T_crate(type_names tn) -> TypeRef {
    auto s = "crate";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }

    auto t = T_struct(vec(T_int(),      // ptrdiff_t image_base_off
                          T_int(),      // uintptr_t self_addr
                          T_int(),      // ptrdiff_t debug_abbrev_off
                          T_int(),      // size_t debug_abbrev_sz
                          T_int(),      // ptrdiff_t debug_info_off
                          T_int(),      // size_t debug_info_sz
                          T_int(),      // size_t activate_glue_off
                          T_int(),      // size_t yield_glue_off
                          T_int(),      // size_t unwind_glue_off
                          T_int(),      // size_t gc_glue_off
                          T_int(),      // size_t main_exit_task_glue_off
                          T_int(),      // int n_rust_syms
                          T_int(),      // int n_c_syms
355 356
                          T_int(),      // int n_libs
                          T_int()       // uintptr_t abi_tag
357 358 359
                          ));
    tn.associate(s, t);
    ret t;
360 361
}

362 363 364 365
fn T_double() -> TypeRef {
    ret llvm.LLVMDoubleType();
}

366 367
fn T_taskptr(type_names tn) -> TypeRef {
    ret T_ptr(T_task(tn));
368 369
}

370 371
// This type must never be used directly; it must always be cast away.
fn T_typaram(type_names tn) -> TypeRef {
372 373 374 375 376
    auto s = "typaram";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }

377
    auto t = T_i8();
378 379
    tn.associate(s, t);
    ret t;
380 381
}

382 383 384 385
fn T_typaram_ptr(type_names tn) -> TypeRef {
    ret T_ptr(T_typaram(tn));
}

386 387
fn T_closure_ptr(type_names tn,
                 TypeRef lltarget_ty,
388 389
                 TypeRef llbindings_ty,
                 uint n_ty_params) -> TypeRef {
390 391 392 393

    // NB: keep this in sync with code in trans_bind; we're making
    // an LLVM typeref structure that has the same "shape" as the ty.t
    // it constructs.
394
    ret T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc(tn)),
395
                                 lltarget_ty,
396 397
                                 llbindings_ty,
                                 T_captured_tydescs(tn, n_ty_params))
398 399 400
                             )));
}

401 402 403 404 405 406 407
fn T_opaque_closure_ptr(type_names tn) -> TypeRef {
    auto s = "*closure";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }
    auto t = T_closure_ptr(tn, T_struct(vec(T_ptr(T_nil()),
                                            T_ptr(T_nil()))),
408 409
                           T_nil(),
                           0u);
410 411
    tn.associate(s, t);
    ret t;
412 413
}

414 415 416 417 418 419 420 421 422 423 424 425
fn T_tag(type_names tn, uint size) -> TypeRef {
    auto s = "tag_" + _uint.to_str(size, 10u);
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }
    auto t = T_struct(vec(T_int(), T_array(T_i8(), size)));
    tn.associate(s, t);
    ret t;
}

fn T_opaque_tag(type_names tn) -> TypeRef {
    auto s = "tag";
426 427 428
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }
429
    auto t = T_struct(vec(T_int(), T_i8()));
430 431 432 433
    tn.associate(s, t);
    ret t;
}

434 435 436 437
fn T_opaque_tag_ptr(type_names tn) -> TypeRef {
    ret T_ptr(T_opaque_tag(tn));
}

438 439
fn T_captured_tydescs(type_names tn, uint n) -> TypeRef {
    ret T_struct(_vec.init_elt[TypeRef](T_ptr(T_tydesc(tn)), n));
440 441
}

442 443 444 445 446 447 448
fn T_obj_ptr(type_names tn, uint n_captured_tydescs) -> TypeRef {
    // This function is not publicly exposed because it returns an incomplete
    // type. The dynamically-sized fields follow the captured tydescs.
    fn T_obj(type_names tn, uint n_captured_tydescs) -> TypeRef {
        ret T_struct(vec(T_ptr(T_tydesc(tn)),
                         T_captured_tydescs(tn, n_captured_tydescs)));
    }
449

450
    ret T_ptr(T_box(T_obj(tn, n_captured_tydescs)));
451 452
}

453
fn T_opaque_obj_ptr(type_names tn) -> TypeRef {
454
    ret T_obj_ptr(tn, 0u);
455 456
}

457

458 459 460 461
// This function now fails if called on a type with dynamic size (as its
// return value was always meaningless in that case anyhow). Beware!
//
// TODO: Enforce via a predicate.
462
fn type_of(@crate_ctxt cx, @ty.t t) -> TypeRef {
463 464 465 466 467 468
    if (ty.type_has_dynamic_size(t)) {
        log "type_of() called on a type with dynamic size: " +
            ty.ty_to_str(t);
        fail;
    }

469
    ret type_of_inner(cx, t, false);
470 471
}

472 473 474 475 476 477
fn type_of_explicit_args(@crate_ctxt cx,
                     vec[ty.arg] inputs) -> vec[TypeRef] {
    let vec[TypeRef] atys = vec();
    for (ty.arg arg in inputs) {
        if (ty.type_has_dynamic_size(arg.ty)) {
            check (arg.mode == ast.alias);
478
            atys += T_typaram_ptr(cx.tn);
479
        } else {
480
            let TypeRef t;
481 482
            alt (arg.mode) {
                case (ast.alias) {
483 484 485 486
                    t = T_ptr(type_of_inner(cx, arg.ty, true));
                }
                case (_) {
                    t = type_of_inner(cx, arg.ty, false);
487 488 489 490 491 492 493
                }
            }
            atys += t;
        }
    }
    ret atys;
}
494 495 496 497 498 499 500 501

// NB: must keep 4 fns in sync:
//
//  - type_of_fn_full
//  - create_llargs_for_fn_args.
//  - new_fn_ctxt
//  - trans_args

502
fn type_of_fn_full(@crate_ctxt cx,
503
                   ast.proto proto,
504 505
                   option.t[TypeRef] obj_self,
                   vec[ty.arg] inputs,
506 507
                   @ty.t output,
                   uint ty_param_count) -> TypeRef {
508
    let vec[TypeRef] atys = vec();
509

510
    // Arg 0: Output pointer.
511
    if (ty.type_has_dynamic_size(output)) {
512
        atys += T_typaram_ptr(cx.tn);
513
    } else {
514
        atys += T_ptr(type_of_inner(cx, output, false));
515 516
    }

517
    // Arg 1: Task pointer.
518
    atys += T_taskptr(cx.tn);
519 520

    // Arg 2: Env (closure-bindings / self-obj)
521 522 523 524 525
    alt (obj_self) {
        case (some[TypeRef](?t)) {
            check (t as int != 0);
            atys += t;
        }
526
        case (_) {
527
            atys += T_opaque_closure_ptr(cx.tn);
528
        }
529 530
    }

531 532 533 534
    // Args >3: ty params, if not acquired via capture...
    if (obj_self == none[TypeRef]) {
        auto i = 0u;
        while (i < ty_param_count) {
535
            atys += T_ptr(T_tydesc(cx.tn));
536 537
            i += 1u;
        }
538 539
    }

540
    if (proto == ast.proto_iter) {
541 542 543
        // If it's an iter, the 'output' type of the iter is actually the
        // *input* type of the function we're given as our iter-block
        // argument.
544
        atys += T_fn_pair(cx.tn,
545
                          type_of_fn_full(cx, ast.proto_fn, none[TypeRef],
546
                                          vec(rec(mode=ast.val, ty=output)),
547
                                          plain_ty(ty.ty_nil), 0u));
548 549
    }

550
    // ... then explicit args.
551
    atys += type_of_explicit_args(cx, inputs);
552

553
    ret T_fn(atys, llvm.LLVMVoidType());
554 555
}

556
fn type_of_fn(@crate_ctxt cx,
557
              ast.proto proto,
558 559 560 561 562
              vec[ty.arg] inputs,
              @ty.t output,
              uint ty_param_count) -> TypeRef {
    ret type_of_fn_full(cx, proto, none[TypeRef], inputs, output,
                        ty_param_count);
563 564
}

565 566
fn type_of_native_fn(@crate_ctxt cx, ast.native_abi abi,
                     vec[ty.arg] inputs,
567
                     @ty.t output) -> TypeRef {
568 569 570 571 572 573 574 575 576 577 578 579
    let vec[TypeRef] atys = vec();
    if (abi == ast.native_abi_rust) {
        atys += T_taskptr(cx.tn);
        auto t = ty.ty_native_fn(abi, inputs, output);
        auto ty_param_count = ty.count_ty_params(plain_ty(t));
        auto i = 0u;
        while (i < ty_param_count) {
            atys += T_ptr(T_tydesc(cx.tn));
            i += 1u;
        }
    }
    atys += type_of_explicit_args(cx, inputs);
580
    ret T_fn(atys, type_of_inner(cx, output, false));
581 582
}

583
fn type_of_inner(@crate_ctxt cx, @ty.t t, bool boxed) -> TypeRef {
584 585
    let TypeRef llty = 0 as TypeRef;

586
    alt (t.struct) {
587 588 589 590 591
        case (ty.ty_native) { llty = T_ptr(T_i8()); }
        case (ty.ty_nil) { llty = T_nil(); }
        case (ty.ty_bool) { llty = T_bool(); }
        case (ty.ty_int) { llty = T_int(); }
        case (ty.ty_uint) { llty = T_int(); }
592
        case (ty.ty_machine(?tm)) {
593
            alt (tm) {
594 595 596 597 598 599 600 601 602 603
                case (common.ty_i8) { llty = T_i8(); }
                case (common.ty_u8) { llty = T_i8(); }
                case (common.ty_i16) { llty = T_i16(); }
                case (common.ty_u16) { llty = T_i16(); }
                case (common.ty_i32) { llty = T_i32(); }
                case (common.ty_u32) { llty = T_i32(); }
                case (common.ty_i64) { llty = T_i64(); }
                case (common.ty_u64) { llty = T_i64(); }
                case (common.ty_f32) { llty = T_f32(); }
                case (common.ty_f64) { llty = T_f64(); }
604 605
            }
        }
606 607
        case (ty.ty_char) { llty = T_char(); }
        case (ty.ty_str) { llty = T_ptr(T_str()); }
608 609 610 611 612 613 614
        case (ty.ty_tag(_, _)) {
            if (boxed) {
                llty = T_opaque_tag(cx.tn);
            } else {
                auto size = static_size_of_tag(cx, t);
                llty = T_tag(cx.tn, size);
            }
615
        }
616
        case (ty.ty_box(?t)) {
617
            llty = T_ptr(T_box(type_of_inner(cx, t, true)));
618
        }
619
        case (ty.ty_vec(?t)) {
620
            llty = T_ptr(T_vec(type_of_inner(cx, t, true)));
621
        }
622
        case (ty.ty_tup(?elts)) {
623
            let vec[TypeRef] tys = vec();
624
            for (@ty.t elt in elts) {
625
                tys += type_of_inner(cx, elt, boxed);
626
            }
627
            llty = T_struct(tys);
628
        }
629
        case (ty.ty_rec(?fields)) {
630
            let vec[TypeRef] tys = vec();
631
            for (ty.field f in fields) {
632
                tys += type_of_inner(cx, f.ty, boxed);
633
            }
634
            llty = T_struct(tys);
635
        }
636
        case (ty.ty_fn(?proto, ?args, ?out)) {
637
            llty = T_fn_pair(cx.tn, type_of_fn(cx, proto, args, out, 0u));
638
        }
639
        case (ty.ty_native_fn(?abi, ?args, ?out)) {
640
            llty = T_fn_pair(cx.tn, type_of_native_fn(cx, abi, args, out));
641
        }
642
        case (ty.ty_obj(?meths)) {
643 644 645
            auto th = mk_type_handle();
            auto self_ty = llvm.LLVMResolveTypeHandle(th.llth);

646
            let vec[TypeRef] mtys = vec();
647
            for (ty.method m in meths) {
648
                let TypeRef mty =
649
                    type_of_fn_full(cx, m.proto,
650
                                    some[TypeRef](self_ty),
651
                                    m.inputs, m.output, 0u);
652
                mtys += T_ptr(mty);
653
            }
654
            let TypeRef vtbl = T_struct(mtys);
655
            let TypeRef pair = T_struct(vec(T_ptr(vtbl),
656
                                            T_opaque_obj_ptr(cx.tn)));
657

658 659 660
            auto abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
            llvm.LLVMRefineType(abs_pair, pair);
            abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
661
            llty = abs_pair;
662
        }
663
        case (ty.ty_var(_)) {
664
            log "ty_var in trans.type_of";
665 666
            fail;
        }
667
        case (ty.ty_param(_)) {
668
            llty = T_i8();
669
        }
670
        case (ty.ty_type) { llty = T_ptr(T_tydesc(cx.tn)); }
671
    }
672 673 674 675

    check (llty as int != 0);
    llvm.LLVMAddTypeName(cx.llmod, _str.buf(ty.ty_to_str(t)), llty);
    ret llty;
676 677
}

678
fn type_of_arg(@crate_ctxt cx, &ty.arg arg) -> TypeRef {
679 680 681 682 683 684 685 686 687 688 689
    alt (arg.ty.struct) {
        case (ty.ty_param(_)) {
            if (arg.mode == ast.alias) {
                ret T_typaram_ptr(cx.tn);
            }
        }
        case (_) {
            // fall through
        }
    }

690
    auto typ;
691
    if (arg.mode == ast.alias) {
692 693 694
        typ = T_ptr(type_of_inner(cx, arg.ty, true));
    } else {
        typ = type_of_inner(cx, arg.ty, false);
695
    }
696
    ret typ;
697 698
}

699 700 701 702 703 704 705 706 707
// Name sanitation. LLVM will happily accept identifiers with weird names, but
// gas doesn't!

fn sanitize(str s) -> str {
    auto result = "";
    for (u8 c in s) {
        if (c == ('@' as u8)) {
            result += "boxed_";
        } else {
708 709 710 711 712 713 714
            if (c == (',' as u8)) {
                result += "_";
            } else {
                if (c == ('{' as u8) || c == ('(' as u8)) {
                    result += "_of_";
                } else {
                    if (c != 10u8 && c != ('}' as u8) && c != (')' as u8) &&
G
Graydon Hoare 已提交
715 716
                        c != (' ' as u8) && c != ('\t' as u8) &&
                        c != (';' as u8)) {
717 718 719 720 721
                        auto v = vec(c);
                        result += _str.from_bytes(v);
                    }
                }
            }
722 723 724 725 726
        }
    }
    ret result;
}

727 728 729 730 731 732
// LLVM constant constructors.

fn C_null(TypeRef t) -> ValueRef {
    ret llvm.LLVMConstNull(t);
}

733
fn C_integral(int i, TypeRef t) -> ValueRef {
734 735 736 737 738 739
    // FIXME. We can't use LLVM.ULongLong with our existing minimal native
    // API, which only knows word-sized args.  Lucky for us LLVM has a "take a
    // string encoding" version.  Hilarious. Please fix to handle:
    //
    // ret llvm.LLVMConstInt(T_int(), t as LLVM.ULongLong, False);
    //
740 741 742
    ret llvm.LLVMConstIntOfString(t, _str.buf(istr(i)), 10);
}

743 744 745 746 747
fn C_nil() -> ValueRef {
    // NB: See comment above in T_void().
    ret C_integral(0, T_i1());
}

748 749
fn C_bool(bool b) -> ValueRef {
    if (b) {
750
        ret C_integral(1, T_bool());
751
    } else {
752
        ret C_integral(0, T_bool());
753 754 755
    }
}

756 757
fn C_int(int i) -> ValueRef {
    ret C_integral(i, T_int());
758 759
}

760 761 762
// This is a 'c-like' raw string, which differs from
// our boxed-and-length-annotated strings.
fn C_cstr(@crate_ctxt cx, str s) -> ValueRef {
763
    auto sc = llvm.LLVMConstString(_str.buf(s), _str.byte_len(s), False);
764
    auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(sc),
765 766
                                _str.buf(cx.names.next("str")));
    llvm.LLVMSetInitializer(g, sc);
767
    llvm.LLVMSetGlobalConstant(g, True);
768 769
    llvm.LLVMSetLinkage(g, lib.llvm.LLVMPrivateLinkage
                        as llvm.Linkage);
770
    ret g;
771 772
}

773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789
// A rust boxed-and-length-annotated string.
fn C_str(@crate_ctxt cx, str s) -> ValueRef {
    auto len = _str.byte_len(s);
    auto box = C_struct(vec(C_int(abi.const_refcount as int),
                            C_int(len + 1u as int), // 'alloc'
                            C_int(len + 1u as int), // 'fill'
                            llvm.LLVMConstString(_str.buf(s),
                                                 len, False)));
    auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(box),
                                _str.buf(cx.names.next("str")));
    llvm.LLVMSetInitializer(g, box);
    llvm.LLVMSetGlobalConstant(g, True);
    llvm.LLVMSetLinkage(g, lib.llvm.LLVMPrivateLinkage
                        as llvm.Linkage);
    ret llvm.LLVMConstPointerCast(g, T_ptr(T_str()));
}

790 791 792 793 794 795 796 797 798 799 800
fn C_zero_byte_arr(uint size) -> ValueRef {
    auto i = 0u;
    let vec[ValueRef] elts = vec();
    while (i < size) {
        elts += vec(C_integral(0, T_i8()));
        i += 1u;
    }
    ret llvm.LLVMConstArray(T_i8(), _vec.buf[ValueRef](elts),
                            _vec.len[ValueRef](elts));
}

801 802 803 804 805 806
fn C_struct(vec[ValueRef] elts) -> ValueRef {
    ret llvm.LLVMConstStruct(_vec.buf[ValueRef](elts),
                             _vec.len[ValueRef](elts),
                             False);
}

807
fn decl_fn(ModuleRef llmod, str name, uint cc, TypeRef llty) -> ValueRef {
808 809
    let ValueRef llfn =
        llvm.LLVMAddFunction(llmod, _str.buf(name), llty);
810
    llvm.LLVMSetFunctionCallConv(llfn, cc);
811 812 813
    ret llfn;
}

814 815
fn decl_cdecl_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
    ret decl_fn(llmod, name, lib.llvm.LLVMCCallConv, llty);
816 817
}

818 819
fn decl_fastcall_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
    ret decl_fn(llmod, name, lib.llvm.LLVMFastCallConv, llty);
820 821
}

822 823
fn decl_glue(ModuleRef llmod, type_names tn, str s) -> ValueRef {
    ret decl_cdecl_fn(llmod, s, T_fn(vec(T_taskptr(tn)), T_void()));
824 825
}

826
fn decl_upcall_glue(ModuleRef llmod, type_names tn, uint _n) -> ValueRef {
827 828 829 830
    // It doesn't actually matter what type we come up with here, at the
    // moment, as we cast the upcall function pointers to int before passing
    // them to the indirect upcall-invocation glue.  But eventually we'd like
    // to call them directly, once we have a calling convention worked out.
831
    let int n = _n as int;
832
    let str s = abi.upcall_glue_name(n);
833
    let vec[TypeRef] args =
834
        vec(T_int(),     // callee
835
            T_int()) // taskptr
836 837
        + _vec.init_elt[TypeRef](T_int(), n as uint);

838
    ret decl_fastcall_fn(llmod, s, T_fn(args, T_int()));
839 840
}

841
fn get_upcall(@crate_ctxt cx, str name, int n_args) -> ValueRef {
842 843 844
    if (cx.upcalls.contains_key(name)) {
        ret cx.upcalls.get(name);
    }
845
    auto inputs = vec(T_taskptr(cx.tn));
846
    inputs += _vec.init_elt[TypeRef](T_int(), n_args as uint);
847
    auto output = T_int();
848
    auto f = decl_cdecl_fn(cx.llmod, name, T_fn(inputs, output));
849 850 851 852
    cx.upcalls.insert(name, f);
    ret f;
}

853
fn trans_upcall(@block_ctxt cx, str name, vec[ValueRef] args) -> result {
854
    let int n = _vec.len[ValueRef](args) as int;
855
    let ValueRef llupcall = get_upcall(cx.fcx.ccx, name, n);
856 857
    llupcall = llvm.LLVMConstPointerCast(llupcall, T_int());

858
    let ValueRef llglue = cx.fcx.ccx.glues.upcall_glues.(n);
859 860
    let vec[ValueRef] call_args = vec(llupcall);
    call_args += cx.build.PtrToInt(cx.fcx.lltaskptr, T_int());
861

862 863 864
    for (ValueRef a in args) {
        call_args += cx.build.ZExtOrBitCast(a, T_int());
    }
865

866
    ret res(cx, cx.build.FastCall(llglue, call_args));
867 868
}

869
fn trans_non_gc_free(@block_ctxt cx, ValueRef v) -> result {
870
    ret trans_upcall(cx, "upcall_free", vec(vp2i(cx, v),
871
                                            C_int(0)));
872 873
}

874
fn find_scope_cx(@block_ctxt cx) -> @block_ctxt {
875
    if (cx.kind == SCOPE_BLOCK) {
876 877 878 879 880 881 882 883 884 885 886 887
        ret cx;
    }
    alt (cx.parent) {
        case (parent_some(?b)) {
            be find_scope_cx(b);
        }
        case (parent_none) {
            fail;
        }
    }
}

888 889 890 891 892
fn umax(@block_ctxt cx, ValueRef a, ValueRef b) -> ValueRef {
    auto cond = cx.build.ICmp(lib.llvm.LLVMIntULT, a, b);
    ret cx.build.Select(cond, b, a);
}

893 894 895 896 897
fn umin(@block_ctxt cx, ValueRef a, ValueRef b) -> ValueRef {
    auto cond = cx.build.ICmp(lib.llvm.LLVMIntULT, a, b);
    ret cx.build.Select(cond, a, b);
}

898 899 900 901 902 903
fn align_to(@block_ctxt cx, ValueRef off, ValueRef align) -> ValueRef {
    auto mask = cx.build.Sub(align, C_int(1));
    auto bumped = cx.build.Add(off, mask);
    ret cx.build.And(bumped, cx.build.Not(mask));
}

904 905 906 907 908
// Returns the real size of the given type for the current target.
fn llsize_of_real(@crate_ctxt cx, TypeRef t) -> uint {
    ret llvm.LLVMStoreSizeOfType(cx.td.lltd, t);
}

909
fn llsize_of(TypeRef t) -> ValueRef {
910 911 912
    ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMSizeOf(t), T_int(), False);
}

913
fn llalign_of(TypeRef t) -> ValueRef {
914 915 916
    ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMAlignOf(t), T_int(), False);
}

917
fn size_of(@block_ctxt cx, @ty.t t) -> result {
918
    if (!ty.type_has_dynamic_size(t)) {
919
        ret res(cx, llsize_of(type_of(cx.fcx.ccx, t)));
920 921 922 923
    }
    ret dynamic_size_of(cx, t);
}

924
fn align_of(@block_ctxt cx, @ty.t t) -> result {
925
    if (!ty.type_has_dynamic_size(t)) {
926
        ret res(cx, llalign_of(type_of(cx.fcx.ccx, t)));
927 928 929 930
    }
    ret dynamic_align_of(cx, t);
}

931 932 933 934 935 936 937 938 939 940 941
// Computes the size of the data part of a non-dynamically-sized tag.
fn static_size_of_tag(@crate_ctxt cx, @ty.t t) -> uint {
    if (ty.type_has_dynamic_size(t)) {
        log "dynamically sized type passed to static_size_of_tag()";
        fail;
    }

    if (cx.tag_sizes.contains_key(t)) {
        ret cx.tag_sizes.get(t);
    }

B
Brian Anderson 已提交
942 943
    auto tid;
    let vec[@ty.t] subtys;
944 945 946 947 948 949 950 951 952 953 954
    alt (t.struct) {
        case (ty.ty_tag(?tid_, ?subtys_)) {
            tid = tid_;
            subtys = subtys_;
        }
        case (_) {
            log "non-tag passed to static_size_of_tag()";
            fail;
        }
    }

955
    // Pull the type parameters out of the corresponding tag item.
956
    let vec[ast.ty_param] ty_params = tag_ty_params(cx, tid);
957

958 959 960 961 962 963 964
    // Compute max(variant sizes).
    auto max_size = 0u;
    auto variants = tag_variants(cx, tid);
    for (ast.variant variant in variants) {
        let vec[@ty.t] tys = variant_types(cx, variant);
        auto tup_ty = ty.plain_ty(ty.ty_tup(tys));

965 966 967
        // Perform any type parameter substitutions.
        tup_ty = ty.substitute_ty_params(ty_params, subtys, tup_ty);

968 969 970 971 972 973 974 975 976 977 978 979
        // Here we possibly do a recursive call.
        auto this_size = llsize_of_real(cx, type_of(cx, tup_ty));

        if (max_size < this_size) {
            max_size = this_size;
        }
    }

    cx.tag_sizes.insert(t, max_size);
    ret max_size;
}

980
fn dynamic_size_of(@block_ctxt cx, @ty.t t) -> result {
981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005
    fn align_elements(@block_ctxt cx, vec[@ty.t] elts) -> result {
        //
        // C padding rules:
        //
        //
        //   - Pad after each element so that next element is aligned.
        //   - Pad after final structure member so that whole structure
        //     is aligned to max alignment of interior.
        //
        auto off = C_int(0);
        auto max_align = C_int(1);
        auto bcx = cx;
        for (@ty.t e in elts) {
            auto elt_align = align_of(bcx, e);
            bcx = elt_align.bcx;
            auto elt_size = size_of(bcx, e);
            bcx = elt_size.bcx;
            auto aligned_off = align_to(bcx, off, elt_align.val);
            off = cx.build.Add(aligned_off, elt_size.val);
            max_align = umax(bcx, max_align, elt_align.val);
        }
        off = align_to(bcx, off, max_align);
        ret res(bcx, off);
    }

1006 1007 1008
    alt (t.struct) {
        case (ty.ty_param(?p)) {
            auto szptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
1009
            ret res(szptr.bcx, szptr.bcx.build.Load(szptr.val));
1010 1011
        }
        case (ty.ty_tup(?elts)) {
1012
            ret align_elements(cx, elts);
1013 1014
        }
        case (ty.ty_rec(?flds)) {
1015
            let vec[@ty.t] tys = vec();
1016
            for (ty.field f in flds) {
1017
                tys += vec(f.ty);
1018
            }
1019
            ret align_elements(cx, tys);
1020
        }
1021 1022 1023 1024 1025 1026 1027
        case (ty.ty_tag(?tid, ?tps)) {
            auto bcx = cx;

            // Compute max(variant sizes).
            let ValueRef max_size = bcx.build.Alloca(T_int());
            bcx.build.Store(C_int(0), max_size);

1028
            auto ty_params = tag_ty_params(bcx.fcx.ccx, tid);
1029 1030
            auto variants = tag_variants(bcx.fcx.ccx, tid);
            for (ast.variant variant in variants) {
1031 1032 1033 1034 1035 1036 1037 1038
                // Perform type substitution on the raw variant types.
                let vec[@ty.t] raw_tys = variant_types(bcx.fcx.ccx, variant);
                let vec[@ty.t] tys = vec();
                for (@ty.t raw_ty in raw_tys) {
                    auto t = ty.substitute_ty_params(ty_params, tps, raw_ty);
                    tys += vec(t);
                }

1039 1040 1041 1042 1043 1044 1045 1046
                auto rslt = align_elements(bcx, tys);
                bcx = rslt.bcx;

                auto this_size = rslt.val;
                auto old_max_size = bcx.build.Load(max_size);
                bcx.build.Store(umax(bcx, this_size, old_max_size), max_size);
            }

1047 1048 1049
            auto max_size_val = bcx.build.Load(max_size);
            auto total_size = bcx.build.Add(max_size_val, llsize_of(T_int()));
            ret res(bcx, total_size);
1050
        }
1051 1052 1053
    }
}

1054
fn dynamic_align_of(@block_ctxt cx, @ty.t t) -> result {
1055 1056 1057
    alt (t.struct) {
        case (ty.ty_param(?p)) {
            auto aptr = field_of_tydesc(cx, t, abi.tydesc_field_align);
1058
            ret res(aptr.bcx, aptr.bcx.build.Load(aptr.val));
1059 1060 1061
        }
        case (ty.ty_tup(?elts)) {
            auto a = C_int(1);
1062
            auto bcx = cx;
1063
            for (@ty.t e in elts) {
1064 1065 1066
                auto align = align_of(bcx, e);
                bcx = align.bcx;
                a = umax(bcx, a, align.val);
1067
            }
1068
            ret res(bcx, a);
1069 1070 1071
        }
        case (ty.ty_rec(?flds)) {
            auto a = C_int(1);
1072
            auto bcx = cx;
1073
            for (ty.field f in flds) {
1074 1075 1076
                auto align = align_of(bcx, f.ty);
                bcx = align.bcx;
                a = umax(bcx, a, align.val);
1077
            }
1078
            ret res(bcx, a);
1079
        }
1080 1081 1082
        case (ty.ty_tag(_, _)) {
            ret res(cx, C_int(1)); // FIXME: stub
        }
1083 1084 1085
    }
}

1086
// Replacement for the LLVM 'GEP' instruction when field-indexing into a
1087 1088 1089 1090
// tuple-like structure (tup, rec) with a static index. This one is driven off
// ty.struct and knows what to do when it runs into a ty_param stuck in the
// middle of the thing it's GEP'ing into. Much like size_of and align_of,
// above.
1091 1092

fn GEP_tup_like(@block_ctxt cx, @ty.t t,
1093
                ValueRef base, vec[int] ixs) -> result {
1094 1095 1096 1097 1098 1099 1100 1101 1102 1103

    check (ty.type_is_tup_like(t));

    // It might be a static-known type. Handle this.

    if (! ty.type_has_dynamic_size(t)) {
        let vec[ValueRef] v = vec();
        for (int i in ixs) {
            v += C_int(i);
        }
1104
        ret res(cx, cx.build.GEP(base, v));
1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173
    }

    // It is a dynamic-containing type that, if we convert directly to an LLVM
    // TypeRef, will be all wrong; there's no proper LLVM type to represent
    // it, and the lowering function will stick in i8* values for each
    // ty_param, which is not right; the ty_params are all of some dynamic
    // size.
    //
    // What we must do instead is sadder. We must look through the indices
    // manually and split the input type into a prefix and a target. We then
    // measure the prefix size, bump the input pointer by that amount, and
    // cast to a pointer-to-target type.


    // Given a type, an index vector and an element number N in that vector,
    // calculate index X and the type that results by taking the first X-1
    // elements of the type and splitting the Xth off. Return the prefix as
    // well as the innermost Xth type.

    fn split_type(@ty.t t, vec[int] ixs, uint n)
        -> rec(vec[@ty.t] prefix, @ty.t target) {

        let uint len = _vec.len[int](ixs);

        // We don't support 0-index or 1-index GEPs. The former is nonsense
        // and the latter would only be meaningful if we supported non-0
        // values for the 0th index (we don't).

        check (len > 1u);

        if (n == 0u) {
            // Since we're starting from a value that's a pointer to a
            // *single* structure, the first index (in GEP-ese) should just be
            // 0, to yield the pointee.
            check (ixs.(n) == 0);
            ret split_type(t, ixs, n+1u);
        }

        check (n < len);

        let int ix = ixs.(n);
        let vec[@ty.t] prefix = vec();
        let int i = 0;
        while (i < ix) {
            append[@ty.t](prefix, ty.get_element_type(t, i as uint));
            i +=1 ;
        }

        auto selected = ty.get_element_type(t, i as uint);

        if (n == len-1u) {
            // We are at the innermost index.
            ret rec(prefix=prefix, target=selected);

        } else {
            // Not the innermost index; call self recursively to dig deeper.
            // Once we get an inner result, append it current prefix and
            // return to caller.
            auto inner = split_type(selected, ixs, n+1u);
            prefix += inner.prefix;
            ret rec(prefix=prefix with inner);
        }
    }

    // We make a fake prefix tuple-type here; luckily for measuring sizes
    // the tuple parens are associative so it doesn't matter that we've
    // flattened the incoming structure.

    auto s = split_type(t, ixs, 0u);
1174
    auto prefix_ty = plain_ty(ty.ty_tup(s.prefix));
1175 1176 1177 1178 1179
    auto bcx = cx;
    auto sz = size_of(bcx, prefix_ty);
    bcx = sz.bcx;
    auto raw = bcx.build.PointerCast(base, T_ptr(T_i8()));
    auto bumped = bcx.build.GEP(raw, vec(sz.val));
1180 1181 1182

    if (ty.type_has_dynamic_size(s.target)) {
        ret res(bcx, bumped);
1183
    }
1184 1185 1186

    auto typ = T_ptr(type_of(bcx.fcx.ccx, s.target));
    ret res(bcx, bcx.build.PointerCast(bumped, typ));
1187 1188
}

1189 1190 1191 1192
// Replacement for the LLVM 'GEP' instruction when field indexing into a tag.
// This function uses GEP_tup_like() above and automatically performs casts as
// appropriate. @llblobptr is the data part of a tag value; its actual type is
// meaningless, as it will be cast away.
1193 1194 1195 1196 1197 1198
fn GEP_tag(@block_ctxt cx,
           ValueRef llblobptr,
           &ast.def_id tag_id,
           &ast.def_id variant_id,
           vec[@ty.t] ty_substs,
           int ix)
1199
        -> result {
1200 1201 1202
    auto ty_params = tag_ty_params(cx.fcx.ccx, tag_id);
    auto variant = tag_variant_with_id(cx.fcx.ccx, tag_id, variant_id);

1203 1204 1205 1206 1207 1208 1209
    // Synthesize a tuple type so that GEP_tup_like() can work its magic.
    // Separately, store the type of the element we're interested in.
    auto arg_tys = arg_tys_of_fn(variant.ann);
    auto elem_ty = ty.plain_ty(ty.ty_nil);  // typestate infelicity
    auto i = 0;
    let vec[@ty.t] true_arg_tys = vec();
    for (ty.arg a in arg_tys) {
1210 1211
        auto arg_ty = ty.substitute_ty_params(ty_params, ty_substs, a.ty);
        true_arg_tys += vec(arg_ty);
1212
        if (i == ix) {
1213
            elem_ty = arg_ty;
1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244
        }

        i += 1;
    }
    auto tup_ty = ty.plain_ty(ty.ty_tup(true_arg_tys));

    // Cast the blob pointer to the appropriate type, if we need to (i.e. if
    // the blob pointer isn't dynamically sized).
    let ValueRef llunionptr;
    if (!ty.type_has_dynamic_size(tup_ty)) {
        auto llty = type_of(cx.fcx.ccx, tup_ty);
        llunionptr = cx.build.TruncOrBitCast(llblobptr, T_ptr(llty));
    } else {
        llunionptr = llblobptr;
    }

    // Do the GEP_tup_like().
    auto rslt = GEP_tup_like(cx, tup_ty, llunionptr, vec(0, ix));

    // Cast the result to the appropriate type, if necessary.
    auto val;
    if (!ty.type_has_dynamic_size(elem_ty)) {
        auto llelemty = type_of(rslt.bcx.fcx.ccx, elem_ty);
        val = rslt.bcx.build.PointerCast(rslt.val, T_ptr(llelemty));
    } else {
        val = rslt.val;
    }

    ret res(rslt.bcx, val);
}

1245

1246 1247
fn trans_raw_malloc(@block_ctxt cx, TypeRef llptr_ty, ValueRef llsize)
        -> result {
1248 1249
    // FIXME: need a table to collect tydesc globals.
    auto tydesc = C_int(0);
1250 1251 1252 1253 1254
    auto rslt = trans_upcall(cx, "upcall_malloc", vec(llsize, tydesc));
    rslt = res(rslt.bcx, vi2p(cx, rslt.val, llptr_ty));
    ret rslt;
}

1255 1256 1257 1258 1259 1260 1261 1262
fn trans_malloc_boxed(@block_ctxt cx, @ty.t t) -> result {
    // Synthesize a fake box type structurally so we have something
    // to measure the size of.
    auto boxed_body = plain_ty(ty.ty_tup(vec(plain_ty(ty.ty_int), t)));
    auto box_ptr = plain_ty(ty.ty_box(t));
    auto sz = size_of(cx, boxed_body);
    auto llty = type_of(cx.fcx.ccx, box_ptr);
    ret trans_raw_malloc(sz.bcx, llty, sz.val);
1263 1264 1265
}


1266 1267 1268 1269 1270
// Type descriptor and type glue stuff

// Given a type and a field index into its corresponding type descriptor,
// returns an LLVM ValueRef of that field from the tydesc, generating the
// tydesc if necessary.
1271
fn field_of_tydesc(@block_ctxt cx, @ty.t t, int field) -> result {
1272
    auto tydesc = get_tydesc(cx, t);
1273 1274
    ret res(tydesc.bcx,
            tydesc.bcx.build.GEP(tydesc.val, vec(C_int(0), C_int(field))));
1275
}
1276

1277 1278 1279 1280 1281 1282 1283 1284 1285
// Given a type containing ty params, build a vector containing a ValueRef for
// each of the ty params it uses (from the current frame), as well as a vec
// containing a def_id for each such param. This is used solely for
// constructing derived tydescs.
fn linearize_ty_params(@block_ctxt cx, @ty.t t)
    -> tup(vec[ast.def_id], vec[ValueRef]) {
    let vec[ValueRef] param_vals = vec();
    let vec[ast.def_id] param_defs = vec();
    type rr = rec(@block_ctxt cx,
1286 1287
                  mutable vec[ValueRef] vals,
                  mutable vec[ast.def_id] defs);
1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299

    state obj folder(@rr r) {
        fn fold_simple_ty(@ty.t t) -> @ty.t {
            alt(t.struct) {
                case (ty.ty_param(?pid)) {
                    let bool seen = false;
                    for (ast.def_id d in r.defs) {
                        if (d == pid) {
                            seen = true;
                        }
                    }
                    if (!seen) {
1300
                        r.vals += r.cx.fcx.lltydescs.get(pid);
1301 1302 1303
                        r.defs += pid;
                    }
                }
1304
                case (_) { }
1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319
            }
            ret t;
        }
    }


    auto x = @rec(cx = cx,
                  mutable vals = param_vals,
                  mutable defs = param_defs);

    ty.fold_ty(folder(x), t);

    ret tup(x.defs, x.vals);
}

1320
fn get_tydesc(&@block_ctxt cx, @ty.t t) -> result {
1321
    // Is the supplied type a type param? If so, return the passed-in tydesc.
1322
    alt (ty.type_param(t)) {
1323
        case (some[ast.def_id](?id)) {
1324
            check (cx.fcx.lltydescs.contains_key(id));
1325 1326
            ret res(cx, cx.fcx.lltydescs.get(id));
        }
1327
        case (none[ast.def_id])      { /* fall through */ }
1328
    }
1329 1330

    // Does it contain a type param? If so, generate a derived tydesc.
1331
    let uint n_params = ty.count_ty_params(t);
1332

1333
    if (ty.count_ty_params(t) > 0u) {
1334
        auto tys = linearize_ty_params(cx, t);
1335

1336 1337 1338
        check (n_params == _vec.len[ast.def_id](tys._0));
        check (n_params == _vec.len[ValueRef](tys._1));

1339
        if (!cx.fcx.ccx.tydescs.contains_key(t)) {
1340 1341
            declare_tydesc(cx.fcx.ccx, t);
            define_tydesc(cx.fcx.ccx, t, tys._0);
1342 1343
        }

1344
        auto root = cx.fcx.ccx.tydescs.get(t).tydesc;
1345

1346 1347
        auto tydescs = cx.build.Alloca(T_array(T_ptr(T_tydesc(cx.fcx.ccx.tn)),
                                               n_params));
1348

1349
        auto i = 0;
1350 1351 1352
        auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
        cx.build.Store(root, tdp);
        i += 1;
1353 1354
        for (ValueRef td in tys._1) {
            auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
1355
            cx.build.Store(td, tdp);
1356
            i += 1;
1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368
        }

        auto bcx = cx;
        auto sz = size_of(bcx, t);
        bcx = sz.bcx;
        auto align = align_of(bcx, t);
        bcx = align.bcx;

        auto v = trans_upcall(bcx, "upcall_get_type_desc",
                              vec(p2i(bcx.fcx.ccx.crate_ptr),
                                  sz.val,
                                  align.val,
1369
                                  C_int((1u + n_params) as int),
1370
                                  vp2i(bcx, tydescs)));
1371

1372 1373
        ret res(v.bcx, vi2p(v.bcx, v.val,
                            T_ptr(T_tydesc(cx.fcx.ccx.tn))));
1374 1375 1376
    }

    // Otherwise, generate a tydesc if necessary, and return it.
1377
    if (!cx.fcx.ccx.tydescs.contains_key(t)) {
1378
        let vec[ast.def_id] defs = vec();
1379 1380
        declare_tydesc(cx.fcx.ccx, t);
        define_tydesc(cx.fcx.ccx, t, defs);
1381
    }
1382
    ret res(cx, cx.fcx.ccx.tydescs.get(t).tydesc);
1383 1384
}

1385 1386 1387 1388 1389 1390
// Generates the declaration for (but doesn't fill in) a type descriptor. This
// needs to be separate from make_tydesc() below, because sometimes type glue
// functions needs to refer to their own type descriptors.
fn declare_tydesc(@crate_ctxt cx, @ty.t t) {
    auto take_glue = declare_generic_glue(cx, t, "take");
    auto drop_glue = declare_generic_glue(cx, t, "drop");
1391

1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404
    auto llsize;
    auto llalign;
    if (!ty.type_has_dynamic_size(t)) {
        auto llty = type_of(cx, t);
        llsize = llsize_of(llty);
        llalign = llalign_of(llty);
    } else {
        // These will be overwritten as the derived tydesc is generated, so
        // we create placeholder values.
        llsize = C_int(0);
        llalign = C_int(0);
    }

1405
    auto glue_fn_ty = T_ptr(T_glue_fn(cx.tn));
1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417

    // FIXME: this adjustment has to do with the ridiculous encoding of
    // glue-pointer-constants in the tydesc records: They are tydesc-relative
    // displacements.  This is purely for compatibility with rustboot and
    // should go when it is discarded.
    fn off(ValueRef tydescp,
           ValueRef gluefn) -> ValueRef {
        ret i2p(llvm.LLVMConstSub(p2i(gluefn), p2i(tydescp)),
                val_ty(gluefn));
    }

    auto name = sanitize(cx.names.next("tydesc_" + ty.ty_to_str(t)));
1418 1419 1420
    auto gvar = llvm.LLVMAddGlobal(cx.llmod, T_tydesc(cx.tn),
                                   _str.buf(name));
    auto tydesc = C_struct(vec(C_null(T_ptr(T_ptr(T_tydesc(cx.tn)))),
1421 1422
                               llsize,
                               llalign,
1423 1424
                               off(gvar, take_glue),  // take_glue_off
                               off(gvar, drop_glue),  // drop_glue_off
1425 1426 1427 1428 1429 1430 1431 1432
                               C_null(glue_fn_ty),    // free_glue_off
                               C_null(glue_fn_ty),    // sever_glue_off
                               C_null(glue_fn_ty),    // mark_glue_off
                               C_null(glue_fn_ty),    // obj_drop_glue_off
                               C_null(glue_fn_ty)));  // is_stateful

    llvm.LLVMSetInitializer(gvar, tydesc);
    llvm.LLVMSetGlobalConstant(gvar, True);
1433 1434
    llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMPrivateLinkage
                        as llvm.Linkage);
1435 1436 1437 1438 1439 1440 1441 1442

    auto info = rec(
        tydesc=gvar,
        take_glue=take_glue,
        drop_glue=drop_glue
    );

    cx.tydescs.insert(t, @info);
1443 1444
}

1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458
// declare_tydesc() above must have been called first.
fn define_tydesc(@crate_ctxt cx, @ty.t t, vec[ast.def_id] typaram_defs) {
    auto info = cx.tydescs.get(t);
    auto gvar = info.tydesc;

    auto tg = make_take_glue;
    auto take_glue = make_generic_glue(cx, t, info.take_glue, tg,
                                       typaram_defs);
    auto dg = make_drop_glue;
    auto drop_glue = make_generic_glue(cx, t, info.drop_glue, dg,
                                       typaram_defs);
}

fn declare_generic_glue(@crate_ctxt cx, @ty.t t, str name) -> ValueRef {
1459
    auto llfnty = T_glue_fn(cx.tn);
1460

1461
    auto fn_name = cx.names.next("_rust_" + name) + sep() + ty.ty_to_str(t);
1462
    fn_name = sanitize(fn_name);
1463 1464
    ret decl_fastcall_fn(cx.llmod, fn_name, llfnty);
}
1465

1466 1467 1468
fn make_generic_glue(@crate_ctxt cx, @ty.t t, ValueRef llfn,
                     val_and_ty_fn helper,
                     vec[ast.def_id] typaram_defs) -> ValueRef {
1469
    auto fcx = new_fn_ctxt(cx, llfn);
1470 1471
    auto bcx = new_top_block_ctxt(fcx);

1472
    auto re;
1473
    if (!ty.type_is_scalar(t)) {
1474
        auto llty;
1475 1476 1477
        if (ty.type_has_dynamic_size(t)) {
            llty = T_ptr(T_i8());
        } else if (ty.type_is_structural(t)) {
1478 1479 1480 1481
            llty = T_ptr(type_of(cx, t));
        } else {
            llty = type_of(cx, t);
        }
1482

1483
        auto lltyparams = llvm.LLVMGetParam(llfn, 3u);
1484 1485 1486 1487 1488 1489 1490 1491
        auto p = 0;
        for (ast.def_id d in typaram_defs) {
            auto llparam = bcx.build.GEP(lltyparams, vec(C_int(p)));
            llparam = bcx.build.Load(llparam);
            bcx.fcx.lltydescs.insert(d, llparam);
            p += 1;
        }

1492
        auto llrawptr = llvm.LLVMGetParam(llfn, 4u);
1493
        auto llval = bcx.build.BitCast(llrawptr, llty);
G
Graydon Hoare 已提交
1494

1495 1496 1497 1498
        re = helper(bcx, llval, t);
    } else {
        re = res(bcx, C_nil());
    }
1499

1500
    re.bcx.build.RetVoid();
1501 1502 1503
    ret llfn;
}

1504 1505
fn make_take_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
    if (ty.type_is_boxed(t)) {
1506 1507
        ret incr_refcnt_of_boxed(cx, v);

1508
    } else if (ty.type_is_structural(t)) {
1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533
        ret iter_structural_ty(cx, v, t,
                               bind incr_all_refcnts(_, _, _));
    }
    ret res(cx, C_nil());
}

fn incr_refcnt_of_boxed(@block_ctxt cx, ValueRef box_ptr) -> result {
    auto rc_ptr = cx.build.GEP(box_ptr, vec(C_int(0),
                                            C_int(abi.box_rc_field_refcnt)));
    auto rc = cx.build.Load(rc_ptr);

    auto rc_adj_cx = new_sub_block_ctxt(cx, "rc++");
    auto next_cx = new_sub_block_ctxt(cx, "next");

    auto const_test = cx.build.ICmp(lib.llvm.LLVMIntEQ,
                                    C_int(abi.const_refcount as int), rc);
    cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);

    rc = rc_adj_cx.build.Add(rc, C_int(1));
    rc_adj_cx.build.Store(rc, rc_ptr);
    rc_adj_cx.build.Br(next_cx.llbb);

    ret res(next_cx, C_nil());
}

1534
fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
1535
    alt (t.struct) {
1536
        case (ty.ty_str) {
G
Graydon Hoare 已提交
1537 1538 1539 1540
            ret decr_refcnt_and_if_zero
                (cx, v, bind trans_non_gc_free(_, v),
                 "free string",
                 T_int(), C_int(0));
1541 1542
        }

        case (ty.ty_vec(_)) {
            fn hit_zero(@block_ctxt cx, ValueRef v,
                        @ty.t t) -> result {
                auto res = iter_sequence(cx, v, t,
                                         bind drop_ty(_,_,_));
                // FIXME: switch gc/non-gc on layer of the type.
                ret trans_non_gc_free(res.bcx, v);
            }
            ret decr_refcnt_and_if_zero(cx, v,
                                        bind hit_zero(_, v, t),
                                        "free vector",
                                        T_int(), C_int(0));
        }

        case (ty.ty_box(?body_ty)) {
            fn hit_zero(@block_ctxt cx, ValueRef v,
                        @ty.t body_ty) -> result {
                auto body = cx.build.GEP(v,
                                         vec(C_int(0),
                                             C_int(abi.box_rc_field_body)));

                auto body_val = load_scalar_or_boxed(cx, body, body_ty);
                auto res = drop_ty(cx, body_val, body_ty);
                // FIXME: switch gc/non-gc on layer of the type.
                ret trans_non_gc_free(res.bcx, v);
            }
            ret decr_refcnt_and_if_zero(cx, v,
                                        bind hit_zero(_, v, body_ty),
                                        "free box",
                                        T_int(), C_int(0));
        }

        case (ty.ty_obj(_)) {
            fn hit_zero(@block_ctxt cx, ValueRef v) -> result {

                // Call through the obj's own fields-drop glue first.
                auto body =
                    cx.build.GEP(v,
                                 vec(C_int(0),
                                     C_int(abi.box_rc_field_body)));

                auto tydescptr =
                    cx.build.GEP(body,
                                 vec(C_int(0),
                                     C_int(abi.obj_body_elt_tydesc)));

                call_tydesc_glue_full(cx, body, cx.build.Load(tydescptr),
                                      abi.tydesc_field_drop_glue_off);

                // Then free the body.
                // FIXME: switch gc/non-gc on layer of the type.
                ret trans_non_gc_free(cx, v);
            }
            auto box_cell =
                cx.build.GEP(v,
                             vec(C_int(0),
                                 C_int(abi.obj_field_box)));

            auto boxptr = cx.build.Load(box_cell);

            ret decr_refcnt_and_if_zero(cx, boxptr,
                                        bind hit_zero(_, boxptr),
                                        "free obj",
                                        T_int(), C_int(0));
        }

        case (ty.ty_fn(_,_,_)) {
            fn hit_zero(@block_ctxt cx, ValueRef v) -> result {

                // Call through the closure's own fields-drop glue first.
                auto body =
                    cx.build.GEP(v,
                                 vec(C_int(0),
                                     C_int(abi.box_rc_field_body)));
                auto bindings =
                    cx.build.GEP(body,
                                 vec(C_int(0),
                                     C_int(abi.closure_elt_bindings)));

                auto tydescptr =
                    cx.build.GEP(body,
                                 vec(C_int(0),
                                     C_int(abi.closure_elt_tydesc)));

                call_tydesc_glue_full(cx, bindings, cx.build.Load(tydescptr),
                                      abi.tydesc_field_drop_glue_off);

                // Then free the body.
                // FIXME: switch gc/non-gc on layer of the type.
                ret trans_non_gc_free(cx, v);
            }
            auto box_cell =
                cx.build.GEP(v,
                             vec(C_int(0),
                                 C_int(abi.fn_field_box)));

            auto boxptr = cx.build.Load(box_cell);

            ret decr_refcnt_and_if_zero(cx, boxptr,
                                        bind hit_zero(_, boxptr),
                                        "free fn",
                                        T_int(), C_int(0));
        }

        case (_) {
            if (ty.type_is_structural(t)) {
                ret iter_structural_ty(cx, v, t,
                                       bind drop_ty(_, _, _));

            } else if (ty.type_is_scalar(t) ||
                       ty.type_is_native(t) ||
                       ty.type_is_nil(t)) {
                ret res(cx, C_nil());
            }
        }
    }
    cx.fcx.ccx.sess.bug("bad type in trans.make_drop_glue: " +
                        ty.ty_to_str(t));
    fail;
}

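// Emits the shared refcount-decrement control flow: branch straight to the
// next block when box_ptr is null or carries the constant-refcount sentinel,
// otherwise decrement, and run 'inner' (the free path) only when the count
// reaches zero. The per-path results are merged with a phi of t_else/v_else.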
fn decr_refcnt_and_if_zero(@block_ctxt cx,
                           ValueRef box_ptr,
                           fn(@block_ctxt cx) -> result inner,
                           str inner_name,
                           TypeRef t_else, ValueRef v_else) -> result {

    auto load_rc_cx = new_sub_block_ctxt(cx, "load rc");
    auto rc_adj_cx = new_sub_block_ctxt(cx, "rc--");
    auto inner_cx = new_sub_block_ctxt(cx, inner_name);
    auto next_cx = new_sub_block_ctxt(cx, "next");

    auto null_test = cx.build.IsNull(box_ptr);
    cx.build.CondBr(null_test, next_cx.llbb, load_rc_cx.llbb);

    auto rc_ptr = load_rc_cx.build.GEP(box_ptr,
                                       vec(C_int(0),
                                           C_int(abi.box_rc_field_refcnt)));

    auto rc = load_rc_cx.build.Load(rc_ptr);
    auto const_test =
        load_rc_cx.build.ICmp(lib.llvm.LLVMIntEQ,
                              C_int(abi.const_refcount as int), rc);
    load_rc_cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);

    rc = rc_adj_cx.build.Sub(rc, C_int(1));
    rc_adj_cx.build.Store(rc, rc_ptr);
    auto zero_test = rc_adj_cx.build.ICmp(lib.llvm.LLVMIntEQ, C_int(0), rc);
    rc_adj_cx.build.CondBr(zero_test, inner_cx.llbb, next_cx.llbb);

    auto inner_res = inner(inner_cx);
    inner_res.bcx.build.Br(next_cx.llbb);

    auto phi = next_cx.build.Phi(t_else,
                                 vec(v_else, v_else, v_else, inner_res.val),
                                 vec(cx.llbb,
                                     load_rc_cx.llbb,
                                     rc_adj_cx.llbb,
                                     inner_res.bcx.llbb));

    ret res(next_cx, phi);
}

// Tag information

fn variant_types(@crate_ctxt cx, &ast.variant v) -> vec[@ty.t] {
    let vec[@ty.t] tys = vec();
    alt (ty.ann_to_type(v.ann).struct) {
        case (ty.ty_fn(_, ?args, _)) {
            for (ty.arg arg in args) {
                tys += vec(arg.ty);
            }
        }
        case (ty.ty_tag(_, _)) { /* nothing */ }
        case (_) { fail; }
    }
    ret tys;
}

fn type_of_variant(@crate_ctxt cx,
                   &ast.variant v,
                   vec[ast.ty_param] ty_params,
                   vec[@ty.t] ty_param_substs) -> TypeRef {
    let vec[TypeRef] lltys = vec();
    auto tys = variant_types(cx, v);
    for (@ty.t typ in tys) {
        auto typ2 = ty.substitute_ty_params(ty_params, ty_param_substs, typ);
        lltys += vec(type_of(cx, typ2));
    }
    ret T_struct(lltys);
}

// Returns the type parameters of a tag.
fn tag_ty_params(@crate_ctxt cx, ast.def_id id) -> vec[ast.ty_param] {
    check (cx.items.contains_key(id));
    alt (cx.items.get(id).node) {
        case (ast.item_tag(_, _, ?tps, _)) { ret tps; }
    }
    fail;   // not reached
}

// Returns the variants in a tag.
fn tag_variants(@crate_ctxt cx, ast.def_id id) -> vec[ast.variant] {
    check (cx.items.contains_key(id));
    alt (cx.items.get(id).node) {
        case (ast.item_tag(_, ?variants, _, _)) { ret variants; }
    }
    fail;   // not reached
}

// Returns the tag variant with the given ID.
fn tag_variant_with_id(@crate_ctxt cx,
                       &ast.def_id tag_id,
                       &ast.def_id variant_id) -> ast.variant {
    auto variants = tag_variants(cx, tag_id);

    auto i = 0u;
    while (i < _vec.len[ast.variant](variants)) {
        auto variant = variants.(i);
        if (common.def_eq(variant.id, variant_id)) {
            ret variant;
        }
        i += 1u;
    }

    log "tag_variant_with_id(): no variant exists with that ID";
    fail;
}

// Returns a new plain tag type of the given ID with no type parameters. Don't
// use this function in new code; it's a hack to keep things working for now.
fn mk_plain_tag(ast.def_id tid) -> @ty.t {
    let vec[@ty.t] tps = vec();
    ret ty.plain_ty(ty.ty_tag(tid, tps));
}


type val_pair_fn = fn(@block_ctxt cx, ValueRef dst, ValueRef src) -> result;

type val_and_ty_fn = fn(@block_ctxt cx, ValueRef v, @ty.t t) -> result;

type val_pair_and_ty_fn =
    fn(@block_ctxt cx, ValueRef av, ValueRef bv, @ty.t t) -> result;

// Iterates through the elements of a structural type.
fn iter_structural_ty(@block_ctxt cx,
                      ValueRef v,
                      @ty.t t,
                      val_and_ty_fn f)
    -> result {
    fn adaptor_fn(val_and_ty_fn f,
                  @block_ctxt cx,
                  ValueRef av,
                  ValueRef bv,
                  @ty.t t) -> result {
        ret f(cx, av, t);
    }
    be iter_structural_ty_full(cx, v, v, t,
                               bind adaptor_fn(f, _, _, _, _));
}


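// Pairwise version of the above: walks two values of the same structural
// type in lock-step, applying f to each corresponding pair of elements.
// iter_structural_ty funnels the single-value case through this via an
// adaptor, and structural comparison uses it directly.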
fn iter_structural_ty_full(@block_ctxt cx,
                           ValueRef av,
                           ValueRef bv,
                           @ty.t t,
                           val_pair_and_ty_fn f)
    -> result {
    let result r = res(cx, C_nil());

    fn iter_boxpp(@block_ctxt cx,
                  ValueRef box_a_cell,
                  ValueRef box_b_cell,
                  val_pair_and_ty_fn f) -> result {
        auto box_a_ptr = cx.build.Load(box_a_cell);
        auto box_b_ptr = cx.build.Load(box_b_cell);
        auto tnil = plain_ty(ty.ty_nil);
        auto tbox = plain_ty(ty.ty_box(tnil));

        auto inner_cx = new_sub_block_ctxt(cx, "iter box");
        auto next_cx = new_sub_block_ctxt(cx, "next");
        auto null_test = cx.build.IsNull(box_a_ptr);
        cx.build.CondBr(null_test, next_cx.llbb, inner_cx.llbb);

        auto r = f(inner_cx, box_a_ptr, box_b_ptr, tbox);
        r.bcx.build.Br(next_cx.llbb);
        ret res(next_cx, r.val);
    }

    alt (t.struct) {
        case (ty.ty_tup(?args)) {
            let int i = 0;
            for (@ty.t arg in args) {
                r = GEP_tup_like(r.bcx, t, av, vec(0, i));
                auto elt_a = r.val;
                r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
                auto elt_b = r.val;
                r = f(r.bcx,
                      load_scalar_or_boxed(r.bcx, elt_a, arg),
                      load_scalar_or_boxed(r.bcx, elt_b, arg),
                      arg);
                i += 1;
            }
        }
        case (ty.ty_rec(?fields)) {
            let int i = 0;
            for (ty.field fld in fields) {
                r = GEP_tup_like(r.bcx, t, av, vec(0, i));
                auto llfld_a = r.val;
                r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
                auto llfld_b = r.val;
                r = f(r.bcx,
                      load_scalar_or_boxed(r.bcx, llfld_a, fld.ty),
                      load_scalar_or_boxed(r.bcx, llfld_b, fld.ty),
                      fld.ty);
                i += 1;
            }
        }
        case (ty.ty_tag(?tid, ?tps)) {
            auto variants = tag_variants(cx.fcx.ccx, tid);
            auto n_variants = _vec.len[ast.variant](variants);

            // Cast the tags to types we can GEP into.
            auto lltagty = T_opaque_tag_ptr(cx.fcx.ccx.tn);
            auto av_tag = cx.build.PointerCast(av, lltagty);
            auto bv_tag = cx.build.PointerCast(bv, lltagty);

            auto lldiscrim_a_ptr = cx.build.GEP(av_tag,
                                                vec(C_int(0), C_int(0)));
            auto llunion_a_ptr = cx.build.GEP(av_tag,
                                              vec(C_int(0), C_int(1)));
            auto lldiscrim_a = cx.build.Load(lldiscrim_a_ptr);

            auto lldiscrim_b_ptr = cx.build.GEP(bv_tag,
                                                vec(C_int(0), C_int(0)));
            auto llunion_b_ptr = cx.build.GEP(bv_tag,
                                              vec(C_int(0), C_int(1)));
            auto lldiscrim_b = cx.build.Load(lldiscrim_b_ptr);

            // NB: we must hit the discriminant first so that structural
            // comparisons know not to proceed when the discriminants differ.
            auto bcx = cx;
            bcx = f(bcx, lldiscrim_a, lldiscrim_b,
                    plain_ty(ty.ty_int)).bcx;

            auto unr_cx = new_sub_block_ctxt(bcx, "tag-iter-unr");
            unr_cx.build.Unreachable();

            auto llswitch = bcx.build.Switch(lldiscrim_a, unr_cx.llbb,
                                             n_variants);

            auto next_cx = new_sub_block_ctxt(bcx, "tag-iter-next");

            auto ty_params = tag_ty_params(bcx.fcx.ccx, tid);

            auto i = 0u;
            for (ast.variant variant in variants) {
                auto variant_cx = new_sub_block_ctxt(bcx,
                                                     "tag-iter-variant-" +
                                                     _uint.to_str(i, 10u));
                llvm.LLVMAddCase(llswitch, C_int(i as int), variant_cx.llbb);

                if (_vec.len[ast.variant_arg](variant.args) > 0u) {
                    // N-ary variant.
                    auto llvarty = type_of_variant(bcx.fcx.ccx, variants.(i),
                                                   ty_params, tps);

                    auto fn_ty = ty.ann_to_type(variants.(i).ann);
                    alt (fn_ty.struct) {
                        case (ty.ty_fn(_, ?args, _)) {
                            auto llvarp_a = variant_cx.build.
                                TruncOrBitCast(llunion_a_ptr, T_ptr(llvarty));

                            auto llvarp_b = variant_cx.build.
                                TruncOrBitCast(llunion_b_ptr, T_ptr(llvarty));

                            auto j = 0u;
                            for (ty.arg a in args) {
                                auto v = vec(C_int(0), C_int(j as int));

                                auto llfldp_a =
                                    variant_cx.build.GEP(llvarp_a, v);

                                auto llfldp_b =
                                    variant_cx.build.GEP(llvarp_b, v);

                                auto ty_subst = ty.substitute_ty_params(
                                    ty_params, tps, a.ty);

                                auto llfld_a =
                                    load_scalar_or_boxed(variant_cx,
                                                         llfldp_a,
                                                         ty_subst);

                                auto llfld_b =
                                    load_scalar_or_boxed(variant_cx,
                                                         llfldp_b,
                                                         ty_subst);

                                auto res = f(variant_cx,
                                             llfld_a, llfld_b, ty_subst);
                                variant_cx = res.bcx;
                                j += 1u;
                            }
                        }
                        case (_) { fail; }
                    }

                    variant_cx.build.Br(next_cx.llbb);
                } else {
                    // Nullary variant; nothing to do.
                    variant_cx.build.Br(next_cx.llbb);
                }

                i += 1u;
            }

            ret res(next_cx, C_nil());
        }
        case (ty.ty_fn(_,_,_)) {
            auto box_cell_a =
                cx.build.GEP(av,
                             vec(C_int(0),
                                 C_int(abi.fn_field_box)));
            auto box_cell_b =
                cx.build.GEP(bv,
                             vec(C_int(0),
                                 C_int(abi.fn_field_box)));
            ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
        }
        case (ty.ty_obj(_)) {
            auto box_cell_a =
                cx.build.GEP(av,
                             vec(C_int(0),
                                 C_int(abi.obj_field_box)));
            auto box_cell_b =
                cx.build.GEP(bv,
                             vec(C_int(0),
                                 C_int(abi.obj_field_box)));
            ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
        }
        case (_) {
            cx.fcx.ccx.sess.unimpl("type in iter_structural_ty_full");
        }
    }
    ret r;

1993 1994
// Iterates through a pointer range, until the src* hits the src_lim*.
fn iter_sequence_raw(@block_ctxt cx,
1995
                     ValueRef dst,     // elt*
1996 1997 1998
                     ValueRef src,     // elt*
                     ValueRef src_lim, // elt*
                     ValueRef elt_sz,
1999
                     val_pair_fn f) -> result {
2000 2001 2002

    auto bcx = cx;

2003
    let ValueRef dst_int = vp2i(bcx, dst);
2004 2005 2006 2007 2008 2009 2010 2011 2012
    let ValueRef src_int = vp2i(bcx, src);
    let ValueRef src_lim_int = vp2i(bcx, src_lim);

    auto cond_cx = new_scope_block_ctxt(cx, "sequence-iter cond");
    auto body_cx = new_scope_block_ctxt(cx, "sequence-iter body");
    auto next_cx = new_sub_block_ctxt(cx, "next");

    bcx.build.Br(cond_cx.llbb);

2013 2014
    let ValueRef dst_curr = cond_cx.build.Phi(T_int(),
                                              vec(dst_int), vec(bcx.llbb));
2015 2016 2017
    let ValueRef src_curr = cond_cx.build.Phi(T_int(),
                                              vec(src_int), vec(bcx.llbb));

2018
    auto end_test = cond_cx.build.ICmp(lib.llvm.LLVMIntULT,
2019 2020 2021 2022
                                       src_curr, src_lim_int);

    cond_cx.build.CondBr(end_test, body_cx.llbb, next_cx.llbb);

2023
    auto dst_curr_ptr = vi2p(body_cx, dst_curr, T_ptr(T_i8()));
2024
    auto src_curr_ptr = vi2p(body_cx, src_curr, T_ptr(T_i8()));
2025

2026
    auto body_res = f(body_cx, dst_curr_ptr, src_curr_ptr);
2027 2028
    body_cx = body_res.bcx;

2029
    auto dst_next = body_cx.build.Add(dst_curr, elt_sz);
2030
    auto src_next = body_cx.build.Add(src_curr, elt_sz);
2031 2032
    body_cx.build.Br(cond_cx.llbb);

2033 2034
    cond_cx.build.AddIncomingToPhi(dst_curr, vec(dst_next),
                                   vec(body_cx.llbb));
2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047
    cond_cx.build.AddIncomingToPhi(src_curr, vec(src_next),
                                   vec(body_cx.llbb));

    ret res(next_cx, C_nil());
}


fn iter_sequence_inner(@block_ctxt cx,
                       ValueRef src,     // elt*
                       ValueRef src_lim, // elt*
                       @ty.t elt_ty,
                       val_and_ty_fn f) -> result {
    fn adaptor_fn(val_and_ty_fn f,
                  @ty.t elt_ty,
                  @block_ctxt cx,
                  ValueRef dst,
                  ValueRef src) -> result {
        auto llty = type_of(cx.fcx.ccx, elt_ty);
        auto p = cx.build.PointerCast(src, T_ptr(llty));
        ret f(cx, load_scalar_or_boxed(cx, p, elt_ty), elt_ty);
    }

    auto elt_sz = size_of(cx, elt_ty);
    be iter_sequence_raw(elt_sz.bcx, src, src, src_lim, elt_sz.val,
                         bind adaptor_fn(f, elt_ty, _, _, _));
}


// Iterates through the elements of a vec or str.
fn iter_sequence(@block_ctxt cx,
                 ValueRef v,
                 @ty.t t,
                 val_and_ty_fn f) -> result {

    fn iter_sequence_body(@block_ctxt cx,
                          ValueRef v,
                          @ty.t elt_ty,
                          val_and_ty_fn f,
                          bool trailing_null) -> result {

        auto p0 = cx.build.GEP(v, vec(C_int(0),
                                      C_int(abi.vec_elt_data)));
        auto lenptr = cx.build.GEP(v, vec(C_int(0),
                                          C_int(abi.vec_elt_fill)));

        auto llunit_ty = type_of(cx.fcx.ccx, elt_ty);
        auto bcx = cx;

        auto len = bcx.build.Load(lenptr);
        if (trailing_null) {
            auto unit_sz = size_of(bcx, elt_ty);
            bcx = unit_sz.bcx;
            len = bcx.build.Sub(len, unit_sz.val);
        }

        auto p1 = vi2p(bcx, bcx.build.Add(vp2i(bcx, p0), len),
                       T_ptr(llunit_ty));

        ret iter_sequence_inner(cx, p0, p1, elt_ty, f);
    }

    alt (t.struct) {
        case (ty.ty_vec(?et)) {
            ret iter_sequence_body(cx, v, et, f, false);
        }
        case (ty.ty_str) {
            auto et = plain_ty(ty.ty_machine(common.ty_u8));
            ret iter_sequence_body(cx, v, et, f, true);
        }
        case (_) { fail; }
    }
    cx.fcx.ccx.sess.bug("bad type in trans.iter_sequence");
    fail;
}

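// Calls a glue function through a type descriptor: loads the glue-fn pointer
// out of the requested tydesc field and fastcalls it on the raw value
// pointer, passing along the tydesc's first-param descriptors.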
fn call_tydesc_glue_full(@block_ctxt cx, ValueRef v,
                         ValueRef tydesc, int field) {
    auto llrawptr = cx.build.BitCast(v, T_ptr(T_i8()));
    auto lltydescs = cx.build.GEP(tydesc,
                                  vec(C_int(0),
                                      C_int(abi.tydesc_field_first_param)));
    lltydescs = cx.build.Load(lltydescs);
    auto llfnptr = cx.build.GEP(tydesc, vec(C_int(0), C_int(field)));
    auto llfn = cx.build.Load(llfnptr);

    // FIXME: this adjustment has to do with the ridiculous encoding of
    // glue-pointer-constants in the tydesc records: They are tydesc-relative
    // displacements.  This is purely for compatibility with rustboot and
    // should go when it is discarded.
    llfn = vi2p(cx, cx.build.Add(vp2i(cx, llfn),
                                 vp2i(cx, tydesc)),
                val_ty(llfn));

    cx.build.FastCall(llfn, vec(C_null(T_ptr(T_nil())),
                                cx.fcx.lltaskptr,
                                C_null(T_ptr(T_nil())),
                                lltydescs,
                                llrawptr));
}

fn call_tydesc_glue(@block_ctxt cx, ValueRef v, @ty.t t, int field) {
    auto td = get_tydesc(cx, t);
    call_tydesc_glue_full(td.bcx, v, td.val, field);
}

fn incr_all_refcnts(@block_ctxt cx,
                    ValueRef v,
                    @ty.t t) -> result {
    if (!ty.type_is_scalar(t)) {
        call_tydesc_glue(cx, v, t, abi.tydesc_field_take_glue_off);
    }
    ret res(cx, C_nil());
}

fn drop_slot(@block_ctxt cx,
             ValueRef slot,
             @ty.t t) -> result {
    auto llptr = load_scalar_or_boxed(cx, slot, t);
    auto re = drop_ty(cx, llptr, t);

    auto llty = val_ty(slot);
    auto llelemty = lib.llvm.llvm.LLVMGetElementType(llty);
    re.bcx.build.Store(C_null(llelemty), slot);
    ret re;
}

fn drop_ty(@block_ctxt cx,
           ValueRef v,
           @ty.t t) -> result {

    if (!ty.type_is_scalar(t)) {
        call_tydesc_glue(cx, v, t, abi.tydesc_field_drop_glue_off);
    }
    ret res(cx, C_nil());
}

fn call_memcpy(@block_ctxt cx,
               ValueRef dst,
               ValueRef src,
               ValueRef n_bytes) -> result {
    auto src_ptr = cx.build.PointerCast(src, T_ptr(T_i8()));
    auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
    auto size = cx.build.IntCast(n_bytes, T_int());
    ret res(cx, cx.build.FastCall(cx.fcx.ccx.glues.memcpy_glue,
                                  vec(dst_ptr, src_ptr, size)));
}

fn call_bzero(@block_ctxt cx,
              ValueRef dst,
              ValueRef n_bytes) -> result {
    auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
    auto size = cx.build.IntCast(n_bytes, T_int());
    ret res(cx, cx.build.FastCall(cx.fcx.ccx.glues.bzero_glue,
                                  vec(dst_ptr, size)));
}

fn memcpy_ty(@block_ctxt cx,
             ValueRef dst,
             ValueRef src,
             @ty.t t) -> result {
    if (ty.type_has_dynamic_size(t)) {
        auto llszptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
        auto llsz = llszptr.bcx.build.Load(llszptr.val);
        ret call_memcpy(llszptr.bcx, dst, src, llsz);

    } else {
        ret res(cx, cx.build.Store(cx.build.Load(src), dst));
    }
}

tag copy_action {
    INIT;
    DROP_EXISTING;
}

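// Copies a value of type t from src into dst. Scalars are stored directly;
// boxed values have their refcount bumped; structural and dynamically-sized
// values are memcpy'd. With DROP_EXISTING the previous contents of dst are
// dropped first, while INIT assumes dst is uninitialized.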
fn copy_ty(@block_ctxt cx,
           copy_action action,
           ValueRef dst,
           ValueRef src,
           @ty.t t) -> result {
    if (ty.type_is_scalar(t) || ty.type_is_native(t)) {
        ret res(cx, cx.build.Store(src, dst));

    } else if (ty.type_is_nil(t)) {
        ret res(cx, C_nil());

    } else if (ty.type_is_boxed(t)) {
        auto r = incr_all_refcnts(cx, src, t);
        if (action == DROP_EXISTING) {
            r = drop_ty(r.bcx, r.bcx.build.Load(dst), t);
        }
        ret res(r.bcx, r.bcx.build.Store(src, dst));

    } else if (ty.type_is_structural(t) ||
               ty.type_has_dynamic_size(t)) {
        auto r = incr_all_refcnts(cx, src, t);
        if (action == DROP_EXISTING) {
            r = drop_ty(r.bcx, dst, t);
        }
        ret memcpy_ty(r.bcx, dst, src, t);
    }

    cx.fcx.ccx.sess.bug("unexpected type in trans.copy_ty: " +
                        ty.ty_to_str(t));
    fail;
}

fn trans_lit(@crate_ctxt cx, &ast.lit lit, &ast.ann ann) -> ValueRef {
    alt (lit.node) {
        case (ast.lit_int(?i)) {
            ret C_int(i);
        }
        case (ast.lit_uint(?u)) {
            ret C_int(u as int);
        }
        case (ast.lit_mach_int(?tm, ?i)) {
            // FIXME: the entire handling of mach types falls apart
            // if target int width is larger than host, at the moment;
            // re-do the mach-int types using 'big' when that works.
            auto t = T_int();
            alt (tm) {
                case (common.ty_u8) { t = T_i8(); }
                case (common.ty_u16) { t = T_i16(); }
                case (common.ty_u32) { t = T_i32(); }
                case (common.ty_u64) { t = T_i64(); }

                case (common.ty_i8) { t = T_i8(); }
                case (common.ty_i16) { t = T_i16(); }
                case (common.ty_i32) { t = T_i32(); }
                case (common.ty_i64) { t = T_i64(); }
            }
            ret C_integral(i, t);
        }
        case (ast.lit_char(?c)) {
            ret C_integral(c as int, T_char());
        }
        case (ast.lit_bool(?b)) {
            ret C_bool(b);
        }
        case (ast.lit_nil) {
            ret C_nil();
        }
        case (ast.lit_str(?s)) {
            ret C_str(cx, s);
        }
    }
}

fn target_type(@crate_ctxt cx, @ty.t t) -> @ty.t {
    alt (t.struct) {
        case (ty.ty_int) {
            auto tm = ty.ty_machine(cx.sess.get_targ_cfg().int_type);
            ret @rec(struct=tm with *t);
        }
        case (ty.ty_uint) {
            auto tm = ty.ty_machine(cx.sess.get_targ_cfg().uint_type);
            ret @rec(struct=tm with *t);
        }
        case (_) { /* fall through */ }
    }
    ret t;
}

fn node_ann_type(@crate_ctxt cx, &ast.ann a) -> @ty.t {
    alt (a) {
        case (ast.ann_none) {
            cx.sess.bug("missing type annotation");
        }
        case (ast.ann_type(?t, _)) {
            ret target_type(cx, t);
        }
    }
}

fn node_ann_ty_params(&ast.ann a) -> vec[@ty.t] {
    alt (a) {
        case (ast.ann_none) {
            log "missing type annotation";
            fail;
        }
        case (ast.ann_type(_, ?tps_opt)) {
            alt (tps_opt) {
                case (none[vec[@ty.t]]) {
                    log "type annotation has no ty params";
                    fail;
                }
                case (some[vec[@ty.t]](?tps)) { ret tps; }
            }
        }
    }
}

fn node_type(@crate_ctxt cx, &ast.ann a) -> TypeRef {
    ret type_of(cx, node_ann_type(cx, a));
}

fn trans_unary(@block_ctxt cx, ast.unop op,
               @ast.expr e, &ast.ann a) -> result {

    auto sub = trans_expr(cx, e);

    alt (op) {
        case (ast.bitnot) {
            sub = autoderef(sub.bcx, sub.val, ty.expr_ty(e));
            ret res(sub.bcx, cx.build.Not(sub.val));
        }
        case (ast.not) {
            sub = autoderef(sub.bcx, sub.val, ty.expr_ty(e));
            ret res(sub.bcx, cx.build.Not(sub.val));
        }
        case (ast.neg) {
            sub = autoderef(sub.bcx, sub.val, ty.expr_ty(e));
            ret res(sub.bcx, cx.build.Neg(sub.val));
        }
        case (ast.box) {
            auto e_ty = ty.expr_ty(e);
            auto e_val = sub.val;
            auto box_ty = node_ann_type(sub.bcx.fcx.ccx, a);
            sub = trans_malloc_boxed(sub.bcx, e_ty);
            find_scope_cx(cx).cleanups +=
                clean(bind drop_ty(_, sub.val, box_ty));

            auto box = sub.val;
            auto rc = sub.bcx.build.GEP(box,
                                        vec(C_int(0),
                                            C_int(abi.box_rc_field_refcnt)));
            auto body = sub.bcx.build.GEP(box,
                                          vec(C_int(0),
                                              C_int(abi.box_rc_field_body)));
            sub.bcx.build.Store(C_int(1), rc);

            // Cast the body type to the type of the value. This is needed to
            // make tags work, since tags have a different LLVM type depending
            // on whether they're boxed or not.
            if (!ty.type_has_dynamic_size(e_ty)) {
                auto llety = T_ptr(type_of(sub.bcx.fcx.ccx, e_ty));
                body = sub.bcx.build.PointerCast(body, llety);
            }

            sub = copy_ty(sub.bcx, INIT, body, e_val, e_ty);
            ret res(sub.bcx, box);
        }
        case (ast.deref) {
            auto val = sub.bcx.build.GEP(sub.val,
                                         vec(C_int(0),
                                             C_int(abi.box_rc_field_body)));
            auto e_ty = node_ann_type(sub.bcx.fcx.ccx, a);
            if (ty.type_is_scalar(e_ty) ||
                ty.type_is_nil(e_ty)) {
                val = sub.bcx.build.Load(val);
            }
            ret res(sub.bcx, val);
        }
        case (ast._mutable) {
            ret trans_expr(cx, e);
        }
    }
    fail;
}

fn trans_compare(@block_ctxt cx0, ast.binop op, @ty.t t0,
                 ValueRef lhs0, ValueRef rhs0) -> result {

    auto cx = cx0;

    auto lhs_r = autoderef(cx, lhs0, t0);
    auto lhs = lhs_r.val;
    cx = lhs_r.bcx;

    auto rhs_r = autoderef(cx, rhs0, t0);
    auto rhs = rhs_r.val;
    cx = rhs_r.bcx;

    auto t = autoderefed_ty(t0);

    if (ty.type_is_scalar(t)) {
        ret res(cx, trans_scalar_compare(cx, op, t, lhs, rhs));

    } else if (ty.type_is_structural(t)
               || ty.type_is_sequence(t)) {

        auto scx = new_sub_block_ctxt(cx, "structural compare start");
        auto next = new_sub_block_ctxt(cx, "structural compare end");
        cx.build.Br(scx.llbb);

        /*
         * We're doing lexicographic comparison here. We start with the
         * assumption that the two input elements are equal. Depending on
         * operator, this means that the result is either true or false;
         * equality produces 'true' for ==, <= and >=. It produces 'false' for
         * !=, < and >.
         *
         * We then move one element at a time through the structure checking
         * for pairwise element equality. If we have equality, our assumption
         * about overall sequence equality is not modified, so we have to move
         * to the next element.
         *
         * If we do not have pairwise element equality, we have reached an
         * element that 'decides' the lexicographic comparison. So we exit the
         * loop with a flag that indicates the true/false sense of that
         * decision, by testing the element again with the operator we're
         * interested in.
         *
         * When we're lucky, LLVM should be able to fold some of these two
         * tests together (as they're applied to the same operands and are
         * sometimes redundant). But we don't bother trying to
         * optimize combinations like that, at this level.
         */
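        // Illustrative example: for vec(1,2,3) < vec(1,2,4), the pairwise ==
        // tests pass for 1==1 and 2==2; at 3 vs 4 the loop stops, stores the
        // result of 3 < 4 into the flag, and that value is the answer.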

        auto flag = scx.build.Alloca(T_i1());

        if (ty.type_is_sequence(t)) {

            // If we hit == all the way through the minimum-shared-length
            // section, default to judging the relative sequence lengths.
            auto len_cmp =
                trans_integral_compare(scx, op, plain_ty(ty.ty_uint),
                                       vec_fill(scx, lhs),
                                       vec_fill(scx, rhs));
            scx.build.Store(len_cmp, flag);

        } else {
            auto T = C_integral(1, T_i1());
            auto F = C_integral(0, T_i1());

            alt (op) {
                // ==, <= and >= default to true if they find == all the way.
                case (ast.eq) { scx.build.Store(T, flag); }
                case (ast.le) { scx.build.Store(T, flag); }
                case (ast.ge) { scx.build.Store(T, flag); }
                case (_) {
                    // < > default to false if they find == all the way.
                    scx.build.Store(F, flag);
                }

            }
        }

        fn inner(@block_ctxt last_cx,
                 bool load_inner,
                 ValueRef flag,
                 ast.binop op,
                 @block_ctxt cx,
                 ValueRef av0,
                 ValueRef bv0,
                 @ty.t t) -> result {

            auto cnt_cx = new_sub_block_ctxt(cx, "continue comparison");
            auto stop_cx = new_sub_block_ctxt(cx, "stop comparison");

            auto av = av0;
            auto bv = bv0;
            if (load_inner) {
                av = load_scalar_or_boxed(cx, av, t);
                bv = load_scalar_or_boxed(cx, bv, t);
            }

            // First 'eq' comparison: if so, continue to next elts.
            auto eq_r = trans_compare(cx, ast.eq, t, av, bv);
            eq_r.bcx.build.CondBr(eq_r.val, cnt_cx.llbb, stop_cx.llbb);

            // Second 'op' comparison: find out how this elt-pair decides.
            auto stop_r = trans_compare(stop_cx, op, t, av, bv);
            stop_r.bcx.build.Store(stop_r.val, flag);
            stop_r.bcx.build.Br(last_cx.llbb);
            ret res(cnt_cx, C_nil());
        }

        auto r;
        if (ty.type_is_structural(t)) {
            r = iter_structural_ty_full(scx, lhs, rhs, t,
                                        bind inner(next, false, flag, op,
                                                   _, _, _, _));
        } else {
            auto lhs_p0 = vec_p0(scx, lhs);
            auto rhs_p0 = vec_p0(scx, rhs);
            auto min_len = umin(scx, vec_fill(scx, lhs), vec_fill(scx, rhs));
            auto rhs_lim = scx.build.GEP(rhs_p0, vec(min_len));
            auto elt_ty = ty.sequence_element_type(t);
            auto elt_llsz_r = size_of(scx, elt_ty);
            scx = elt_llsz_r.bcx;
            r = iter_sequence_raw(scx, lhs, rhs, rhs_lim,
                                  elt_llsz_r.val,
                                  bind inner(next, true, flag, op,
                                             _, _, _, elt_ty));
        }

        r.bcx.build.Br(next.llbb);
        auto v = next.build.Load(flag);
        ret res(next, v);

    } else {
        // FIXME: compare obj, fn by pointer?
        cx.fcx.ccx.sess.unimpl("type in trans_compare");
        ret res(cx, C_bool(false));
    }
}

fn trans_scalar_compare(@block_ctxt cx, ast.binop op, @ty.t t,
                        ValueRef lhs, ValueRef rhs) -> ValueRef {
    if (ty.type_is_fp(t)) {
        ret trans_fp_compare(cx, op, t, lhs, rhs);
    } else {
        ret trans_integral_compare(cx, op, t, lhs, rhs);
    }
}

fn trans_fp_compare(@block_ctxt cx, ast.binop op, @ty.t fptype,
                    ValueRef lhs, ValueRef rhs) -> ValueRef {

    auto cmp = lib.llvm.LLVMIntEQ;
    alt (op) {
        // FIXME: possibly use the unordered-or-< predicates here,
        // for now we're only going with ordered-and-< style (no NaNs).
        case (ast.eq) { cmp = lib.llvm.LLVMRealOEQ; }
        case (ast.ne) { cmp = lib.llvm.LLVMRealONE; }
        case (ast.lt) { cmp = lib.llvm.LLVMRealOLT; }
        case (ast.gt) { cmp = lib.llvm.LLVMRealOGT; }
        case (ast.le) { cmp = lib.llvm.LLVMRealOLE; }
        case (ast.ge) { cmp = lib.llvm.LLVMRealOGE; }
    }

    ret cx.build.FCmp(cmp, lhs, rhs);
}

fn trans_integral_compare(@block_ctxt cx, ast.binop op, @ty.t intype,
                          ValueRef lhs, ValueRef rhs) -> ValueRef {
    auto cmp = lib.llvm.LLVMIntEQ;
    alt (op) {
        case (ast.eq) { cmp = lib.llvm.LLVMIntEQ; }
        case (ast.ne) { cmp = lib.llvm.LLVMIntNE; }

        case (ast.lt) {
            if (ty.type_is_signed(intype)) {
                cmp = lib.llvm.LLVMIntSLT;
            } else {
                cmp = lib.llvm.LLVMIntULT;
            }
        }
        case (ast.le) {
            if (ty.type_is_signed(intype)) {
                cmp = lib.llvm.LLVMIntSLE;
            } else {
                cmp = lib.llvm.LLVMIntULE;
            }
        }
        case (ast.gt) {
            if (ty.type_is_signed(intype)) {
                cmp = lib.llvm.LLVMIntSGT;
            } else {
                cmp = lib.llvm.LLVMIntUGT;
            }
        }
        case (ast.ge) {
            if (ty.type_is_signed(intype)) {
                cmp = lib.llvm.LLVMIntSGE;
            } else {
                cmp = lib.llvm.LLVMIntUGE;
            }
        }
    }
    ret cx.build.ICmp(cmp, lhs, rhs);
}

fn trans_vec_append(@block_ctxt cx, @ty.t t,
                    ValueRef lhs, ValueRef rhs) -> result {

    auto elt_ty = ty.sequence_element_type(t);

    auto skip_null = C_bool(false);
    alt (t.struct) {
        case (ty.ty_str) { skip_null = C_bool(true); }
        case (_) { }
    }

    auto bcx = cx;

    auto llvec_tydesc = get_tydesc(bcx, t);
    bcx = llvec_tydesc.bcx;

    auto llelt_tydesc = get_tydesc(bcx, elt_ty);
    bcx = llelt_tydesc.bcx;

    auto dst = bcx.build.PointerCast(lhs, T_ptr(T_opaque_vec_ptr()));
    auto src = bcx.build.PointerCast(rhs, T_opaque_vec_ptr());

    ret res(bcx, bcx.build.FastCall(cx.fcx.ccx.glues.vec_append_glue,
                                    vec(cx.fcx.lltaskptr,
                                        llvec_tydesc.val,
                                        llelt_tydesc.val,
                                        dst, src, skip_null)));
}

fn trans_vec_add(@block_ctxt cx, @ty.t t,
                 ValueRef lhs, ValueRef rhs) -> result {
    auto r = alloc_ty(cx, t);
    auto tmp = r.val;
    r = copy_ty(r.bcx, INIT, tmp, lhs, t);
    auto bcx = trans_vec_append(r.bcx, t, tmp, rhs).bcx;
    tmp = load_scalar_or_boxed(bcx, tmp, t);
    find_scope_cx(cx).cleanups += clean(bind drop_ty(_, tmp, t));
    ret res(bcx, tmp);
}


fn trans_eager_binop(@block_ctxt cx, ast.binop op, @ty.t intype,
                     ValueRef lhs, ValueRef rhs) -> result {

    alt (op) {
        case (ast.add) {
            if (ty.type_is_sequence(intype)) {
                ret trans_vec_add(cx, intype, lhs, rhs);
            }
            ret res(cx, cx.build.Add(lhs, rhs));
        }
        case (ast.sub) { ret res(cx, cx.build.Sub(lhs, rhs)); }

        case (ast.mul) { ret res(cx, cx.build.Mul(lhs, rhs)); }
        case (ast.div) {
            if (ty.type_is_signed(intype)) {
                ret res(cx, cx.build.SDiv(lhs, rhs));
            } else {
                ret res(cx, cx.build.UDiv(lhs, rhs));
            }
        }
        case (ast.rem) {
            if (ty.type_is_signed(intype)) {
                ret res(cx, cx.build.SRem(lhs, rhs));
            } else {
                ret res(cx, cx.build.URem(lhs, rhs));
            }
        }

        case (ast.bitor) { ret res(cx, cx.build.Or(lhs, rhs)); }
        case (ast.bitand) { ret res(cx, cx.build.And(lhs, rhs)); }
        case (ast.bitxor) { ret res(cx, cx.build.Xor(lhs, rhs)); }
        case (ast.lsl) { ret res(cx, cx.build.Shl(lhs, rhs)); }
        case (ast.lsr) { ret res(cx, cx.build.LShr(lhs, rhs)); }
        case (ast.asr) { ret res(cx, cx.build.AShr(lhs, rhs)); }
        case (_) {
            ret trans_compare(cx, op, intype, lhs, rhs);
        }
    }
    fail;
}

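// Strips box indirections: repeatedly loads the box body until a non-box
// type is reached and returns the innermost value.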
fn autoderef(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
    let ValueRef v1 = v;
    let @ty.t t1 = t;

    while (true) {
        alt (t1.struct) {
            case (ty.ty_box(?inner)) {
                auto body = cx.build.GEP(v1,
                                         vec(C_int(0),
                                             C_int(abi.box_rc_field_body)));
                t1 = inner;
                v1 = load_scalar_or_boxed(cx, body, inner);
            }
            case (_) {
                ret res(cx, v1);
            }
        }
    }
}

fn autoderefed_ty(@ty.t t) -> @ty.t {
    let @ty.t t1 = t;

    while (true) {
        alt (t1.struct) {
            case (ty.ty_box(?inner)) {
                t1 = inner;
            }
            case (_) {
                ret t1;
            }
        }
    }
}

fn trans_binary(@block_ctxt cx, ast.binop op,
                @ast.expr a, @ast.expr b) -> result {

    // First couple cases are lazy:

    alt (op) {
        case (ast.and) {
            // Lazy-eval and
            auto lhs_res = trans_expr(cx, a);
            lhs_res = autoderef(lhs_res.bcx, lhs_res.val, ty.expr_ty(a));

            auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
            auto rhs_res = trans_expr(rhs_cx, b);
            rhs_res = autoderef(rhs_res.bcx, rhs_res.val, ty.expr_ty(b));

            auto lhs_false_cx = new_scope_block_ctxt(cx, "lhs false");
            auto lhs_false_res = res(lhs_false_cx, C_bool(false));

            lhs_res.bcx.build.CondBr(lhs_res.val,
                                     rhs_cx.llbb,
                                     lhs_false_cx.llbb);

            ret join_results(cx, T_bool(),
                             vec(lhs_false_res, rhs_res));
        }

        case (ast.or) {
            // Lazy-eval or
            auto lhs_res = trans_expr(cx, a);
            lhs_res = autoderef(lhs_res.bcx, lhs_res.val, ty.expr_ty(a));

            auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
            auto rhs_res = trans_expr(rhs_cx, b);
            rhs_res = autoderef(rhs_res.bcx, rhs_res.val, ty.expr_ty(b));

            auto lhs_true_cx = new_scope_block_ctxt(cx, "lhs true");
            auto lhs_true_res = res(lhs_true_cx, C_bool(true));

            lhs_res.bcx.build.CondBr(lhs_res.val,
                                     lhs_true_cx.llbb,
                                     rhs_cx.llbb);

            ret join_results(cx, T_bool(),
                             vec(lhs_true_res, rhs_res));
        }

        case (_) {
            // Remaining cases are eager:
            auto lhs = trans_expr(cx, a);
            auto lhty = ty.expr_ty(a);
            lhs = autoderef(lhs.bcx, lhs.val, lhty);
            auto rhs = trans_expr(lhs.bcx, b);
            auto rhty = ty.expr_ty(b);
            rhs = autoderef(rhs.bcx, rhs.val, rhty);
            ret trans_eager_binop(rhs.bcx, op,
                                  autoderefed_ty(lhty),
                                  lhs.val, rhs.val);
        }
    }
    fail;
}

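// Merges several control-flow edges: terminated (dead) edges are dropped, a
// single live edge is passed through unchanged, and two or more live edges
// are branched into a fresh join block whose phi selects among their values.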
fn join_results(@block_ctxt parent_cx,
                TypeRef t,
                vec[result] ins)
    -> result {

    let vec[result] live = vec();
    let vec[ValueRef] vals = vec();
    let vec[BasicBlockRef] bbs = vec();

    for (result r in ins) {
        if (! is_terminated(r.bcx)) {
            live += r;
            vals += r.val;
            bbs += r.bcx.llbb;
        }
    }

    alt (_vec.len[result](live)) {
        case (0u) {
            // No incoming edges are live, so we're in dead-code-land.
            // Arbitrarily pick the first dead edge, since the caller
            // is just going to propagate it outward.
            check (_vec.len[result](ins) >= 1u);
            ret ins.(0);
        }

        case (1u) {
            // Only one incoming edge is live, so we just feed that block
            // onward.
            ret live.(0);
        }

        case (_) { /* fall through */ }
    }

    // We have >1 incoming edges. Make a join block and br+phi them into it.
    auto join_cx = new_sub_block_ctxt(parent_cx, "join");
    for (result r in live) {
        r.bcx.build.Br(join_cx.llbb);
    }
    auto phi = join_cx.build.Phi(t, vals, bbs);
    ret res(join_cx, phi);
}

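// Lowers if/else-if/else chains: the first 'else if' is handled by recursing
// on trans_if inside the else block, and the then/else results are merged
// with join_results (currently typed as nil; see FIXME below).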
fn trans_if(@block_ctxt cx, @ast.expr cond, &ast.block thn,
            &vec[tup(@ast.expr, ast.block)] elifs,
            &option.t[ast.block] els) -> result {

    auto cond_res = trans_expr(cx, cond);

    auto then_cx = new_scope_block_ctxt(cx, "then");
    auto then_res = trans_block(then_cx, thn);

    auto else_cx = new_scope_block_ctxt(cx, "else");
    auto else_res = res(else_cx, C_nil());

    auto num_elifs = _vec.len[tup(@ast.expr, ast.block)](elifs);
    if (num_elifs > 0u) {
        auto next_elif = elifs.(0u);
        auto next_elifthn = next_elif._0;
        auto next_elifcnd = next_elif._1;
        auto rest_elifs = _vec.shift[tup(@ast.expr, ast.block)](elifs);
        else_res = trans_if(else_cx, next_elifthn, next_elifcnd,
                            rest_elifs, els);
    }

    /* else: FIXME: rustboot has a problem here
       with preconditions inside an else block */
    if (num_elifs == 0u)  {
        alt (els) {
            case (some[ast.block](?eblk)) {
                else_res = trans_block(else_cx, eblk);
            }
            case (_) { /* fall through */ }
        }
    }

    cond_res.bcx.build.CondBr(cond_res.val,
                              then_cx.llbb,
                              else_cx.llbb);

    // FIXME: use inferred type when available.
    ret join_results(cx, T_nil(),
                     vec(then_res, else_res));
}

fn trans_for(@block_ctxt cx,
             @ast.decl decl,
             @ast.expr seq,
             &ast.block body) -> result {

    fn inner(@block_ctxt cx,
             @ast.local local, ValueRef curr,
             @ty.t t, ast.block body) -> result {

        auto scope_cx = new_scope_block_ctxt(cx, "for loop scope");
        auto next_cx = new_sub_block_ctxt(cx, "next");

        cx.build.Br(scope_cx.llbb);
        auto local_res = alloc_local(scope_cx, local);
        auto bcx = copy_ty(local_res.bcx, INIT, local_res.val, curr, t).bcx;
        scope_cx.cleanups += clean(bind drop_slot(_, local_res.val, t));
        bcx = trans_block(bcx, body).bcx;
        bcx.build.Br(next_cx.llbb);
        ret res(next_cx, C_nil());
    }


    let @ast.local local;
    alt (decl.node) {
        case (ast.decl_local(?loc)) {
            local = loc;
        }
    }

    auto seq_ty = ty.expr_ty(seq);
    auto seq_res = trans_expr(cx, seq);
    ret iter_sequence(seq_res.bcx, seq_res.val, seq_ty,
                      bind inner(_, local, _, _, body));
}

fn trans_for_each(@block_ctxt cx,
                  @ast.decl decl,
                  @ast.expr seq,
                  &ast.block body) -> result {

    /*
     * The translation is a little complex here. Code like:
     *
     *    let ty1 p = ...;
     *
     *    let ty1 q = ...;
     *
     *    foreach (ty v in foo(a,b)) { body(p,q,v) }
     *
     *
     * Turns into something like the following (C/Rust mishmash):
     *
     *    type env = { *ty1 p, *ty2 q, ... };
     *
     *    let env e = { &p, &q, ... };
     *
     *    fn foreach123_body(env* e, ty v) { body(*(e->p),*(e->q),v) }
     *
     *    foo([foreach123_body, env*], a, b);
     *
     */

    // Step 1: walk body and figure out which references it makes
    // escape. This could be determined upstream, and probably ought
    // to be so, eventually. For a first cut, skip this. Null env.

    auto env_ty = T_opaque_closure_ptr(cx.fcx.ccx.tn);


    // Step 2: Declare foreach body function.

    // FIXME: possibly support alias-mode here?
    auto decl_ty = plain_ty(ty.ty_nil);
    alt (decl.node) {
        case (ast.decl_local(?local)) {
            decl_ty = node_ann_type(cx.fcx.ccx, local.ann);
        }
    }

    let str s =
        cx.fcx.ccx.names.next("_rust_foreach")
        + sep() + cx.fcx.ccx.path;

    // The 'env' arg entering the body function is a fake env member (as in
    // the env-part of the normal rust calling convention) that actually
    // points to a stack allocated env in this frame. We bundle that env
    // pointer along with the foreach-body-fn pointer into a 'normal' fn pair
    // and pass it in as a first class fn-arg to the iterator.

    auto iter_body_llty = type_of_fn_full(cx.fcx.ccx, ast.proto_fn,
                                          none[TypeRef],
                                          vec(rec(mode=ast.val, ty=decl_ty)),
                                          plain_ty(ty.ty_nil), 0u);

    let ValueRef lliterbody = decl_fastcall_fn(cx.fcx.ccx.llmod,
                                               s, iter_body_llty);

    // FIXME: handle ty params properly.
    let vec[ast.ty_param] ty_params = vec();

    auto fcx = new_fn_ctxt(cx.fcx.ccx, lliterbody);
    auto bcx = new_top_block_ctxt(fcx);

    // FIXME: populate lllocals from llenv here.
    auto res = trans_block(bcx, body);
    res.bcx.build.RetVoid();


    // Step 3: Call iter passing [lliterbody, llenv], plus other args.

    alt (seq.node) {

        case (ast.expr_call(?f, ?args, ?ann)) {

            auto pair = cx.build.Alloca(T_fn_pair(cx.fcx.ccx.tn,
                                                  iter_body_llty));
            auto code_cell = cx.build.GEP(pair,
                                          vec(C_int(0),
                                              C_int(abi.fn_field_code)));
            cx.build.Store(lliterbody, code_cell);

            // log "lliterbody: " + val_str(cx.fcx.ccx.tn, lliterbody);
            ret trans_call(cx, f,
                           some[ValueRef](cx.build.Load(pair)),
                           args,
                           ann);
        }
    }
    fail;
}


fn trans_while(@block_ctxt cx, @ast.expr cond,
               &ast.block body) -> result {
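    // Lay out separate blocks for the loop condition, the loop body and the
    // code following the loop. Control enters the condition block first; the
    // condition either branches into the body or out to 'next', and the body
    // branches back to the condition.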

    auto cond_cx = new_scope_block_ctxt(cx, "while cond");
    auto body_cx = new_scope_block_ctxt(cx, "while loop body");
    auto next_cx = new_sub_block_ctxt(cx, "next");

    auto body_res = trans_block(body_cx, body);
    auto cond_res = trans_expr(cond_cx, cond);

    body_res.bcx.build.Br(cond_cx.llbb);
    cond_res.bcx.build.CondBr(cond_res.val,
                              body_cx.llbb,
                              next_cx.llbb);

    cx.build.Br(cond_cx.llbb);
    ret res(next_cx, C_nil());
}

fn trans_do_while(@block_ctxt cx, &ast.block body,
                  @ast.expr cond) -> result {
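    // Same shape as trans_while, except that control enters the body block
    // first and the condition is evaluated at the end of it, so the body
    // always runs at least once.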

    auto body_cx = new_scope_block_ctxt(cx, "do-while loop body");
    auto next_cx = new_sub_block_ctxt(cx, "next");

    auto body_res = trans_block(body_cx, body);
    auto cond_res = trans_expr(body_res.bcx, cond);

    cond_res.bcx.build.CondBr(cond_res.val,
                              body_cx.llbb,
                              next_cx.llbb);
    cx.build.Br(body_cx.llbb);
    ret res(next_cx, body_res.val);
}

// Pattern matching translation

fn trans_pat_match(@block_ctxt cx, @ast.pat pat, ValueRef llval,
                   @block_ctxt next_cx) -> result {
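    // Emit the 'test' half of an alt arm: check whether llval matches pat,
    // branching to next_cx when it does not. Wildcards and bindings always
    // match; literal patterns compare for equality; tag patterns compare the
    // loaded discriminant against the variant's index and then recursively
    // test any subpatterns.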
    alt (pat.node) {
        case (ast.pat_wild(_)) { ret res(cx, llval); }
        case (ast.pat_bind(_, _, _)) { ret res(cx, llval); }

        case (ast.pat_lit(?lt, ?ann)) {
            auto lllit = trans_lit(cx.fcx.ccx, *lt, ann);
            auto lltype = ty.ann_to_type(ann);
            auto lleq = trans_compare(cx, ast.eq, lltype, llval, lllit);

            auto matched_cx = new_sub_block_ctxt(lleq.bcx, "matched_cx");
            lleq.bcx.build.CondBr(lleq.val, matched_cx.llbb, next_cx.llbb);
            ret res(matched_cx, llval);
        }

        case (ast.pat_tag(?id, ?subpats, ?vdef_opt, ?ann)) {
            auto lltagptr = cx.build.PointerCast(llval,
                T_opaque_tag_ptr(cx.fcx.ccx.tn));

            auto lldiscrimptr = cx.build.GEP(lltagptr,
                                             vec(C_int(0), C_int(0)));
            auto lldiscrim = cx.build.Load(lldiscrimptr);

            auto vdef = option.get[ast.variant_def](vdef_opt);
            auto variant_id = vdef._1;
            auto variant_tag = 0;

            auto variants = tag_variants(cx.fcx.ccx, vdef._0);
            auto i = 0;
            for (ast.variant v in variants) {
                auto this_variant_id = v.id;
                if (variant_id._0 == this_variant_id._0 &&
                    variant_id._1 == this_variant_id._1) {
                    variant_tag = i;
                }
                i += 1;
            }

            auto matched_cx = new_sub_block_ctxt(cx, "matched_cx");

            auto lleq = cx.build.ICmp(lib.llvm.LLVMIntEQ, lldiscrim,
                                      C_int(variant_tag));
            cx.build.CondBr(lleq, matched_cx.llbb, next_cx.llbb);

            auto ty_params = node_ann_ty_params(ann);

            if (_vec.len[@ast.pat](subpats) > 0u) {
                auto llblobptr = matched_cx.build.GEP(lltagptr,
                    vec(C_int(0), C_int(1)));
                auto i = 0;
                for (@ast.pat subpat in subpats) {
                    auto rslt = GEP_tag(matched_cx, llblobptr, vdef._0,
                                        vdef._1, ty_params, i);
                    auto llsubvalptr = rslt.val;
                    matched_cx = rslt.bcx;

                    auto llsubval = load_scalar_or_boxed(matched_cx,
                                                         llsubvalptr,
                                                         pat_ty(subpat));
                    auto subpat_res = trans_pat_match(matched_cx, subpat,
                                                      llsubval, next_cx);
                    matched_cx = subpat_res.bcx;
                }
            }

            ret res(matched_cx, llval);
        }
    }

    fail;
}

fn trans_pat_binding(@block_ctxt cx, @ast.pat pat, ValueRef llval)
    -> result {
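    // Emit the 'binding' half of an arm that has already matched: allocate a
    // slot for each pat_bind, register it in lllocals with a drop cleanup,
    // and copy the matched value into it. Tag patterns recurse into their
    // subpatterns.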
    alt (pat.node) {
        case (ast.pat_wild(_)) { ret res(cx, llval); }
        case (ast.pat_lit(_, _)) { ret res(cx, llval); }
        case (ast.pat_bind(?id, ?def_id, ?ann)) {
            auto ty = node_ann_type(cx.fcx.ccx, ann);

            auto rslt = alloc_ty(cx, ty);
            auto dst = rslt.val;
            auto bcx = rslt.bcx;

            llvm.LLVMSetValueName(dst, _str.buf(id));
            bcx.fcx.lllocals.insert(def_id, dst);
            bcx.cleanups += clean(bind drop_slot(_, dst, ty));

            ret copy_ty(bcx, INIT, dst, llval, ty);
        }
        case (ast.pat_tag(_, ?subpats, ?vdef_opt, ?ann)) {
            if (_vec.len[@ast.pat](subpats) == 0u) { ret res(cx, llval); }

            // Get the appropriate variant for this tag.
            auto vdef = option.get[ast.variant_def](vdef_opt);
            auto variant = tag_variant_with_id(cx.fcx.ccx, vdef._0, vdef._1);

            auto lltagptr = cx.build.PointerCast(llval,
                T_opaque_tag_ptr(cx.fcx.ccx.tn));
            auto llblobptr = cx.build.GEP(lltagptr, vec(C_int(0), C_int(1)));

            auto ty_param_substs = node_ann_ty_params(ann);

            auto this_cx = cx;
            auto i = 0;
            for (@ast.pat subpat in subpats) {
                auto rslt = GEP_tag(this_cx, llblobptr, vdef._0, vdef._1,
                                    ty_param_substs, i);
                this_cx = rslt.bcx;
                auto llsubvalptr = rslt.val;

                auto llsubval = load_scalar_or_boxed(this_cx, llsubvalptr,
                                                     pat_ty(subpat));
                auto subpat_res = trans_pat_binding(this_cx, subpat,
                                                    llsubval);
                this_cx = subpat_res.bcx;
                i += 1;
            }

            ret res(this_cx, llval);
        }
    }
}

fn trans_alt(@block_ctxt cx, @ast.expr expr, vec[ast.arm] arms)
    -> result {
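    // Each arm is compiled as a chain: the pattern test branches to the next
    // arm's block on failure, and a fresh scope block holds the arm's
    // bindings and body. All arm bodies branch to a shared 'last' block.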
    auto expr_res = trans_expr(cx, expr);

    auto last_cx = new_sub_block_ctxt(expr_res.bcx, "last");

    auto this_cx = expr_res.bcx;
    for (ast.arm arm in arms) {
        auto next_cx = new_sub_block_ctxt(expr_res.bcx, "next");
        auto match_res = trans_pat_match(this_cx, arm.pat, expr_res.val,
                                         next_cx);

        auto binding_cx = new_scope_block_ctxt(match_res.bcx, "binding");
        match_res.bcx.build.Br(binding_cx.llbb);

        auto binding_res = trans_pat_binding(binding_cx, arm.pat,
                                             expr_res.val);

        auto block_res = trans_block(binding_res.bcx, arm.block);
        if (!is_terminated(block_res.bcx)) {
            block_res.bcx.build.Br(last_cx.llbb);
        }

        this_cx = next_cx;
    }

    // FIXME: This is executed when none of the patterns match; it should fail
    // instead!
    this_cx.build.Br(last_cx.llbb);

    // FIXME: This is very wrong; we should phi together all the arm blocks,
    // since this is an expression.
    ret res(last_cx, C_nil());
}

type generic_info = rec(@ty.t item_type,
                        vec[ValueRef] tydescs);

type lval_result = rec(result res,
                       bool is_mem,
                       option.t[generic_info] generic,
                       option.t[ValueRef] llobj);

fn lval_mem(@block_ctxt cx, ValueRef val) -> lval_result {
    ret rec(res=res(cx, val),
            is_mem=true,
            generic=none[generic_info],
            llobj=none[ValueRef]);
}

fn lval_val(@block_ctxt cx, ValueRef val) -> lval_result {
    ret rec(res=res(cx, val),
            is_mem=false,
            generic=none[generic_info],
            llobj=none[ValueRef]);
}

fn lval_generic_fn(@block_ctxt cx,
                   ty.ty_params_and_ty tpt,
                   ast.def_id fn_id,
                   &ast.ann ann)
    -> lval_result {
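    // Form an lval for a (possibly generic) fn item: start from its static
    // fn pair and, when the annotation carries type arguments, compute one
    // tydesc per argument and attach a generic_info describing the
    // polymorphic item type.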

    check (cx.fcx.ccx.fn_pairs.contains_key(fn_id));
    auto lv = lval_val(cx, cx.fcx.ccx.fn_pairs.get(fn_id));

    auto monoty;
    auto tys;
    alt (ann) {
        case (ast.ann_none) {
            cx.fcx.ccx.sess.bug("no type annotation for path!");
            fail;
        }
        case (ast.ann_type(?monoty_, ?tps)) {
            monoty = monoty_;
            tys = option.get[vec[@ty.t]](tps);
        }
    }

    if (_vec.len[@ty.t](tys) != 0u) {
        auto bcx = cx;
        let vec[ValueRef] tydescs = vec();
        for (@ty.t t in tys) {
            auto td = get_tydesc(bcx, t);
            bcx = td.bcx;
            append[ValueRef](tydescs, td.val);
        }
        auto gen = rec( item_type = tpt._1,
                        tydescs = tydescs );
        lv = rec(res = res(bcx, lv.res.val),
                 generic = some[generic_info](gen)
                 with lv);
    }
    ret lv;
}

fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
              &ast.ann ann) -> lval_result {
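    // Resolve a path expression through its def: args, locals, bindings and
    // obj fields come straight out of the fn-context tables; fns, objs,
    // native fns and non-nullary tag constructors go through lval_generic_fn;
    // a nullary tag variant is materialized here by allocating the tag value
    // and storing its discriminant.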
    alt (dopt) {
        case (some[ast.def](?def)) {
            alt (def) {
                case (ast.def_arg(?did)) {
                    check (cx.fcx.llargs.contains_key(did));
                    ret lval_mem(cx, cx.fcx.llargs.get(did));
                }
                case (ast.def_local(?did)) {
                    check (cx.fcx.lllocals.contains_key(did));
                    ret lval_mem(cx, cx.fcx.lllocals.get(did));
                }
                case (ast.def_binding(?did)) {
                    check (cx.fcx.lllocals.contains_key(did));
                    ret lval_mem(cx, cx.fcx.lllocals.get(did));
                }
                case (ast.def_obj_field(?did)) {
                    check (cx.fcx.llobjfields.contains_key(did));
                    ret lval_mem(cx, cx.fcx.llobjfields.get(did));
                }
                case (ast.def_fn(?did)) {
                    check (cx.fcx.ccx.items.contains_key(did));
                    auto fn_item = cx.fcx.ccx.items.get(did);
                    ret lval_generic_fn(cx, ty.item_ty(fn_item), did, ann);
                }
                case (ast.def_obj(?did)) {
                    check (cx.fcx.ccx.items.contains_key(did));
                    auto fn_item = cx.fcx.ccx.items.get(did);
                    ret lval_generic_fn(cx, ty.item_ty(fn_item), did, ann);
                }
                case (ast.def_variant(?tid, ?vid)) {
                    if (cx.fcx.ccx.fn_pairs.contains_key(vid)) {
                        check (cx.fcx.ccx.items.contains_key(tid));
                        auto tag_item = cx.fcx.ccx.items.get(tid);
                        auto params = ty.item_ty(tag_item)._0;
                        auto fty = plain_ty(ty.ty_nil);
                        alt (tag_item.node) {
                            case (ast.item_tag(_, ?variants, _, _)) {
                                for (ast.variant v in variants) {
                                    if (v.id == vid) {
                                        fty = node_ann_type(cx.fcx.ccx,
                                                            v.ann);
                                    }
                                }
                            }
                        }
                        ret lval_generic_fn(cx, tup(params, fty), vid, ann);
                    } else {
                        // Nullary variant.
                        auto tag_ty = node_ann_type(cx.fcx.ccx, ann);
                        auto lldiscrim_gv = cx.fcx.ccx.discrims.get(vid);
                        auto lldiscrim = cx.build.Load(lldiscrim_gv);

                        auto alloc_result = alloc_ty(cx, tag_ty);
                        auto lltagblob = alloc_result.val;
                        auto lltagptr = alloc_result.bcx.build.PointerCast(
                            lltagblob, T_ptr(type_of(cx.fcx.ccx, tag_ty)));

                        auto lldiscrimptr = alloc_result.bcx.build.GEP(
                            lltagptr, vec(C_int(0), C_int(0)));
                        alloc_result.bcx.build.Store(lldiscrim, lldiscrimptr);

                        ret lval_val(alloc_result.bcx, lltagptr);
                    }
                }
                case (ast.def_const(?did)) {
                    check (cx.fcx.ccx.consts.contains_key(did));
                    ret lval_mem(cx, cx.fcx.ccx.consts.get(did));
                }
                case (ast.def_native_fn(?did)) {
                    check (cx.fcx.ccx.native_items.contains_key(did));
                    auto fn_item = cx.fcx.ccx.native_items.get(did);
                    ret lval_generic_fn(cx, ty.native_item_ty(fn_item),
                                        did, ann);
                }
                case (_) {
                    cx.fcx.ccx.sess.unimpl("def variant in trans");
                }
            }
        }
        case (none[ast.def]) {
            cx.fcx.ccx.sess.err("unresolved expr_path in trans");
        }
    }
    fail;
}

fn trans_field(@block_ctxt cx, &ast.span sp, @ast.expr base,
               &ast.ident field, &ast.ann ann) -> lval_result {
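    // Translate a field access: evaluate and autoderef the base, then GEP to
    // the selected element for tuples and records. For an obj, index into its
    // vtbl instead and remember the obj value (llobj) so a subsequent call
    // can pass it as the self/env argument.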
    auto r = trans_expr(cx, base);
    auto t = ty.expr_ty(base);
    r = autoderef(r.bcx, r.val, t);
    t = autoderefed_ty(t);
    alt (t.struct) {
        case (ty.ty_tup(?fields)) {
            let uint ix = ty.field_num(cx.fcx.ccx.sess, sp, field);
            auto v = GEP_tup_like(r.bcx, t, r.val, vec(0, ix as int));
            ret lval_mem(v.bcx, v.val);
        }
        case (ty.ty_rec(?fields)) {
            let uint ix = ty.field_idx(cx.fcx.ccx.sess, sp, field, fields);
            auto v = GEP_tup_like(r.bcx, t, r.val, vec(0, ix as int));
            ret lval_mem(v.bcx, v.val);
        }
        case (ty.ty_obj(?methods)) {
            let uint ix = ty.method_idx(cx.fcx.ccx.sess, sp, field, methods);
            auto vtbl = r.bcx.build.GEP(r.val,
                                        vec(C_int(0),
                                            C_int(abi.obj_field_vtbl)));
            vtbl = r.bcx.build.Load(vtbl);
            auto v =  r.bcx.build.GEP(vtbl, vec(C_int(0),
                                                C_int(ix as int)));

            auto lvo = lval_mem(r.bcx, v);
            ret rec(llobj = some[ValueRef](r.val) with lvo);
        }
        case (_) { cx.fcx.ccx.sess.unimpl("field variant in trans_field"); }
    }
    fail;
}

fn trans_index(@block_ctxt cx, &ast.span sp, @ast.expr base,
               @ast.expr idx, &ast.ann ann) -> lval_result {
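    // Translate a vec-index expression: normalize the index to int width,
    // scale it by the element size and bounds-check it against the vec's
    // fill field; out-of-range indices branch to a block that fails, while
    // in-range indices GEP into the vec body and yield an lval.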

    auto lv = trans_expr(cx, base);
    lv = autoderef(lv.bcx, lv.val, ty.expr_ty(base));
    auto ix = trans_expr(lv.bcx, idx);
    auto v = lv.val;
    auto bcx = ix.bcx;

    // Cast to an LLVM integer. Rust is less strict than LLVM in this regard.
    auto ix_val;
    auto ix_size = llsize_of_real(cx.fcx.ccx, val_ty(ix.val));
    auto int_size = llsize_of_real(cx.fcx.ccx, T_int());
    if (ix_size < int_size) {
        ix_val = bcx.build.ZExt(ix.val, T_int());
    } else if (ix_size > int_size) {
        ix_val = bcx.build.Trunc(ix.val, T_int());
    } else {
        ix_val = ix.val;
    }

    auto unit_sz = size_of(bcx, node_ann_type(cx.fcx.ccx, ann));
    bcx = unit_sz.bcx;

    auto scaled_ix = bcx.build.Mul(ix_val, unit_sz.val);

    auto lim = bcx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_fill)));
    lim = bcx.build.Load(lim);

    auto bounds_check = bcx.build.ICmp(lib.llvm.LLVMIntULT,
                                       scaled_ix, lim);

    auto fail_cx = new_sub_block_ctxt(bcx, "fail");
    auto next_cx = new_sub_block_ctxt(bcx, "next");
    bcx.build.CondBr(bounds_check, next_cx.llbb, fail_cx.llbb);

    // fail: bad bounds check.
    auto fail_res = trans_fail(fail_cx, sp, "bounds check");
    fail_res.bcx.build.Br(next_cx.llbb);

    auto body = next_cx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_data)));
    auto elt = next_cx.build.GEP(body, vec(C_int(0), ix_val));
    ret lval_mem(next_cx, elt);
}

// The is_mem flag in the returned lval_result indicates whether the value
// lives in memory (an alloca or heap cell) and hence needs a 'load' before
// it can be used as an immediate.

fn trans_lval(@block_ctxt cx, @ast.expr e) -> lval_result {
    alt (e.node) {
        case (ast.expr_path(?p, ?dopt, ?ann)) {
            ret trans_path(cx, p, dopt, ann);
        }
        case (ast.expr_field(?base, ?ident, ?ann)) {
            ret trans_field(cx, e.span, base, ident, ann);
        }
        case (ast.expr_index(?base, ?idx, ?ann)) {
            ret trans_index(cx, e.span, base, idx, ann);
        }
        case (_) { cx.fcx.ccx.sess.unimpl("expr variant in trans_lval"); }
    }
    fail;
}

fn trans_cast(@block_ctxt cx, @ast.expr e, &ast.ann ann) -> result {
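    // Integer casts only for now: compare LLVM bit widths and widen with
    // SExt or ZExt depending on whether the destination type is signed,
    // otherwise truncate (or bitcast when the widths already agree).
    // Floating-point casts are not yet implemented.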
    auto e_res = trans_expr(cx, e);
    auto llsrctype = val_ty(e_res.val);
    auto t = node_ann_type(cx.fcx.ccx, ann);
    auto lldsttype = type_of(cx.fcx.ccx, t);
    if (!ty.type_is_fp(t)) {
        if (llvm.LLVMGetIntTypeWidth(lldsttype) >
            llvm.LLVMGetIntTypeWidth(llsrctype)) {
            if (ty.type_is_signed(t)) {
                // Widening signed cast.
                e_res.val =
                    e_res.bcx.build.SExtOrBitCast(e_res.val,
                                                  lldsttype);
            } else {
                // Widening unsigned cast.
                e_res.val =
                    e_res.bcx.build.ZExtOrBitCast(e_res.val,
                                                  lldsttype);
            }
        } else {
            // Narrowing cast.
            e_res.val =
                e_res.bcx.build.TruncOrBitCast(e_res.val,
                                               lldsttype);
        }
    } else {
        cx.fcx.ccx.sess.unimpl("fp cast");
    }
    ret e_res;
}

fn trans_bind_thunk(@crate_ctxt cx,
                    @ty.t incoming_fty,
                    @ty.t outgoing_fty,
                    vec[option.t[@ast.expr]] args,
                    @ty.t closure_ty,
                    vec[@ty.t] bound_tys,
                    uint ty_param_count) -> ValueRef {
    // Construct a thunk with signature incoming_fty that copies its args
    // forward into a call with signature outgoing_fty.

    let str s = cx.names.next("_rust_thunk") + sep() + cx.path;
    let TypeRef llthunk_ty = get_pair_fn_ty(type_of(cx, incoming_fty));
    let ValueRef llthunk = decl_fastcall_fn(cx.llmod, s, llthunk_ty);

    auto fcx = new_fn_ctxt(cx, llthunk);
    auto bcx = new_top_block_ctxt(fcx);

    auto llclosure_ptr_ty = type_of(cx, plain_ty(ty.ty_box(closure_ty)));
    auto llclosure = bcx.build.PointerCast(fcx.llenv, llclosure_ptr_ty);

    auto lltarget = GEP_tup_like(bcx, closure_ty, llclosure,
                                 vec(0,
                                     abi.box_rc_field_body,
                                     abi.closure_elt_target));
    bcx = lltarget.bcx;
    auto lltargetclosure = bcx.build.GEP(lltarget.val,
                                         vec(C_int(0),
                                             C_int(abi.fn_field_box)));
    lltargetclosure = bcx.build.Load(lltargetclosure);

    auto outgoing_ret_ty = ty.ty_fn_ret(outgoing_fty);
    auto outgoing_args = ty.ty_fn_args(outgoing_fty);

    auto llretptr = fcx.llretptr;
    if (ty.type_has_dynamic_size(outgoing_ret_ty)) {
        llretptr = bcx.build.PointerCast(llretptr, T_typaram_ptr(cx.tn));
    }

    let vec[ValueRef] llargs = vec(llretptr,
                                   fcx.lltaskptr,
                                   lltargetclosure);

    // Copy in the type parameters.
    let uint i = 0u;
    while (i < ty_param_count) {
        auto lltyparam_ptr =
            GEP_tup_like(bcx, closure_ty, llclosure,
                         vec(0,
                             abi.box_rc_field_body,
                             abi.closure_elt_ty_params,
                             (i as int)));
        bcx = lltyparam_ptr.bcx;
        llargs += vec(bcx.build.Load(lltyparam_ptr.val));
        i += 1u;
    }

    let uint a = 2u + i;    // retptr, task ptr, env come first
    let int b = 0;
    let uint outgoing_arg_index = 0u;
    let vec[TypeRef] llout_arg_tys =
        type_of_explicit_args(cx, outgoing_args);

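    // Forward each argument into the outgoing call: args that were bound at
    // bind time are fetched back out of the closure's bindings tuple, while
    // args left unbound are taken from the thunk's own incoming parameters.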
    for (option.t[@ast.expr] arg in args) {

        auto out_arg = outgoing_args.(outgoing_arg_index);
        auto llout_arg_ty = llout_arg_tys.(outgoing_arg_index);

        alt (arg) {

            // Arg provided at binding time; thunk copies it from closure.
            case (some[@ast.expr](_)) {
                auto bound_arg =
                    GEP_tup_like(bcx, closure_ty, llclosure,
                                 vec(0,
                                     abi.box_rc_field_body,
                                     abi.closure_elt_bindings,
                                     b));

                bcx = bound_arg.bcx;
                auto val = bound_arg.val;

                if (out_arg.mode == ast.val) {
                    val = bcx.build.Load(val);
                } else if (ty.count_ty_params(out_arg.ty) > 0u) {
                    check (out_arg.mode == ast.alias);
                    val = bcx.build.PointerCast(val, llout_arg_ty);
                }

                llargs += val;
                b += 1;
            }

            // Arg will be provided when the thunk is invoked.
            case (none[@ast.expr]) {
                let ValueRef passed_arg = llvm.LLVMGetParam(llthunk, a);

                if (ty.count_ty_params(out_arg.ty) > 0u) {
                    check (out_arg.mode == ast.alias);
                    passed_arg = bcx.build.PointerCast(passed_arg,
                                                       llout_arg_ty);
                }

                llargs += passed_arg;
                a += 1u;
            }
        }

        outgoing_arg_index += 1u;
    }

    // FIXME: turn this call + ret into a tail call.
    auto lltargetfn = bcx.build.GEP(lltarget.val,
                                    vec(C_int(0),
                                        C_int(abi.fn_field_code)));

    // Cast the outgoing function to the appropriate type (see the comments in
    // trans_bind below for why this is necessary).
    auto lltargetty = type_of_fn(bcx.fcx.ccx,
                                 ty.ty_fn_proto(outgoing_fty),
                                 outgoing_args,
                                 outgoing_ret_ty,
                                 ty_param_count);
    lltargetfn = bcx.build.PointerCast(lltargetfn, T_ptr(T_ptr(lltargetty)));

    lltargetfn = bcx.build.Load(lltargetfn);

    auto r = bcx.build.FastCall(lltargetfn, llargs);
    bcx.build.RetVoid();

    ret llthunk;
}

fn trans_bind(@block_ctxt cx, @ast.expr f,
              vec[option.t[@ast.expr]] args,
              &ast.ann ann) -> result {
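    // Translate a bind expression: evaluate the bound argument expressions,
    // heap-allocate a closure recording them (plus any tydescs for a generic
    // callee), emit a forwarding thunk with trans_bind_thunk, and assemble a
    // new fn pair from the thunk and the boxed closure.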
    auto f_res = trans_lval(cx, f);
    if (f_res.is_mem) {
        cx.fcx.ccx.sess.unimpl("re-binding existing function");
    } else {
        let vec[@ast.expr] bound = vec();

        for (option.t[@ast.expr] argopt in args) {
            alt (argopt) {
                case (none[@ast.expr]) {
                }
                case (some[@ast.expr](?e)) {
                    append[@ast.expr](bound, e);
                }
            }
        }

        // Figure out which tydescs we need to pass, if any.
        let @ty.t outgoing_fty;
        let vec[ValueRef] lltydescs;
        alt (f_res.generic) {
            case (none[generic_info]) {
                outgoing_fty = ty.expr_ty(f);
                lltydescs = vec();
            }
            case (some[generic_info](?ginfo)) {
                outgoing_fty = ginfo.item_type;
                lltydescs = ginfo.tydescs;
            }
        }
        auto ty_param_count = _vec.len[ValueRef](lltydescs);

        if (_vec.len[@ast.expr](bound) == 0u && ty_param_count == 0u) {
            // Trivial 'binding': just return the static pair-ptr.
            ret f_res.res;
        } else {
            auto bcx = f_res.res.bcx;
            auto pair_t = node_type(cx.fcx.ccx, ann);
            auto pair_v = bcx.build.Alloca(pair_t);

            // Translate the bound expressions.
            let vec[@ty.t] bound_tys = vec();
            let vec[ValueRef] bound_vals = vec();
            auto i = 0u;
            for (@ast.expr e in bound) {
                auto arg = trans_expr(bcx, e);
                bcx = arg.bcx;

                append[ValueRef](bound_vals, arg.val);
                append[@ty.t](bound_tys, ty.expr_ty(e));

                i += 1u;
            }

            // Synthesize a closure type.
            let @ty.t bindings_ty = plain_ty(ty.ty_tup(bound_tys));

            // NB: keep this in sync with T_closure_ptr; we're making
            // a ty.t structure that has the same "shape" as the LLVM type
            // it constructs.
            let @ty.t tydesc_ty = plain_ty(ty.ty_type);

            let vec[@ty.t] captured_tys =
                _vec.init_elt[@ty.t](tydesc_ty, ty_param_count);

            let vec[@ty.t] closure_tys =
                vec(tydesc_ty,
                    outgoing_fty,
                    bindings_ty,
                    plain_ty(ty.ty_tup(captured_tys)));

            let @ty.t closure_ty = plain_ty(ty.ty_tup(closure_tys));

            auto r = trans_malloc_boxed(bcx, closure_ty);
            auto box = r.val;
            bcx = r.bcx;
            auto rc = bcx.build.GEP(box,
                                    vec(C_int(0),
                                        C_int(abi.box_rc_field_refcnt)));
            auto closure =
                bcx.build.GEP(box,
                              vec(C_int(0),
                                  C_int(abi.box_rc_field_body)));
            bcx.build.Store(C_int(1), rc);

            // Store bindings tydesc.
            auto bound_tydesc =
                bcx.build.GEP(closure,
                              vec(C_int(0),
                                  C_int(abi.closure_elt_tydesc)));
            auto bindings_tydesc = get_tydesc(bcx, bindings_ty);
            bcx = bindings_tydesc.bcx;
            bcx.build.Store(bindings_tydesc.val, bound_tydesc);

            // Determine the LLVM type for the outgoing function type. This
            // may be different from the type returned by trans_malloc_boxed()
            // since we have more information than that function does;
            // specifically, we know how many type descriptors the outgoing
            // function has, which type_of() doesn't, as only we know which
            // item the function refers to.
            auto llfnty = type_of_fn(bcx.fcx.ccx,
                                     ty.ty_fn_proto(outgoing_fty),
                                     ty.ty_fn_args(outgoing_fty),
                                     ty.ty_fn_ret(outgoing_fty),
                                     ty_param_count);
            auto llclosurety = T_ptr(T_fn_pair(bcx.fcx.ccx.tn, llfnty));

            // Store thunk-target.
            auto bound_target =
                bcx.build.GEP(closure,
                              vec(C_int(0),
                                  C_int(abi.closure_elt_target)));
            auto src = bcx.build.Load(f_res.res.val);
            bound_target = bcx.build.PointerCast(bound_target, llclosurety);
            bcx.build.Store(src, bound_target);

            // Copy expr values into boxed bindings.
            i = 0u;
            auto bindings =
                bcx.build.GEP(closure,
                              vec(C_int(0),
                                  C_int(abi.closure_elt_bindings)));
            for (ValueRef v in bound_vals) {
                auto bound = bcx.build.GEP(bindings,
                                           vec(C_int(0), C_int(i as int)));
                bcx = copy_ty(r.bcx, INIT, bound, v, bound_tys.(i)).bcx;
                i += 1u;
            }

            // If necessary, copy tydescs describing type parameters into the
            // appropriate slot in the closure.
            alt (f_res.generic) {
                case (none[generic_info]) { /* nothing to do */ }
                case (some[generic_info](?ginfo)) {
                    auto ty_params_slot =
                        bcx.build.GEP(closure,
                                      vec(C_int(0),
                                          C_int(abi.closure_elt_ty_params)));
                    auto i = 0;
                    for (ValueRef td in ginfo.tydescs) {
                        auto ty_param_slot = bcx.build.GEP(ty_params_slot,
                                                           vec(C_int(0),
                                                               C_int(i)));
                        bcx.build.Store(td, ty_param_slot);
                        i += 1;
                    }
                }
            }

            // Make thunk and store thunk-ptr in outer pair's code slot.
            auto pair_code = bcx.build.GEP(pair_v,
                                           vec(C_int(0),
                                               C_int(abi.fn_field_code)));

            let @ty.t pair_ty = node_ann_type(cx.fcx.ccx, ann);

            let ValueRef llthunk =
                trans_bind_thunk(cx.fcx.ccx, pair_ty, outgoing_fty,
                                 args, closure_ty, bound_tys,
                                 ty_param_count);

            bcx.build.Store(llthunk, pair_code);

            // Store box ptr in outer pair's box slot.
            auto pair_box = bcx.build.GEP(pair_v,
                                          vec(C_int(0),
                                              C_int(abi.fn_field_box)));
            bcx.build.Store
                (bcx.build.PointerCast
                 (box,
                  T_opaque_closure_ptr(bcx.fcx.ccx.tn)),
                 pair_box);

            find_scope_cx(cx).cleanups +=
                clean(bind drop_slot(_, pair_v, pair_ty));

            ret res(bcx, pair_v);
        }
    }
}

// NB: must keep 4 fns in sync:
//
//  - type_of_fn_full
//  - create_llargs_for_fn_args.
//  - new_fn_ctxt
//  - trans_args

fn trans_args(@block_ctxt cx,
              ValueRef llenv,
              option.t[ValueRef] llobj,
              option.t[generic_info] gen,
              option.t[ValueRef] lliterbody,
              &vec[@ast.expr] es,
              @ty.t fn_ty)
    -> tup(@block_ctxt, vec[ValueRef], ValueRef) {

    let vec[ty.arg] args = ty.ty_fn_args(fn_ty);
    let vec[ValueRef] llargs = vec();
    let vec[ValueRef] lltydescs = vec();
    let @block_ctxt bcx = cx;


    // Arg 0: Output pointer.
    auto retty = ty.ty_fn_ret(fn_ty);
    auto llretslot_res = alloc_ty(bcx, retty);
    bcx = llretslot_res.bcx;
    auto llretslot = llretslot_res.val;

    alt (gen) {
        case (some[generic_info](?g)) {
            lltydescs = g.tydescs;
            args = ty.ty_fn_args(g.item_type);
            retty = ty.ty_fn_ret(g.item_type);
        }
        case (_) {
        }
    }
    if (ty.type_has_dynamic_size(retty)) {
        llargs += bcx.build.PointerCast(llretslot,
                                        T_typaram_ptr(cx.fcx.ccx.tn));
    } else if (ty.count_ty_params(retty) != 0u) {
        // It's possible that the callee has some generic-ness somewhere in
        // its return value -- say a method signature within an obj or a fn
        // type deep in a structure -- which the caller has a concrete view
        // of. If so, cast the caller's view of the retslot to the callee's
        // view, for the sake of making a type-compatible call.
        llargs += cx.build.PointerCast(llretslot,
                                       T_ptr(type_of(bcx.fcx.ccx, retty)));
    } else {
        llargs += llretslot;
    }


    // Arg 1: Task pointer.
    llargs += bcx.fcx.lltaskptr;

    // Arg 2: Env (closure-bindings / self-obj)
    alt (llobj) {
        case (some[ValueRef](?ob)) {
            // Every object is always found in memory,
            // and not-yet-loaded (as part of an lval x.y
            // dotted method-call).
            llargs += bcx.build.Load(ob);
        }
        case (_) {
            llargs += llenv;
        }
    }

    // Args >=3: ty_params ...
    llargs += lltydescs;

    // ... then possibly an lliterbody argument.
    alt (lliterbody) {
        case (none[ValueRef]) {}
        case (some[ValueRef](?lli)) {
            llargs += lli;
        }
    }

    // ... then explicit args.

    // First we figure out the caller's view of the types of the arguments.
    // This will be needed if this is a generic call, because the callee has
    // to cast her view of the arguments to the caller's view.
    auto arg_tys = type_of_explicit_args(cx.fcx.ccx, args);

    auto i = 0u;
    for (@ast.expr e in es) {
        auto mode = args.(i).mode;

        auto val;
        if (ty.type_is_structural(ty.expr_ty(e))) {
            auto re = trans_expr(bcx, e);
            val = re.val;
            bcx = re.bcx;
            if (mode == ast.val) {
                // Up to this point the structure has been handled by pointer;
                // it is now being passed by value as an arg, so we need to
                // load it.
                val = bcx.build.Load(val);
            }
        } else if (mode == ast.alias) {
            let lval_result lv;
            if (ty.is_lval(e)) {
                lv = trans_lval(bcx, e);
            } else {
                auto r = trans_expr(bcx, e);
                lv = lval_val(r.bcx, r.val);
            }
            bcx = lv.res.bcx;

            if (lv.is_mem) {
                val = lv.res.val;
            } else {
                // Non-mem but we're trying to alias; synthesize an
                // alloca, spill to it and pass its address.
                auto llty = val_ty(lv.res.val);
                auto llptr = lv.res.bcx.build.Alloca(llty);
                lv.res.bcx.build.Store(lv.res.val, llptr);
                val = llptr;
            }

        } else {
            auto re = trans_expr(bcx, e);
            val = re.val;
            bcx = re.bcx;
        }

        if (ty.count_ty_params(args.(i).ty) > 0u) {
            auto lldestty = arg_tys.(i);
            val = bcx.build.PointerCast(val, lldestty);
        }

        llargs += val;
        i += 1u;
    }

    ret tup(bcx, llargs, llretslot);
}

fn trans_call(@block_ctxt cx, @ast.expr f,
              option.t[ValueRef] lliterbody,
              vec[@ast.expr] args,
              &ast.ann ann) -> result {
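    // Translate a call: take the callee as an lval (a vtbl entry for method
    // calls, otherwise a fn pair split into code and env pointers), marshal
    // the arguments with trans_args, emit a FastCall, and, for non-nil
    // results, load the value back out of the return slot and schedule a
    // drop for it in the enclosing scope.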
    auto f_res = trans_lval(cx, f);
    auto faddr = f_res.res.val;
    auto llenv = C_null(T_opaque_closure_ptr(cx.fcx.ccx.tn));

    alt (f_res.llobj) {
        case (some[ValueRef](_)) {
            // It's a vtbl entry.
            faddr = f_res.res.bcx.build.Load(faddr);
        }
        case (none[ValueRef]) {
            // It's a closure.
            auto bcx = f_res.res.bcx;
            auto pair = faddr;
            faddr = bcx.build.GEP(pair, vec(C_int(0),
                                            C_int(abi.fn_field_code)));
            faddr = bcx.build.Load(faddr);

            auto llclosure = bcx.build.GEP(pair,
                                           vec(C_int(0),
                                               C_int(abi.fn_field_box)));
            llenv = bcx.build.Load(llclosure);
        }
    }
    auto fn_ty = ty.expr_ty(f);
    auto ret_ty = ty.ann_to_type(ann);
    auto args_res = trans_args(f_res.res.bcx,
                               llenv, f_res.llobj,
                               f_res.generic,
                               lliterbody,
                               args, fn_ty);

    auto bcx = args_res._0;
    auto llargs = args_res._1;
    auto llretslot = args_res._2;

    /*
    log "calling: " + val_str(cx.fcx.ccx.tn, faddr);

    for (ValueRef arg in llargs) {
        log "arg: " + val_str(cx.fcx.ccx.tn, arg);
    }
    */

    bcx.build.FastCall(faddr, llargs);
    auto retval = C_nil();

    if (!ty.type_is_nil(ret_ty)) {
        retval = load_scalar_or_boxed(bcx, llretslot, ret_ty);
        // Retval doesn't correspond to anything really tangible in the frame,
        // but it's a ref all the same, so we put a note here to drop it when
        // we're done in this scope.
        find_scope_cx(cx).cleanups += clean(bind drop_ty(_, retval, ret_ty));
    }

    ret res(bcx, retval);
}

fn trans_tup(@block_ctxt cx, vec[ast.elt] elts,
             &ast.ann ann) -> result {
    auto bcx = cx;
    auto t = node_ann_type(bcx.fcx.ccx, ann);
    auto tup_res = alloc_ty(bcx, t);
    auto tup_val = tup_res.val;
    bcx = tup_res.bcx;

    find_scope_cx(cx).cleanups += clean(bind drop_ty(_, tup_val, t));
    let int i = 0;

    for (ast.elt e in elts) {
        auto e_ty = ty.expr_ty(e.expr);
        auto src_res = trans_expr(bcx, e.expr);
        bcx = src_res.bcx;
        auto dst_res = GEP_tup_like(bcx, t, tup_val, vec(0, i));
        bcx = dst_res.bcx;
        bcx = copy_ty(src_res.bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
        i += 1;
    }
    ret res(bcx, tup_val);
}

fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
             &ast.ann ann) -> result {
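    // Translate a vec literal: allocate storage through upcall_new_vec
    // (sized as element count times element size; the tydesc argument is
    // still a placeholder), copy each element into the data area via a
    // pseudo-tuple view, then record the byte length in the fill field.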
    auto t = node_ann_type(cx.fcx.ccx, ann);
    auto unit_ty = t;
    alt (t.struct) {
        case (ty.ty_vec(?t)) {
            unit_ty = t;
        }
        case (_) {
            cx.fcx.ccx.sess.bug("non-vec type in trans_vec");
        }
    }

    auto llunit_ty = type_of(cx.fcx.ccx, unit_ty);
    auto bcx = cx;
    auto unit_sz = size_of(bcx, unit_ty);
    bcx = unit_sz.bcx;
    auto data_sz = llvm.LLVMConstMul(C_int(_vec.len[@ast.expr](args) as int),
                                     unit_sz.val);

    // FIXME: pass tydesc properly.
    auto sub = trans_upcall(bcx, "upcall_new_vec", vec(data_sz, C_int(0)));
    bcx = sub.bcx;

    auto llty = type_of(bcx.fcx.ccx, t);
    auto vec_val = vi2p(bcx, sub.val, llty);
    find_scope_cx(bcx).cleanups += clean(bind drop_ty(_, vec_val, t));

    auto body = bcx.build.GEP(vec_val, vec(C_int(0),
                                           C_int(abi.vec_elt_data)));

    auto pseudo_tup_ty =
        plain_ty(ty.ty_tup(_vec.init_elt[@ty.t](unit_ty,
                                                _vec.len[@ast.expr](args))));
    let int i = 0;

    for (@ast.expr e in args) {
        auto src_res = trans_expr(bcx, e);
        bcx = src_res.bcx;
        auto dst_res = GEP_tup_like(bcx, pseudo_tup_ty, body, vec(0, i));
        bcx = dst_res.bcx;
        bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, unit_ty).bcx;
        i += 1;
    }
    auto fill = bcx.build.GEP(vec_val,
                              vec(C_int(0), C_int(abi.vec_elt_fill)));
    bcx.build.Store(data_sz, fill);

    ret res(bcx, vec_val);
}

fn trans_rec(@block_ctxt cx, vec[ast.field] fields,
             option.t[@ast.expr] base, &ast.ann ann) -> result {
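    // Translate a record literal: allocate the record and walk its fields in
    // declaration order, initializing each from an explicitly supplied field
    // expression or, failing that, by copying the field out of the 'with'
    // base record.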

    auto bcx = cx;
    auto t = node_ann_type(bcx.fcx.ccx, ann);
    auto llty = type_of(bcx.fcx.ccx, t);
    auto rec_res = alloc_ty(bcx, t);
    auto rec_val = rec_res.val;
    bcx = rec_res.bcx;

    find_scope_cx(cx).cleanups += clean(bind drop_ty(_, rec_val, t));
    let int i = 0;

    auto base_val = C_nil();

    alt (base) {
        case (none[@ast.expr]) { }
        case (some[@ast.expr](?bexp)) {
            auto base_res = trans_expr(bcx, bexp);
            bcx = base_res.bcx;
            base_val = base_res.val;
        }
    }

    let vec[ty.field] ty_fields = vec();
    alt (t.struct) {
        case (ty.ty_rec(?flds)) { ty_fields = flds; }
    }

    for (ty.field tf in ty_fields) {
        auto e_ty = tf.ty;
        auto dst_res = GEP_tup_like(bcx, t, rec_val, vec(0, i));
        bcx = dst_res.bcx;

        auto expr_provided = false;
        auto src_res = res(bcx, C_nil());

        for (ast.field f in fields) {
            if (_str.eq(f.ident, tf.ident)) {
                expr_provided = true;
                src_res = trans_expr(bcx, f.expr);
            }
        }
        if (!expr_provided) {
            src_res = GEP_tup_like(bcx, t, base_val, vec(0, i));
            src_res = res(src_res.bcx,
                          load_scalar_or_boxed(bcx, src_res.val, e_ty));
        }

        bcx = src_res.bcx;
        bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
        i += 1;
    }
    ret res(bcx, rec_val);
}



fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
    alt (e.node) {
        case (ast.expr_lit(?lit, ?ann)) {
            ret res(cx, trans_lit(cx.fcx.ccx, *lit, ann));
        }

        case (ast.expr_unary(?op, ?x, ?ann)) {
            ret trans_unary(cx, op, x, ann);
        }

        case (ast.expr_binary(?op, ?x, ?y, _)) {
            ret trans_binary(cx, op, x, y);
        }

        case (ast.expr_if(?cond, ?thn, ?elifs, ?els, _)) {
            ret trans_if(cx, cond, thn, elifs, els);
        }

        case (ast.expr_for(?decl, ?seq, ?body, _)) {
            ret trans_for(cx, decl, seq, body);
        }

        case (ast.expr_for_each(?decl, ?seq, ?body, _)) {
            ret trans_for_each(cx, decl, seq, body);
        }

        case (ast.expr_while(?cond, ?body, _)) {
            ret trans_while(cx, cond, body);
        }

        case (ast.expr_do_while(?body, ?cond, _)) {
            ret trans_do_while(cx, body, cond);
        }

        case (ast.expr_alt(?expr, ?arms, _)) {
            ret trans_alt(cx, expr, arms);
        }

        case (ast.expr_block(?blk, _)) {
            auto sub_cx = new_scope_block_ctxt(cx, "block-expr body");
            auto next_cx = new_sub_block_ctxt(cx, "next");
            auto sub = trans_block(sub_cx, blk);

            cx.build.Br(sub_cx.llbb);
            sub.bcx.build.Br(next_cx.llbb);

            ret res(next_cx, sub.val);
        }

        case (ast.expr_assign(?dst, ?src, ?ann)) {
            auto lhs_res = trans_lval(cx, dst);
            check (lhs_res.is_mem);
            auto rhs_res = trans_expr(lhs_res.res.bcx, src);
            auto t = node_ann_type(cx.fcx.ccx, ann);
            // FIXME: calculate copy init-ness in typestate.
            ret copy_ty(rhs_res.bcx, DROP_EXISTING,
                        lhs_res.res.val, rhs_res.val, t);
        }

        case (ast.expr_assign_op(?op, ?dst, ?src, ?ann)) {
            auto t = node_ann_type(cx.fcx.ccx, ann);
            auto lhs_res = trans_lval(cx, dst);
            check (lhs_res.is_mem);
            auto lhs_val = load_scalar_or_boxed(lhs_res.res.bcx,
                                                lhs_res.res.val, t);
            auto rhs_res = trans_expr(lhs_res.res.bcx, src);
            auto v = trans_eager_binop(rhs_res.bcx, op, t,
                                       lhs_val, rhs_res.val);
            // FIXME: calculate copy init-ness in typestate.
            ret copy_ty(v.bcx, DROP_EXISTING,
                        lhs_res.res.val, v.val, t);
        }

        case (ast.expr_bind(?f, ?args, ?ann)) {
            ret trans_bind(cx, f, args, ann);
        }

        case (ast.expr_call(?f, ?args, ?ann)) {
            ret trans_call(cx, f, none[ValueRef], args, ann);
        }

        case (ast.expr_cast(?e, _, ?ann)) {
            ret trans_cast(cx, e, ann);
        }

        case (ast.expr_vec(?args, ?ann)) {
            ret trans_vec(cx, args, ann);
        }

        case (ast.expr_tup(?args, ?ann)) {
            ret trans_tup(cx, args, ann);
        }

        case (ast.expr_rec(?args, ?base, ?ann)) {
            ret trans_rec(cx, args, base, ann);
        }

        case (ast.expr_ext(_, _, _, ?expanded, _)) {
            ret trans_expr(cx, expanded);
        }

        case (ast.expr_fail) {
            ret trans_fail(cx, e.span, "explicit failure");
        }

        case (ast.expr_log(?a)) {
            ret trans_log(cx, a);
        }

        case (ast.expr_check_expr(?a)) {
            ret trans_check_expr(cx, a);
        }

        case (ast.expr_ret(?e)) {
            ret trans_ret(cx, e);
        }

        case (ast.expr_put(?e)) {
            ret trans_put(cx, e);
        }

        case (ast.expr_be(?e)) {
            ret trans_be(cx, e);
        }

        // lval cases fall through to trans_lval and then
        // possibly load the result (if it's non-structural).

        case (_) {
            auto t = ty.expr_ty(e);
            auto sub = trans_lval(cx, e);
            ret res(sub.res.bcx,
                    load_scalar_or_boxed(sub.res.bcx, sub.res.val, t));
        }
    }
    cx.fcx.ccx.sess.unimpl("expr variant in trans_expr");
    fail;
}

// We pass structural values around the compiler "by pointer" and
// non-structural values (scalars and boxes) "by value". This function decides,
// based on the type, whether to load the value out of the pointer or to pass
// the pointer through unchanged.

fn load_scalar_or_boxed(@block_ctxt cx,
                        ValueRef v,
                        @ty.t t) -> ValueRef {
    if (ty.type_is_scalar(t) || ty.type_is_boxed(t) || ty.type_is_native(t)) {
        ret cx.build.Load(v);
    } else {
        ret v;
    }
}

fn trans_log(@block_ctxt cx, @ast.expr e) -> result {

    auto sub = trans_expr(cx, e);
    auto e_ty = ty.expr_ty(e);
    alt (e_ty.struct) {
        case (ty.ty_str) {
            auto v = vp2i(sub.bcx, sub.val);
            ret trans_upcall(sub.bcx,
                             "upcall_log_str",
                             vec(v));
        }
        case (_) {
            ret trans_upcall(sub.bcx,
                             "upcall_log_int",
                             vec(sub.val));
        }
    }
    fail;
}

fn trans_check_expr(@block_ctxt cx, @ast.expr e) -> result {
    auto cond_res = trans_expr(cx, e);

    // FIXME: need pretty-printer.
    auto expr_str = "<expr>";
    auto fail_cx = new_sub_block_ctxt(cx, "fail");
    auto fail_res = trans_fail(fail_cx, e.span, expr_str);

    auto next_cx = new_sub_block_ctxt(cx, "next");
    fail_res.bcx.build.Br(next_cx.llbb);
    cond_res.bcx.build.CondBr(cond_res.val,
                              next_cx.llbb,
                              fail_cx.llbb);
    ret res(next_cx, C_nil());
}

fn trans_fail(@block_ctxt cx, common.span sp, str fail_str) -> result {
    auto V_fail_str = p2i(C_cstr(cx.fcx.ccx, fail_str));
    auto V_filename = p2i(C_cstr(cx.fcx.ccx, sp.filename));
    auto V_line = sp.lo.line as int;
    auto args = vec(V_fail_str, V_filename, C_int(V_line));

    ret trans_upcall(cx, "upcall_fail", args);
}

fn trans_put(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
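    // Translate 'put' inside a for-each body: spill the implicit iter-body fn
    // pair (fcx.lliterbody) to a slot, split it into code and env pointers,
    // and FastCall it with a dummy return slot, the task pointer, the env and
    // the put value, if one was supplied.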
    auto llcallee = C_nil();
    auto llenv = C_nil();

    alt (cx.fcx.lliterbody) {
        case (some[ValueRef](?lli)) {
            auto slot = cx.build.Alloca(val_ty(lli));
            cx.build.Store(lli, slot);

            llcallee = cx.build.GEP(slot, vec(C_int(0),
                                              C_int(abi.fn_field_code)));
            llcallee = cx.build.Load(llcallee);

            llenv = cx.build.GEP(slot, vec(C_int(0),
                                           C_int(abi.fn_field_box)));
            llenv = cx.build.Load(llenv);
        }
    }
    auto bcx = cx;
    auto dummy_retslot = bcx.build.Alloca(T_nil());
    let vec[ValueRef] llargs = vec(dummy_retslot, cx.fcx.lltaskptr, llenv);
    alt (e) {
        case (none[@ast.expr]) { }
        case (some[@ast.expr](?x)) {
            auto r = trans_expr(bcx, x);
            llargs += r.val;
            bcx = r.bcx;
        }
    }
    ret res(bcx, bcx.build.FastCall(llcallee, llargs));
}

fn trans_ret(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
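    // Copy the returned value (if any) into the frame's return pointer, then
    // walk outward through every enclosing cleanup scope, running its
    // cleanups, before emitting RetVoid.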
    auto bcx = cx;
    auto val = C_nil();

    alt (e) {
        case (some[@ast.expr](?x)) {
            auto t = ty.expr_ty(x);
            auto r = trans_expr(cx, x);
            bcx = r.bcx;
            val = r.val;
            bcx = copy_ty(bcx, INIT, cx.fcx.llretptr, val, t).bcx;
        }
        case (_) { /* fall through */  }
    }

    // Run all cleanups and back out.
    let bool more_cleanups = true;
    auto cleanup_cx = cx;
    while (more_cleanups) {
        bcx = trans_block_cleanups(bcx, cleanup_cx);
        alt (cleanup_cx.parent) {
            case (parent_some(?b)) {
                cleanup_cx = b;
            }
            case (parent_none) {
                more_cleanups = false;
            }
        }
    }

    bcx.build.RetVoid();
    ret res(bcx, C_nil());
}

fn trans_be(@block_ctxt cx, @ast.expr e) -> result {
    // FIXME: This should be a typestate precondition
    check (ast.is_call_expr(e));
    // FIXME: Turn this into a real tail call once
    // calling convention issues are settled
    ret trans_ret(cx, some(e));
}

fn init_local(@block_ctxt cx, @ast.local local) -> result {

    // Make a note to drop this slot on the way out.
    check (cx.fcx.lllocals.contains_key(local.id));
    auto llptr = cx.fcx.lllocals.get(local.id);
    auto ty = node_ann_type(cx.fcx.ccx, local.ann);
    auto bcx = cx;

    find_scope_cx(cx).cleanups +=
        clean(bind drop_slot(_, llptr, ty));

    alt (local.init) {
        case (some[@ast.expr](?e)) {
            auto sub = trans_expr(bcx, e);
            bcx = copy_ty(sub.bcx, INIT, llptr, sub.val, ty).bcx;
        }
        case (_) {
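            // No initializer: zero the slot so the drop registered above
            // is harmless before the first real assignment.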
            if (middle.ty.type_has_dynamic_size(ty)) {
                auto llsz = size_of(bcx, ty);
                bcx = call_bzero(llsz.bcx, llptr, llsz.val).bcx;

            } else {
                auto llty = type_of(bcx.fcx.ccx, ty);
                auto null = lib.llvm.llvm.LLVMConstNull(llty);
                bcx.build.Store(null, llptr);
            }
        }
    }
    ret res(bcx, llptr);
}

fn trans_stmt(@block_ctxt cx, &ast.stmt s) -> result {
    auto bcx = cx;
    alt (s.node) {
        case (ast.stmt_expr(?e)) {
            bcx = trans_expr(cx, e).bcx;
        }

        case (ast.stmt_decl(?d)) {
            alt (d.node) {
                case (ast.decl_local(?local)) {
                    bcx = init_local(bcx, local).bcx;
                }
                case (ast.decl_item(?i)) {
                    trans_item(cx.fcx.ccx, *i);
                }
            }
        }
        case (_) {
            cx.fcx.ccx.sess.unimpl("stmt variant");
        }
    }
    ret res(bcx, C_nil());
}

fn new_builder(BasicBlockRef llbb) -> builder {
    let BuilderRef llbuild = llvm.LLVMCreateBuilder();
    llvm.LLVMPositionBuilderAtEnd(llbuild, llbb);
    ret builder(llbuild);
}

// You probably don't want to use this one. See the
// next three functions instead.
fn new_block_ctxt(@fn_ctxt cx, block_parent parent,
                  block_kind kind,
                  str name) -> @block_ctxt {
    let vec[cleanup] cleanups = vec();
    let BasicBlockRef llbb =
        llvm.LLVMAppendBasicBlock(cx.llfn,
                                  _str.buf(cx.ccx.names.next(name)));

    ret @rec(llbb=llbb,
             build=new_builder(llbb),
             parent=parent,
             kind=kind,
             mutable cleanups=cleanups,
             fcx=cx);
}

// Use this when you're at the top block of a function or the like.
fn new_top_block_ctxt(@fn_ctxt fcx) -> @block_ctxt {
    auto cx = new_block_ctxt(fcx, parent_none, SCOPE_BLOCK,
                             "function top level");

    // FIXME: hack to give us some spill room to make up for an LLVM
    // bug where it destroys its own callee-saves.
    cx.build.Alloca(T_array(T_int(), 10u));
    ret cx;
}

// Use this when you're at a curly-brace or similar lexical scope.
fn new_scope_block_ctxt(@block_ctxt bcx, str n) -> @block_ctxt {
    ret new_block_ctxt(bcx.fcx, parent_some(bcx), SCOPE_BLOCK, n);
}

// Use this when you're making a general CFG BB within a scope.
fn new_sub_block_ctxt(@block_ctxt bcx, str n) -> @block_ctxt {
    ret new_block_ctxt(bcx.fcx, parent_some(bcx), NON_SCOPE_BLOCK, n);
}


fn trans_block_cleanups(@block_ctxt cx,
                        @block_ctxt cleanup_cx) -> @block_ctxt {
    auto bcx = cx;

    if (cleanup_cx.kind != SCOPE_BLOCK) {
        check (_vec.len[cleanup](cleanup_cx.cleanups) == 0u);
    }

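    // Cleanups run in reverse order of registration.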
    auto i = _vec.len[cleanup](cleanup_cx.cleanups);
    while (i > 0u) {
        i -= 1u;
        auto c = cleanup_cx.cleanups.(i);
        alt (c) {
            case (clean(?cfn)) {
                bcx = cfn(bcx).bcx;
            }
        }
    }
    ret bcx;
}

iter block_locals(&ast.block b) -> @ast.local {
    // FIXME: putting from inside an iter block doesn't work, so we can't
    // use the index here.
    for (@ast.stmt s in b.node.stmts) {
        alt (s.node) {
            case (ast.stmt_decl(?d)) {
                alt (d.node) {
                    case (ast.decl_local(?local)) {
                        put local;
                    }
                    case (_) { /* fall through */ }
                }
            }
            case (_) { /* fall through */ }
        }
    }
}

fn alloc_ty(@block_ctxt cx, @ty.t t) -> result {
    auto val = C_int(0);
    auto bcx = cx;
    if (ty.type_has_dynamic_size(t)) {
        auto n = size_of(bcx, t);
        bcx = n.bcx;
        val = bcx.build.ArrayAlloca(T_i8(), n.val);
    } else {
        val = bcx.build.Alloca(type_of(cx.fcx.ccx, t));
    }
    ret res(bcx, val);
}

fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
    auto t = node_ann_type(cx.fcx.ccx, local.ann);
    auto r = alloc_ty(cx, t);
    r.bcx.fcx.lllocals.insert(local.id, r.val);
    ret r;
}

fn trans_block(@block_ctxt cx, &ast.block b) -> result {
    auto bcx = cx;

    for each (@ast.local local in block_locals(b)) {
        bcx = alloc_local(bcx, local).bcx;
    }
    auto r = res(bcx, C_nil());

    for (@ast.stmt s in b.node.stmts) {
        r = trans_stmt(bcx, *s);
        bcx = r.bcx;
        // If we hit a terminator, control won't go any further so
        // we're in dead-code land. Stop here.
        if (is_terminated(bcx)) {
            ret r;
        }
    }

    alt (b.node.expr) {
        case (some[@ast.expr](?e)) {
            r = trans_expr(bcx, e);
            bcx = r.bcx;
            if (is_terminated(bcx)) {
                ret r;
            }
        }
        case (none[@ast.expr]) {
            r = res(bcx, C_nil());
        }
    }

    bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
    ret res(bcx, r.val);
}

// NB: must keep 4 fns in sync:
//
//  - type_of_fn_full
//  - create_llargs_for_fn_args.
//  - new_fn_ctxt
//  - trans_args

fn new_fn_ctxt(@crate_ctxt cx,
               ValueRef llfndecl) -> @fn_ctxt {

    let ValueRef llretptr = llvm.LLVMGetParam(llfndecl, 0u);
    let ValueRef lltaskptr = llvm.LLVMGetParam(llfndecl, 1u);
    let ValueRef llenv = llvm.LLVMGetParam(llfndecl, 2u);

    let hashmap[ast.def_id, ValueRef] llargs = new_def_hash[ValueRef]();
    let hashmap[ast.def_id, ValueRef] llobjfields = new_def_hash[ValueRef]();
    let hashmap[ast.def_id, ValueRef] lllocals = new_def_hash[ValueRef]();
    let hashmap[ast.def_id, ValueRef] lltydescs = new_def_hash[ValueRef]();

    ret @rec(llfn=llfndecl,
             lltaskptr=lltaskptr,
             llenv=llenv,
             llretptr=llretptr,
             mutable llself=none[ValueRef],
             mutable lliterbody=none[ValueRef],
             llargs=llargs,
             llobjfields=llobjfields,
             lllocals=lllocals,
             lltydescs=lltydescs,
             ccx=cx);
}

// NB: must keep 4 fns in sync:
//
//  - type_of_fn_full
//  - create_llargs_for_fn_args.
//  - new_fn_ctxt
//  - trans_args

fn create_llargs_for_fn_args(&@fn_ctxt cx,
                             ast.proto proto,
                             option.t[TypeRef] ty_self,
                             @ty.t ret_ty,
                             &vec[ast.arg] args,
                             &vec[ast.ty_param] ty_params) {

    alt (ty_self) {
        case (some[TypeRef](_)) {
            cx.llself = some[ValueRef](cx.llenv);
        }
        case (_) {
        }
    }

    auto arg_n = 3u;
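    // LLVM params 0, 1 and 2 are the implicit out-pointer, task pointer
    // and closure environment; tydescs, any iter body, and the explicit
    // args follow from index 3.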

    if (ty_self == none[TypeRef]) {
        for (ast.ty_param tp in ty_params) {
            auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
            check (llarg as int != 0);
            cx.lltydescs.insert(tp.id, llarg);
            arg_n += 1u;
        }
    }

    if (proto == ast.proto_iter) {
        auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
        check (llarg as int != 0);
        cx.lliterbody = some[ValueRef](llarg);
        arg_n += 1u;
    }

    for (ast.arg arg in args) {
        auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
        check (llarg as int != 0);
        cx.llargs.insert(arg.id, llarg);
        arg_n += 1u;
    }
}

// Recommended LLVM style, strange though this is, is to copy from args to
// allocas immediately upon entry; this permits us to GEP into structures we
// were passed and whatnot. Apparently mem2reg will mop up.

fn copy_args_to_allocas(@block_ctxt cx,
                        option.t[TypeRef] ty_self,
                        vec[ast.arg] args,
                        vec[ty.arg] arg_tys) {

    let uint arg_n = 0u;

    alt (cx.fcx.llself) {
        case (some[ValueRef](?self_v)) {
            alt (ty_self) {
                case (some[TypeRef](?self_t)) {
                    auto alloca = cx.build.Alloca(self_t);
                    cx.build.Store(self_v, alloca);
                    cx.fcx.llself = some[ValueRef](alloca);
                }
            }
        }
        case (_) {
        }
    }

    for (ast.arg aarg in args) {
        if (aarg.mode != ast.alias) {
            auto arg_t = type_of_arg(cx.fcx.ccx, arg_tys.(arg_n));
            auto alloca = cx.build.Alloca(arg_t);
            auto argval = cx.fcx.llargs.get(aarg.id);
            cx.build.Store(argval, alloca);
            // Overwrite the llargs entry for this arg with its alloca.
            cx.fcx.llargs.insert(aarg.id, alloca);
        }

        arg_n += 1u;
    }
}

fn is_terminated(@block_ctxt cx) -> bool {
    auto inst = llvm.LLVMGetLastInstruction(cx.llbb);
    ret llvm.LLVMIsATerminatorInst(inst) as int != 0;
}

fn arg_tys_of_fn(ast.ann ann) -> vec[ty.arg] {
    alt (ty.ann_to_type(ann).struct) {
        case (ty.ty_fn(_, ?arg_tys, _)) {
            ret arg_tys;
        }
    }
    fail;
}

fn ret_ty_of_fn_ty(@ty.t t) -> @ty.t {
    alt (t.struct) {
        case (ty.ty_fn(_, _, ?ret_ty)) {
            ret ret_ty;
        }
    }
    fail;
}


fn ret_ty_of_fn(ast.ann ann) -> @ty.t {
    ret ret_ty_of_fn_ty(ty.ann_to_type(ann));
}

fn populate_fn_ctxt_from_llself(@block_ctxt cx, ValueRef llself) -> result {
    auto bcx = cx;

    let vec[@ty.t] field_tys = vec();

    for (ast.obj_field f in bcx.fcx.ccx.obj_fields) {
        field_tys += vec(node_ann_type(bcx.fcx.ccx, f.ann));
    }

    // Synthesize a tuple type for the fields so that GEP_tup_like() can work
    // its magic.
    auto fields_tup_ty = ty.plain_ty(ty.ty_tup(field_tys));

    auto n_typarams = _vec.len[ast.ty_param](bcx.fcx.ccx.obj_typarams);
    let TypeRef llobj_box_ty = T_obj_ptr(bcx.fcx.ccx.tn, n_typarams);

    auto box_cell =
        bcx.build.GEP(llself,
                      vec(C_int(0),
                          C_int(abi.obj_field_box)));

    auto box_ptr = bcx.build.Load(box_cell);

    box_ptr = bcx.build.PointerCast(box_ptr, llobj_box_ty);

    auto obj_typarams = bcx.build.GEP(box_ptr,
                                     vec(C_int(0),
                                         C_int(abi.box_rc_field_body),
                                         C_int(abi.obj_body_elt_typarams)));

    // The object fields immediately follow the type parameters, so we skip
    // over them to get the pointer.
    auto obj_fields = bcx.build.Add(vp2i(bcx, obj_typarams),
        llsize_of(llvm.LLVMGetElementType(val_ty(obj_typarams))));

    // If we can (i.e. the type is statically sized), then cast the resulting
    // fields pointer to the appropriate LLVM type. If not, just leave it as
    // i8 *.
    if (!ty.type_has_dynamic_size(fields_tup_ty)) {
        auto llfields_ty = type_of(bcx.fcx.ccx, fields_tup_ty);
        obj_fields = vi2p(bcx, obj_fields, T_ptr(llfields_ty));
    } else {
        obj_fields = vi2p(bcx, obj_fields, T_ptr(T_i8()));
    }


    let int i = 0;

    for (ast.ty_param p in bcx.fcx.ccx.obj_typarams) {
        let ValueRef lltyparam = bcx.build.GEP(obj_typarams,
                                               vec(C_int(0),
                                                   C_int(i)));
        lltyparam = bcx.build.Load(lltyparam);
        bcx.fcx.lltydescs.insert(p.id, lltyparam);
        i += 1;
    }

    i = 0;
    for (ast.obj_field f in bcx.fcx.ccx.obj_fields) {
        auto rslt = GEP_tup_like(bcx, fields_tup_ty, obj_fields, vec(0, i));
        bcx = rslt.bcx;
        auto llfield = rslt.val;
        cx.fcx.llobjfields.insert(f.id, llfield);
        i += 1;
    }

    ret res(bcx, C_nil());
}

fn trans_fn(@crate_ctxt cx, &ast._fn f, ast.def_id fid,
            option.t[TypeRef] ty_self,
            &vec[ast.ty_param] ty_params, &ast.ann ann) {

    auto llfndecl = cx.item_ids.get(fid);
    cx.item_names.insert(cx.path, llfndecl);

    auto fcx = new_fn_ctxt(cx, llfndecl);
    create_llargs_for_fn_args(fcx, f.proto,
                              ty_self, ret_ty_of_fn(ann),
                              f.decl.inputs, ty_params);
    auto bcx = new_top_block_ctxt(fcx);

    copy_args_to_allocas(bcx, ty_self, f.decl.inputs,
                         arg_tys_of_fn(ann));

    alt (fcx.llself) {
        case (some[ValueRef](?llself)) {
            bcx = populate_fn_ctxt_from_llself(bcx, llself).bcx;
        }
        case (_) {
        }
    }

    auto res = trans_block(bcx, f.body);
    if (!is_terminated(res.bcx)) {
        // FIXME: until LLVM has a unit type, we are moving around
        // C_nil values rather than their void type.
        res.bcx.build.RetVoid();
    }
}

fn trans_vtbl(@crate_ctxt cx, TypeRef self_ty,
              &ast._obj ob,
              &vec[ast.ty_param] ty_params) -> ValueRef {
    let vec[ValueRef] methods = vec();

    fn meth_lteq(&@ast.method a, &@ast.method b) -> bool {
        ret _str.lteq(a.node.ident, b.node.ident);
    }

    auto meths = std.sort.merge_sort[@ast.method](bind meth_lteq(_,_),
                                                  ob.methods);
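    // Methods are sorted by name so that vtable slot order is
    // deterministic and independent of declaration order.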

    for (@ast.method m in meths) {

        auto llfnty = T_nil();
        alt (node_ann_type(cx, m.node.ann).struct) {
            case (ty.ty_fn(?proto, ?inputs, ?output)) {
                llfnty = type_of_fn_full(cx, proto,
                                         some[TypeRef](self_ty),
                                         inputs, output,
                                         _vec.len[ast.ty_param](ty_params));
            }
        }

        let @crate_ctxt mcx = @rec(path=cx.path + sep() + m.node.ident
                                   with *cx);

        let str s = cx.names.next("_rust_method") + sep() + mcx.path;
        let ValueRef llfn = decl_fastcall_fn(cx.llmod, s, llfnty);
        cx.item_ids.insert(m.node.id, llfn);

        trans_fn(mcx, m.node.meth, m.node.id, some[TypeRef](self_ty),
                 ty_params, m.node.ann);
        methods += llfn;
    }
    auto vtbl = C_struct(methods);
    auto gvar = llvm.LLVMAddGlobal(cx.llmod,
                                   val_ty(vtbl),
                                   _str.buf("_rust_vtbl" + sep() + cx.path));
    llvm.LLVMSetInitializer(gvar, vtbl);
    llvm.LLVMSetGlobalConstant(gvar, True);
    llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMPrivateLinkage
                        as llvm.Linkage);
    ret gvar;
}

fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
             &vec[ast.ty_param] ty_params, &ast.ann ann) {

    auto llctor_decl = cx.item_ids.get(oid);
    cx.item_names.insert(cx.path, llctor_decl);

    // Translate obj ctor args to function arguments.
    let vec[ast.arg] fn_args = vec();
    for (ast.obj_field f in ob.fields) {
        fn_args += vec(rec(mode=ast.alias,
                           ty=f.ty,
                           ident=f.ident,
                           id=f.id));
    }

    auto fcx = new_fn_ctxt(cx, llctor_decl);
    create_llargs_for_fn_args(fcx, ast.proto_fn,
                              none[TypeRef], ret_ty_of_fn(ann),
                              fn_args, ty_params);

    auto bcx = new_top_block_ctxt(fcx);

    let vec[ty.arg] arg_tys = arg_tys_of_fn(ann);
    copy_args_to_allocas(bcx, none[TypeRef], fn_args, arg_tys);

    auto llself_ty = type_of(cx, ret_ty_of_fn(ann));
    auto pair = bcx.fcx.llretptr;
    auto vtbl = trans_vtbl(cx, llself_ty, ob, ty_params);
    auto pair_vtbl = bcx.build.GEP(pair,
                                   vec(C_int(0),
                                       C_int(abi.obj_field_vtbl)));
    auto pair_box = bcx.build.GEP(pair,
                                  vec(C_int(0),
                                      C_int(abi.obj_field_box)));
    bcx.build.Store(vtbl, pair_vtbl);

    let TypeRef llbox_ty = T_opaque_obj_ptr(cx.tn);

    if (_vec.len[ast.ty_param](ty_params) == 0u &&
        _vec.len[ty.arg](arg_tys) == 0u) {
        // Store null into pair, if no args or typarams.
        bcx.build.Store(C_null(llbox_ty), pair_box);
    } else {
        // Malloc a box for the body and copy args in.
        let vec[@ty.t] obj_fields = vec();
        for (ty.arg a in arg_tys) {
            append[@ty.t](obj_fields, a.ty);
        }

        // Synthesize an obj body type.
        auto tydesc_ty = plain_ty(ty.ty_type);
        let vec[@ty.t] tps = vec();
        for (ast.ty_param tp in ty_params) {
            append[@ty.t](tps, tydesc_ty);
        }

        let @ty.t typarams_ty = plain_ty(ty.ty_tup(tps));
        let @ty.t fields_ty = plain_ty(ty.ty_tup(obj_fields));
        let @ty.t body_ty = plain_ty(ty.ty_tup(vec(tydesc_ty,
                                                   typarams_ty,
                                                   fields_ty)));
        let @ty.t boxed_body_ty = plain_ty(ty.ty_box(body_ty));

        // Malloc a box for the body.
        auto box = trans_malloc_boxed(bcx, body_ty);
        bcx = box.bcx;
        auto rc = GEP_tup_like(bcx, boxed_body_ty, box.val,
                               vec(0, abi.box_rc_field_refcnt));
        bcx = rc.bcx;
        auto body = GEP_tup_like(bcx, boxed_body_ty, box.val,
                                 vec(0, abi.box_rc_field_body));
        bcx = body.bcx;
        bcx.build.Store(C_int(1), rc.val);

        // Store body tydesc.
        auto body_tydesc =
            GEP_tup_like(bcx, body_ty, body.val,
                         vec(0, abi.obj_body_elt_tydesc));
        bcx = body_tydesc.bcx;

        auto body_td = get_tydesc(bcx, body_ty);
        bcx = body_td.bcx;
        bcx.build.Store(body_td.val, body_tydesc.val);

        // Copy typarams into captured typarams.
        auto body_typarams =
            GEP_tup_like(bcx, body_ty, body.val,
                         vec(0, abi.obj_body_elt_typarams));
        bcx = body_typarams.bcx;
        let int i = 0;
        for (ast.ty_param tp in ty_params) {
            auto typaram = bcx.fcx.lltydescs.get(tp.id);
            auto capture = GEP_tup_like(bcx, typarams_ty, body_typarams.val,
                                        vec(0, i));
            bcx = capture.bcx;
            bcx = copy_ty(bcx, INIT, capture.val, typaram, tydesc_ty).bcx;
            i += 1;
        }

        // Copy args into body fields.
        auto body_fields =
            GEP_tup_like(bcx, body_ty, body.val,
                         vec(0, abi.obj_body_elt_fields));
        bcx = body_fields.bcx;

        i = 0;
        for (ast.obj_field f in ob.fields) {
            auto arg = bcx.fcx.llargs.get(f.id);
            arg = load_scalar_or_boxed(bcx, arg, arg_tys.(i).ty);
            auto field = GEP_tup_like(bcx, fields_ty, body_fields.val,
                                      vec(0, i));
            bcx = field.bcx;
            bcx = copy_ty(bcx, INIT, field.val, arg, arg_tys.(i).ty).bcx;
            i += 1;
        }
        // Store box ptr in outer pair.
        auto p = bcx.build.PointerCast(box.val, llbox_ty);
        bcx.build.Store(p, pair_box);
    }
    bcx.build.RetVoid();
}

fn trans_tag_variant(@crate_ctxt cx, ast.def_id tag_id,
                     &ast.variant variant, int index,
                     &vec[ast.ty_param] ty_params) {
    if (_vec.len[ast.variant_arg](variant.args) == 0u) {
        ret;    // nullary constructors are just constants
    }

    // Translate variant arguments to function arguments.
    let vec[ast.arg] fn_args = vec();
    auto i = 0u;
    for (ast.variant_arg varg in variant.args) {
        fn_args += vec(rec(mode=ast.alias,
                           ty=varg.ty,
                           ident="arg" + _uint.to_str(i, 10u),
                           id=varg.id));
        i += 1u;
    }

    check (cx.item_ids.contains_key(variant.id));
    let ValueRef llfndecl = cx.item_ids.get(variant.id);

    auto fcx = new_fn_ctxt(cx, llfndecl);
    create_llargs_for_fn_args(fcx, ast.proto_fn,
                              none[TypeRef], ret_ty_of_fn(variant.ann),
                              fn_args, ty_params);

    let vec[@ty.t] ty_param_substs = vec();
    for (ast.ty_param tp in ty_params) {
        ty_param_substs += vec(plain_ty(ty.ty_param(tp.id)));
    }

    auto bcx = new_top_block_ctxt(fcx);

    auto arg_tys = arg_tys_of_fn(variant.ann);
    copy_args_to_allocas(bcx, none[TypeRef], fn_args, arg_tys);

    // Cast the tag to a type we can GEP into.
    auto lltagptr = bcx.build.PointerCast(fcx.llretptr,
                                          T_opaque_tag_ptr(fcx.ccx.tn));

    auto lldiscrimptr = bcx.build.GEP(lltagptr,
                                      vec(C_int(0), C_int(0)));
    bcx.build.Store(C_int(index), lldiscrimptr);

    auto llblobptr = bcx.build.GEP(lltagptr,
                                   vec(C_int(0), C_int(1)));

    i = 0u;
    for (ast.variant_arg va in variant.args) {
        auto rslt = GEP_tag(bcx, llblobptr, tag_id, variant.id,
                            ty_param_substs, i as int);
        bcx = rslt.bcx;
        auto lldestptr = rslt.val;

        // If this argument to this function is a tag, it'll have come in to
        // this function as an opaque blob due to the way that type_of()
        // works. So we have to cast to the destination's view of the type.
        auto llargptr = bcx.build.PointerCast(fcx.llargs.get(va.id),
            val_ty(lldestptr));

        auto arg_ty = arg_tys.(i).ty;
        auto llargval;
        if (ty.type_is_structural(arg_ty) ||
                ty.type_has_dynamic_size(arg_ty)) {
            llargval = llargptr;
        } else {
            llargval = bcx.build.Load(llargptr);
        }

        rslt = copy_ty(bcx, INIT, lldestptr, llargval, arg_ty);
        bcx = rslt.bcx;

        i += 1u;
    }

    bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
    bcx.build.RetVoid();
}

// FIXME: this should do some structural hash-consing to avoid
// duplicate constants. I think. Maybe LLVM has a magical mode
// that does so later on?

fn trans_const_expr(@crate_ctxt cx, @ast.expr e) -> ValueRef {
    alt (e.node) {
        case (ast.expr_lit(?lit, ?ann)) {
            ret trans_lit(cx, *lit, ann);
        }
    }
}

fn trans_const(@crate_ctxt cx, @ast.expr e,
               &ast.def_id cid, &ast.ann ann) {
    auto t = node_ann_type(cx, ann);
    auto v = trans_const_expr(cx, e);
    if (ty.type_is_scalar(t)) {
        // The scalars come back as 1st class LLVM vals
        // which we have to stick into global constants.
        auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(v),
                                    _str.buf(cx.names.next(cx.path)));
        llvm.LLVMSetInitializer(g, v);
        llvm.LLVMSetGlobalConstant(g, True);
        llvm.LLVMSetLinkage(g, lib.llvm.LLVMPrivateLinkage
                            as llvm.Linkage);
        cx.consts.insert(cid, g);
    } else {
        cx.consts.insert(cid, v);
    }
}

fn trans_item(@crate_ctxt cx, &ast.item item) {
    alt (item.node) {
        case (ast.item_fn(?name, ?f, ?tps, ?fid, ?ann)) {
            auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
            trans_fn(sub_cx, f, fid, none[TypeRef], tps, ann);
        }
        case (ast.item_obj(?name, ?ob, ?tps, ?oid, ?ann)) {
            auto sub_cx = @rec(path=cx.path + sep() + name,
                               obj_typarams=tps,
                               obj_fields=ob.fields with *cx);
            trans_obj(sub_cx, ob, oid, tps, ann);
        }
        case (ast.item_mod(?name, ?m, _)) {
            auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
            trans_mod(sub_cx, m);
        }
        case (ast.item_tag(?name, ?variants, ?tps, ?tag_id)) {
            auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
            auto i = 0;
            for (ast.variant variant in variants) {
                trans_tag_variant(sub_cx, tag_id, variant, i, tps);
                i += 1;
            }
        }
        case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
            auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
            trans_const(sub_cx, expr, cid, ann);
        }
        case (_) { /* fall through */ }
    }
}

fn trans_mod(@crate_ctxt cx, &ast._mod m) {
    for (@ast.item item in m.items) {
        trans_item(cx, *item);
    }
}

fn get_pair_fn_ty(TypeRef llpairty) -> TypeRef {
    // Bit of a kludge: pick the fn typeref out of the pair.
    let vec[TypeRef] pair_tys = vec(T_nil(), T_nil());
    llvm.LLVMGetStructElementTypes(llpairty,
                                   _vec.buf[TypeRef](pair_tys));
    ret llvm.LLVMGetElementType(pair_tys.(0));
}

fn decl_fn_and_pair(@crate_ctxt cx,
                    str kind,
                    str name,
                    vec[ast.ty_param] ty_params,
                    &ast.ann ann,
                    ast.def_id id) {

    auto llfty;
    auto llpairty;
    alt (node_ann_type(cx, ann).struct) {
        case (ty.ty_fn(?proto, ?inputs, ?output)) {
            llfty = type_of_fn(cx, proto, inputs, output,
                               _vec.len[ast.ty_param](ty_params));
            llpairty = T_fn_pair(cx.tn, llfty);
        }
        case (_) {
            cx.sess.bug("decl_fn_and_pair(): fn item doesn't have fn type?!");
            fail;
        }
    }

    // Declare the function itself.
    let str s = cx.names.next("_rust_" + kind) + sep() + name;
    let ValueRef llfn = decl_fastcall_fn(cx.llmod, s, llfty);

    // Declare the global constant pair that points to it.
    let str ps = cx.names.next("_rust_" + kind + "_pair") + sep() + name;

    register_fn_pair(cx, ps, llpairty, llfn, id);
}

fn register_fn_pair(@crate_ctxt cx, str ps, TypeRef llpairty, ValueRef llfn,
                    ast.def_id id) {
    let ValueRef gvar = llvm.LLVMAddGlobal(cx.llmod, llpairty,
                                           _str.buf(ps));
    auto pair = C_struct(vec(llfn,
                             C_null(T_opaque_closure_ptr(cx.tn))));

    llvm.LLVMSetInitializer(gvar, pair);
    llvm.LLVMSetGlobalConstant(gvar, True);
    llvm.LLVMSetLinkage(gvar,
                        lib.llvm.LLVMPrivateLinkage
                        as llvm.Linkage);

    cx.item_ids.insert(id, llfn);
    cx.fn_pairs.insert(id, gvar);
}

// Returns the number of type parameters that the given native function has.
fn native_fn_ty_param_count(@crate_ctxt cx, &ast.def_id id) -> uint {
    auto count;
    auto native_item = cx.native_items.get(id);
    alt (native_item.node) {
        case (ast.native_item_ty(_,_)) {
            cx.sess.bug("decl_native_fn_and_pair(): native fn isn't " +
                        "actually a fn?!");
            fail;
        }
        case (ast.native_item_fn(_, _, ?tps, _, _)) {
            count = _vec.len[ast.ty_param](tps);
        }
    }
    ret count;
}

fn native_fn_wrapper_type(@crate_ctxt cx, uint ty_param_count, &ast.ann ann)
        -> TypeRef {
    auto x = node_ann_type(cx, ann);
    alt (x.struct) {
        case (ty.ty_native_fn(?abi, ?args, ?out)) {
            ret type_of_fn(cx, ast.proto_fn, args, out, ty_param_count);
        }
    }
    fail;
}

fn decl_native_fn_and_pair(@crate_ctxt cx,
                           str name,
                           &ast.ann ann,
                           ast.def_id id) {
    auto num_ty_param = native_fn_ty_param_count(cx, id);

    // Declare the wrapper.
    auto wrapper_type = native_fn_wrapper_type(cx, num_ty_param, ann);
    let str s = cx.names.next("_rust_wrapper") + sep() + name;
    let ValueRef wrapper_fn = decl_fastcall_fn(cx.llmod, s, wrapper_type);

    // Declare the global constant pair that points to it.
    auto wrapper_pair_type = T_fn_pair(cx.tn, wrapper_type);
    let str ps = cx.names.next("_rust_wrapper_pair") + sep() + name;

    register_fn_pair(cx, ps, wrapper_pair_type, wrapper_fn, id);

    // Declare the function itself.
    auto llfty = get_pair_fn_ty(node_type(cx, ann));
    auto function = decl_cdecl_fn(cx.llmod, name, llfty);

    // Build the wrapper.
    auto fcx = new_fn_ctxt(cx, wrapper_fn);
    auto bcx = new_top_block_ctxt(fcx);
    auto fn_type = node_ann_type(cx, ann);

    let vec[ValueRef] call_args = vec();
    auto abi = ty.ty_fn_abi(fn_type);
    auto arg_n = 3u;
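    // Rust-ABI natives get the task pointer and any tydescs forwarded as
    // leading arguments; cdecl natives receive only their declared args.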
    alt (abi) {
        case (ast.native_abi_rust) {
            call_args += vec(fcx.lltaskptr);
            for each (uint i in _uint.range(0u, num_ty_param)) {
                auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
                check (llarg as int != 0);
                call_args += vec(llarg);
                arg_n += 1u;
            }
        }
        case (ast.native_abi_cdecl) {
        }
    }
    auto args = ty.ty_fn_args(fn_type);
    for (ty.arg arg in args) {
        auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
        check (llarg as int != 0);
        call_args += vec(llarg);
        arg_n += 1u;
    }

    auto r = bcx.build.Call(function, call_args);
    bcx.build.Store(r, fcx.llretptr);
    bcx.build.RetVoid();
}

fn collect_native_item(&@crate_ctxt cx, @ast.native_item i) -> @crate_ctxt {
    alt (i.node) {
        case (ast.native_item_fn(?name, _, _, ?fid, ?ann)) {
            cx.native_items.insert(fid, i);
            if (! cx.obj_methods.contains_key(fid)) {
                decl_native_fn_and_pair(cx, name, ann, fid);
            }
        }
        case (_) { /* fall through */ }
    }
    ret cx;
}

fn collect_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {

    alt (i.node) {
        case (ast.item_fn(?name, ?f, ?tps, ?fid, ?ann)) {
            cx.items.insert(fid, i);
            if (! cx.obj_methods.contains_key(fid)) {
                decl_fn_and_pair(cx, "fn", name, tps, ann, fid);
            }
        }

        case (ast.item_obj(?name, ?ob, ?tps, ?oid, ?ann)) {
            cx.items.insert(oid, i);
            decl_fn_and_pair(cx, "obj_ctor", name, tps, ann, oid);
            for (@ast.method m in ob.methods) {
                cx.obj_methods.insert(m.node.id, ());
            }
        }

        case (ast.item_const(?name, _, _, ?cid, _)) {
            cx.items.insert(cid, i);
        }

        case (ast.item_mod(?name, ?m, ?mid)) {
            cx.items.insert(mid, i);
        }

        case (ast.item_tag(_, ?variants, ?tps, ?tag_id)) {
            cx.items.insert(tag_id, i);
        }

        case (_) { /* fall through */ }
    }
    ret cx;
}


fn collect_items(@crate_ctxt cx, @ast.crate crate) {

    let fold.ast_fold[@crate_ctxt] fld =
        fold.new_identity_fold[@crate_ctxt]();

    fld = @rec( update_env_for_item = bind collect_item(_,_),
                update_env_for_native_item = bind collect_native_item(_,_)
                with *fld );

    fold.fold_crate[@crate_ctxt](cx, fld, crate);
}

fn collect_tag_ctor(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {

    alt (i.node) {

        case (ast.item_tag(_, ?variants, ?tps, _)) {
            for (ast.variant variant in variants) {
                if (_vec.len[ast.variant_arg](variant.args) != 0u) {
                    decl_fn_and_pair(cx, "tag", variant.name,
                                     tps, variant.ann, variant.id);
                }
            }
        }

        case (_) { /* fall through */ }
    }
    ret cx;
}

fn collect_tag_ctors(@crate_ctxt cx, @ast.crate crate) {

    let fold.ast_fold[@crate_ctxt] fld =
        fold.new_identity_fold[@crate_ctxt]();

    fld = @rec( update_env_for_item = bind collect_tag_ctor(_,_)
                with *fld );

    fold.fold_crate[@crate_ctxt](cx, fld, crate);
}


// The constant translation pass.

fn trans_constant(&@crate_ctxt cx, @ast.item it) -> @crate_ctxt {
    alt (it.node) {
        case (ast.item_tag(_, ?variants, _, ?tag_id)) {
            auto i = 0u;
            auto n_variants = _vec.len[ast.variant](variants);
            while (i < n_variants) {
                auto variant = variants.(i);

                auto discrim_val = C_int(i as int);

                // FIXME: better name.
                auto discrim_gvar = llvm.LLVMAddGlobal(cx.llmod, T_int(),
                    _str.buf("tag_discrim"));

                // FIXME: Eventually we do want to export these, but we need
                // to figure out what name they get first!
                llvm.LLVMSetInitializer(discrim_gvar, discrim_val);
                llvm.LLVMSetGlobalConstant(discrim_gvar, True);
                llvm.LLVMSetLinkage(discrim_gvar, lib.llvm.LLVMPrivateLinkage
                                    as llvm.Linkage);

                cx.discrims.insert(variant.id, discrim_gvar);

                i += 1u;
            }
        }

        case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
            // FIXME: The whole expr-translation system needs cloning to deal
            // with consts.
            auto v = C_int(1);
            cx.item_ids.insert(cid, v);
        }

        case (_) {
            // empty
        }
    }

    ret cx;
}

fn trans_constants(@crate_ctxt cx, @ast.crate crate) {
    let fold.ast_fold[@crate_ctxt] fld =
        fold.new_identity_fold[@crate_ctxt]();

    fld = @rec(update_env_for_item = bind trans_constant(_,_) with *fld);

    fold.fold_crate[@crate_ctxt](cx, fld, crate);
}


fn vp2i(@block_ctxt cx, ValueRef v) -> ValueRef {
    ret cx.build.PtrToInt(v, T_int());
}


fn vi2p(@block_ctxt cx, ValueRef v, TypeRef t) -> ValueRef {
    ret cx.build.IntToPtr(v, t);
}

fn p2i(ValueRef v) -> ValueRef {
    ret llvm.LLVMConstPtrToInt(v, T_int());
}

fn i2p(ValueRef v, TypeRef t) -> ValueRef {
    ret llvm.LLVMConstIntToPtr(v, t);
}

fn trans_exit_task_glue(@crate_ctxt cx) {
    let vec[TypeRef] T_args = vec();
    let vec[ValueRef] V_args = vec();

    auto llfn = cx.glues.exit_task_glue;
    let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 3u);
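    // Param 3 is the task-pointer slot of the fake exit-task-glue
    // signature declared in make_glues() below.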
    auto fcx = @rec(llfn=llfn,
                    lltaskptr=lltaskptr,
                    llenv=C_null(T_opaque_closure_ptr(cx.tn)),
                    llretptr=C_null(T_ptr(T_nil())),
                    mutable llself=none[ValueRef],
                    mutable lliterbody=none[ValueRef],
                    llargs=new_def_hash[ValueRef](),
                    llobjfields=new_def_hash[ValueRef](),
                    lllocals=new_def_hash[ValueRef](),
                    lltydescs=new_def_hash[ValueRef](),
                    ccx=cx);

    auto bcx = new_top_block_ctxt(fcx);
    trans_upcall(bcx, "upcall_exit", V_args);
    bcx.build.RetVoid();
}

fn create_typedefs(@crate_ctxt cx) {
    llvm.LLVMAddTypeName(cx.llmod, _str.buf("crate"), T_crate(cx.tn));
    llvm.LLVMAddTypeName(cx.llmod, _str.buf("task"), T_task(cx.tn));
    llvm.LLVMAddTypeName(cx.llmod, _str.buf("tydesc"), T_tydesc(cx.tn));
}

fn create_crate_constant(@crate_ctxt cx) {

    let ValueRef crate_addr = p2i(cx.crate_ptr);

    let ValueRef activate_glue_off =
        llvm.LLVMConstSub(p2i(cx.glues.activate_glue), crate_addr);

    let ValueRef yield_glue_off =
        llvm.LLVMConstSub(p2i(cx.glues.yield_glue), crate_addr);

    let ValueRef exit_task_glue_off =
        llvm.LLVMConstSub(p2i(cx.glues.exit_task_glue), crate_addr);

    let ValueRef crate_val =
        C_struct(vec(C_null(T_int()),     // ptrdiff_t image_base_off
                     p2i(cx.crate_ptr),   // uintptr_t self_addr
                     C_null(T_int()),     // ptrdiff_t debug_abbrev_off
                     C_null(T_int()),     // size_t debug_abbrev_sz
                     C_null(T_int()),     // ptrdiff_t debug_info_off
                     C_null(T_int()),     // size_t debug_info_sz
                     activate_glue_off,   // size_t activate_glue_off
                     yield_glue_off,      // size_t yield_glue_off
                     C_null(T_int()),     // size_t unwind_glue_off
                     C_null(T_int()),     // size_t gc_glue_off
                     exit_task_glue_off,  // size_t main_exit_task_glue_off
                     C_null(T_int()),     // int n_rust_syms
                     C_null(T_int()),     // int n_c_syms
                     C_null(T_int()),     // int n_libs
                     C_int(abi.abi_x86_rustc_fastcall) // uintptr_t abi_tag
                     ));

    llvm.LLVMSetInitializer(cx.crate_ptr, crate_val);
}

fn find_main_fn(@crate_ctxt cx) -> ValueRef {
    auto e = sep() + "main";
    let ValueRef v = C_nil();
    let uint n = 0u;
    for each (tup(str,ValueRef) i in cx.item_names.items()) {
        if (_str.ends_with(i._0, e)) {
            n += 1u;
            v = i._1;
        }
    }
    alt (n) {
        case (0u) {
            cx.sess.err("main fn not found");
        }
        case (1u) {
            ret v;
        }
        case (_) {
            cx.sess.err("multiple main fns found");
        }
    }
    fail;
}

fn trans_main_fn(@crate_ctxt cx, ValueRef llcrate) {
    auto T_main_args = vec(T_int(), T_int());
    auto T_rust_start_args = vec(T_int(), T_int(), T_int(), T_int());

    auto main_name;
    if (_str.eq(std.os.target_os(), "win32")) {
        main_name = "WinMain@16";
    } else {
        main_name = "main";
    }

    auto llmain =
        decl_cdecl_fn(cx.llmod, main_name, T_fn(T_main_args, T_int()));

    auto llrust_start = decl_cdecl_fn(cx.llmod, "rust_start",
                                      T_fn(T_rust_start_args, T_int()));

    auto llargc = llvm.LLVMGetParam(llmain, 0u);
    auto llargv = llvm.LLVMGetParam(llmain, 1u);
    auto llrust_main = find_main_fn(cx);

    //
    // Emit the moral equivalent of:
    //
    // main(int argc, char **argv) {
    //     rust_start(&_rust.main, &crate, argc, argv);
    // }
    //

    let BasicBlockRef llbb =
        llvm.LLVMAppendBasicBlock(llmain, _str.buf(""));
    auto b = new_builder(llbb);

    auto start_args = vec(p2i(llrust_main), p2i(llcrate), llargc, llargv);

    b.Ret(b.Call(llrust_start, start_args));
}

fn declare_intrinsics(ModuleRef llmod) -> hashmap[str,ValueRef] {

    let vec[TypeRef] T_trap_args = vec();
    auto trap = decl_cdecl_fn(llmod, "llvm.trap",
                              T_fn(T_trap_args, T_void()));

    auto intrinsics = new_str_hash[ValueRef]();
    intrinsics.insert("llvm.trap", trap);
    ret intrinsics;
}


fn trace_str(@block_ctxt cx, str s) {
    trans_upcall(cx, "upcall_trace_str", vec(p2i(C_cstr(cx.fcx.ccx, s))));
}

fn trace_word(@block_ctxt cx, ValueRef v) {
    trans_upcall(cx, "upcall_trace_word", vec(v));
}

fn trace_ptr(@block_ctxt cx, ValueRef v) {
    trace_word(cx, cx.build.PtrToInt(v, T_int()));
}

fn trap(@block_ctxt bcx) {
    let vec[ValueRef] v = vec();
    bcx.build.Call(bcx.fcx.ccx.intrinsics.get("llvm.trap"), v);
}

fn check_module(ModuleRef llmod) {
    auto pm = mk_pass_manager();
    llvm.LLVMAddVerifierPass(pm.llpm);
    llvm.LLVMRunPassManager(pm.llpm, llmod);

    // TODO: run the linter here also, once there are llvm-c bindings for it.
}

fn make_no_op_type_glue(ModuleRef llmod, type_names tn) -> ValueRef {
    auto ty = T_fn(vec(T_taskptr(tn), T_ptr(T_i8())), T_void());
    auto fun = decl_fastcall_fn(llmod, abi.no_op_type_glue_name(), ty);
    auto bb_name = _str.buf("_rust_no_op_type_glue_bb");
    auto llbb = llvm.LLVMAppendBasicBlock(fun, bb_name);
    new_builder(llbb).RetVoid();
    ret fun;
}

fn make_memcpy_glue(ModuleRef llmod) -> ValueRef {

    // We're not using the LLVM memcpy intrinsic. It appears to call through
    // to the platform memcpy in some cases, which is not terribly safe to run
    // on a rust stack.

    auto p8 = T_ptr(T_i8());

    auto ty = T_fn(vec(p8, p8, T_int()), T_void());
    auto fun = decl_fastcall_fn(llmod, abi.memcpy_glue_name(), ty);

    auto initbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("init"));
    auto hdrbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("hdr"));
    auto loopbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("loop"));
    auto endbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("end"));

    auto dst = llvm.LLVMGetParam(fun, 0u);
    auto src = llvm.LLVMGetParam(fun, 1u);
    auto count = llvm.LLVMGetParam(fun, 2u);

    // Init block.
    auto ib = new_builder(initbb);
    auto ip = ib.Alloca(T_int());
    ib.Store(C_int(0), ip);
    ib.Br(hdrbb);

    // Loop-header block
    auto hb = new_builder(hdrbb);
    auto i = hb.Load(ip);
    hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);

    // Loop-body block
    auto lb = new_builder(loopbb);
    i = lb.Load(ip);
    lb.Store(lb.Load(lb.GEP(src, vec(i))),
             lb.GEP(dst, vec(i)));
    lb.Store(lb.Add(i, C_int(1)), ip);
    lb.Br(hdrbb);

    // End block
    auto eb = new_builder(endbb);
    eb.RetVoid();
    ret fun;
}

fn make_bzero_glue(ModuleRef llmod) -> ValueRef {

    // We're not using the LLVM memset intrinsic. Same as with memcpy.

    auto p8 = T_ptr(T_i8());

    auto ty = T_fn(vec(p8, T_int()), T_void());
    auto fun = decl_fastcall_fn(llmod, abi.bzero_glue_name(), ty);

    auto initbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("init"));
    auto hdrbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("hdr"));
    auto loopbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("loop"));
    auto endbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("end"));

    auto dst = llvm.LLVMGetParam(fun, 0u);
    auto count = llvm.LLVMGetParam(fun, 1u);

    // Init block.
    auto ib = new_builder(initbb);
    auto ip = ib.Alloca(T_int());
    ib.Store(C_int(0), ip);
    ib.Br(hdrbb);

    // Loop-header block
    auto hb = new_builder(hdrbb);
    auto i = hb.Load(ip);
    hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);

    // Loop-body block
    auto lb = new_builder(loopbb);
    i = lb.Load(ip);
    lb.Store(C_integral(0, T_i8()), lb.GEP(dst, vec(i)));
    lb.Store(lb.Add(i, C_int(1)), ip);
    lb.Br(hdrbb);

    // End block
    auto eb = new_builder(endbb);
    eb.RetVoid();
    ret fun;
}

fn make_vec_append_glue(ModuleRef llmod, type_names tn) -> ValueRef {
    /*
     * Args to vec_append_glue:
     *
     *   0. (Implicit) task ptr
     *
     *   1. Pointer to the tydesc of the vec, so that we can tell if it's gc
     *      mem, and have a tydesc to pass to malloc if we're allocating anew.
     *
     *   2. Pointer to the tydesc of the vec's stored element type, so that
     *      elements can be copied to a newly alloc'ed vec if one must be
     *      created.
     *
     *   3. Dst vec ptr (i.e. ptr to ptr to rust_vec).
     *
     *   4. Src vec (i.e. ptr to rust_vec).
     *
     *   5. Flag indicating whether to skip trailing null on dst.
     *
     */

    auto ty = T_fn(vec(T_taskptr(tn),
                       T_ptr(T_tydesc(tn)),
                       T_ptr(T_tydesc(tn)),
                       T_ptr(T_opaque_vec_ptr()),
                       T_opaque_vec_ptr(), T_bool()),
                   T_void());

    auto llfn = decl_fastcall_fn(llmod, abi.vec_append_glue_name(), ty);
    ret llfn;
}


fn vec_fill(@block_ctxt bcx, ValueRef v) -> ValueRef {
    ret bcx.build.Load(bcx.build.GEP(v, vec(C_int(0),
                                            C_int(abi.vec_elt_fill))));
}

fn put_vec_fill(@block_ctxt bcx, ValueRef v, ValueRef fill) -> ValueRef {
    ret bcx.build.Store(fill,
                        bcx.build.GEP(v,
                                      vec(C_int(0),
                                          C_int(abi.vec_elt_fill))));
}

fn vec_fill_adjusted(@block_ctxt bcx, ValueRef v,
                     ValueRef skipnull) -> ValueRef {
    auto f = bcx.build.Load(bcx.build.GEP(v,
                                          vec(C_int(0),
                                              C_int(abi.vec_elt_fill))));
    ret bcx.build.Select(skipnull, bcx.build.Sub(f, C_int(1)), f);
}

fn vec_p0(@block_ctxt bcx, ValueRef v) -> ValueRef {
    auto p = bcx.build.GEP(v, vec(C_int(0),
                                  C_int(abi.vec_elt_data)));
    ret bcx.build.PointerCast(p, T_ptr(T_i8()));
}


fn vec_p1(@block_ctxt bcx, ValueRef v) -> ValueRef {
    auto len = vec_fill(bcx, v);
    ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
}

fn vec_p1_adjusted(@block_ctxt bcx, ValueRef v,
                   ValueRef skipnull) -> ValueRef {
    auto len = vec_fill_adjusted(bcx, v, skipnull);
    ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
}

fn trans_vec_append_glue(@crate_ctxt cx) {

    auto llfn = cx.glues.vec_append_glue;

    let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 0u);
    let ValueRef llvec_tydesc = llvm.LLVMGetParam(llfn, 1u);
    let ValueRef llelt_tydesc = llvm.LLVMGetParam(llfn, 2u);
    let ValueRef lldst_vec_ptr = llvm.LLVMGetParam(llfn, 3u);
    let ValueRef llsrc_vec = llvm.LLVMGetParam(llfn, 4u);
    let ValueRef llskipnull = llvm.LLVMGetParam(llfn, 5u);

    auto fcx = @rec(llfn=llfn,
                    lltaskptr=lltaskptr,
                    llenv=C_null(T_ptr(T_nil())),
                    llretptr=C_null(T_ptr(T_nil())),
                    mutable llself=none[ValueRef],
                    mutable lliterbody=none[ValueRef],
                    llargs=new_def_hash[ValueRef](),
                    llobjfields=new_def_hash[ValueRef](),
                    lllocals=new_def_hash[ValueRef](),
                    lltydescs=new_def_hash[ValueRef](),
                    ccx=cx);

    auto bcx = new_top_block_ctxt(fcx);

    auto lldst_vec = bcx.build.Load(lldst_vec_ptr);

    // First the dst vec needs to grow to accommodate the src vec.
    // To do this we have to figure out how many bytes to add.

    auto llcopy_dst_ptr = bcx.build.Alloca(T_int());
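    // upcall_vec_grow reports back through this out-param whether the old
    // dst elements still need to be copied into the (possibly new) vec;
    // the conditional branch below keys off that flag.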
    auto llnew_vec_res =
        trans_upcall(bcx, "upcall_vec_grow",
                     vec(vp2i(bcx, lldst_vec),
                         vec_fill_adjusted(bcx, llsrc_vec, llskipnull),
                         vp2i(bcx, llcopy_dst_ptr),
                         vp2i(bcx, llvec_tydesc)));

    bcx = llnew_vec_res.bcx;
    auto llnew_vec = vi2p(bcx, llnew_vec_res.val,
                          T_opaque_vec_ptr());

    put_vec_fill(bcx, llnew_vec, C_int(0));

    auto copy_dst_cx = new_sub_block_ctxt(bcx, "copy new <- dst");
    auto copy_src_cx = new_sub_block_ctxt(bcx, "copy new <- src");

    auto pp0 = bcx.build.Alloca(T_ptr(T_i8()));
    bcx.build.Store(vec_p0(bcx, llnew_vec), pp0);

    bcx.build.CondBr(bcx.build.TruncOrBitCast
                     (bcx.build.Load(llcopy_dst_ptr),
                      T_i1()),
                     copy_dst_cx.llbb,
                     copy_src_cx.llbb);


    fn copy_elts(@block_ctxt cx,
                 ValueRef elt_tydesc,
                 ValueRef dst,
                 ValueRef src,
                 ValueRef n_bytes) -> result {
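        // Run take glue on each element in the range, then blit the raw
        // bytes across with a single memcpy.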

        auto src_lim = cx.build.GEP(src, vec(n_bytes));

        auto elt_llsz =
            cx.build.Load(cx.build.GEP(elt_tydesc,
                                       vec(C_int(0),
                                           C_int(abi.tydesc_field_size))));

        fn take_one(ValueRef elt_tydesc,
                    @block_ctxt cx,
                    ValueRef dst, ValueRef src) -> result {
            call_tydesc_glue_full(cx, src,
                                  elt_tydesc,
                                  abi.tydesc_field_take_glue_off);
            ret res(cx, src);
        }

        auto bcx = iter_sequence_raw(cx, dst, src, src_lim,
                                     elt_llsz, bind take_one(elt_tydesc,
                                                             _, _, _)).bcx;

        ret call_memcpy(bcx, dst, src, n_bytes);
    }

    // Copy any dst elements in, omitting null if doing str.
    auto n_bytes = vec_fill_adjusted(copy_dst_cx, lldst_vec, llskipnull);
    copy_dst_cx = copy_elts(copy_dst_cx,
                            llelt_tydesc,
                            copy_dst_cx.build.Load(pp0),
                            vec_p0(copy_dst_cx, lldst_vec),
                            n_bytes).bcx;

    put_vec_fill(copy_dst_cx, llnew_vec, n_bytes);
    copy_dst_cx.build.Store(vec_p1(copy_dst_cx, llnew_vec), pp0);
    copy_dst_cx.build.Br(copy_src_cx.llbb);


    // Copy any src elements in, carrying along null if doing str.
    n_bytes = vec_fill(copy_src_cx, llsrc_vec);
    copy_src_cx = copy_elts(copy_src_cx,
                            llelt_tydesc,
                            copy_src_cx.build.Load(pp0),
                            vec_p0(copy_src_cx, llsrc_vec),
                            n_bytes).bcx;

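    // Bump the new vec's fill by the bytes just copied from src, on top of
    // whatever the dst-copy block already recorded.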
    put_vec_fill(copy_src_cx, llnew_vec,
                 copy_src_cx.build.Add(vec_fill(copy_src_cx,
                                                llnew_vec),
                                        n_bytes));

    // Write new_vec back through the alias we were given.
    copy_src_cx.build.Store(llnew_vec, lldst_vec_ptr);
    copy_src_cx.build.RetVoid();
}


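// Declare the crate-wide glue functions and gather them into a glue_fns
// record. Some of these are only declared here and get their bodies later:
// trans_exit_task_glue and trans_vec_append_glue (called from trans_crate
// below) fill in the exit-task and vec-append glue.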
fn make_glues(ModuleRef llmod, type_names tn) -> @glue_fns {
    ret @rec(activate_glue = decl_glue(llmod, tn, abi.activate_glue_name()),
             yield_glue = decl_glue(llmod, tn, abi.yield_glue_name()),
             /*
              * Note: the signature passed to decl_cdecl_fn here looks unusual
              * because it is. It corresponds neither to an upcall signature
              * nor to a normal rust-ABI signature. In fact it is a fake
              * signature that exists solely to acquire the task pointer as
              * an argument to the upcall. It so happens that the runtime sets
              * up the task pointer as the sole incoming argument to the frame
              * that we return into when returning to the exit task glue. So
              * this is the signature required to retrieve it.
              */
             exit_task_glue = decl_cdecl_fn(llmod, abi.exit_task_glue_name(),
                                            T_fn(vec(T_int(),
                                                     T_int(),
                                                     T_int(),
                                                     T_taskptr(tn)),
                                                 T_void())),

             upcall_glues =
             _vec.init_fn[ValueRef](bind decl_upcall_glue(llmod, tn, _),
                                    abi.n_upcall_glues as uint),
             no_op_type_glue = make_no_op_type_glue(llmod, tn),
             memcpy_glue = make_memcpy_glue(llmod),
             bzero_glue = make_bzero_glue(llmod),
             vec_append_glue = make_vec_append_glue(llmod, tn));
}

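// Top-level translation entry point: create the LLVM module, build the
// crate context, translate every item, emit the crate record and (unless
// building a shared library) the main wrapper, then write bitcode to the
// requested output file.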
fn trans_crate(session.session sess, @ast.crate crate, str output,
               bool shared) {
    auto llmod =
        llvm.LLVMModuleCreateWithNameInContext(_str.buf("rust_out"),
                                               llvm.LLVMGetGlobalContext());

    llvm.LLVMSetDataLayout(llmod, _str.buf(x86.get_data_layout()));
    llvm.LLVMSetTarget(llmod, _str.buf(x86.get_target_triple()));
    auto td = mk_target_data(x86.get_data_layout());
    auto tn = mk_type_names();
    let ValueRef crate_ptr =
        llvm.LLVMAddGlobal(llmod, T_crate(tn), _str.buf("rust_crate"));

    llvm.LLVMSetModuleInlineAsm(llmod, _str.buf(x86.get_module_asm()));

    auto intrinsics = declare_intrinsics(llmod);

    auto glues = make_glues(llmod, tn);
    auto hasher = ty.hash_ty;
    auto eqer = ty.eq_ty;
    auto tag_sizes = map.mk_hashmap[@ty.t,uint](hasher, eqer);
    auto tydescs = map.mk_hashmap[@ty.t,@tydesc_info](hasher, eqer);
    let vec[ast.ty_param] obj_typarams = vec();
    let vec[ast.obj_field] obj_fields = vec();

    auto cx = @rec(sess = sess,
                   llmod = llmod,
                   td = td,
                   tn = tn,
                   crate_ptr = crate_ptr,
                   upcalls = new_str_hash[ValueRef](),
                   intrinsics = intrinsics,
                   item_names = new_str_hash[ValueRef](),
                   item_ids = new_def_hash[ValueRef](),
                   items = new_def_hash[@ast.item](),
                   native_items = new_def_hash[@ast.native_item](),
                   tag_sizes = tag_sizes,
                   discrims = new_def_hash[ValueRef](),
                   fn_pairs = new_def_hash[ValueRef](),
                   consts = new_def_hash[ValueRef](),
                   obj_methods = new_def_hash[()](),
                   tydescs = tydescs,
                   obj_typarams = obj_typarams,
                   obj_fields = obj_fields,
                   glues = glues,
                   names = namegen(0),
                   path = "_rust");

    create_typedefs(cx);

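    // Walk the crate: collect item and tag-constructor declarations first so
    // later references can find them, then translate constants and the
    // module body itself.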
    collect_items(cx, crate);
    collect_tag_ctors(cx, crate);
    trans_constants(cx, crate);

    trans_mod(cx, crate.node.module);
    trans_exit_task_glue(cx);
    trans_vec_append_glue(cx);
    create_crate_constant(cx);
    if (!shared) {
        trans_main_fn(cx, cx.crate_ptr);
    }

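    // Sanity-check the generated module before writing out LLVM bitcode.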
    check_module(llmod);

    llvm.LLVMWriteBitcodeToFile(llmod, _str.buf(output));
    llvm.LLVMDisposeModule(llmod);
}

//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C ../.. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//