import std._int;
import std._str;
import std._uint;
import std._vec;
import std._str.rustrt.sbuf;
import std._vec.rustrt.vbuf;
import std.map;
import std.map.hashmap;
import std.option;
import std.option.some;
import std.option.none;

import front.ast;
import front.creader;
import pretty.pprust;
import driver.session;
import middle.ty;
import back.x86;
import back.abi;

import pretty.pprust;

import middle.ty.pat_ty;
import middle.ty.plain_ty;

import util.common;
import util.common.istr;
import util.common.new_def_hash;
import util.common.new_str_hash;

import lib.llvm.llvm;
import lib.llvm.builder;
import lib.llvm.target_data;
import lib.llvm.type_handle;
import lib.llvm.type_names;
import lib.llvm.mk_pass_manager;
import lib.llvm.mk_target_data;
import lib.llvm.mk_type_handle;
import lib.llvm.mk_type_names;
import lib.llvm.llvm.ModuleRef;
import lib.llvm.llvm.ValueRef;
import lib.llvm.llvm.TypeRef;
import lib.llvm.llvm.TypeHandleRef;
import lib.llvm.llvm.BuilderRef;
import lib.llvm.llvm.BasicBlockRef;

import lib.llvm.False;
import lib.llvm.True;

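// A trivial name generator: next() bumps the counter and appends it to the
// given prefix, yielding fresh local symbol names ("str1", "str2", ...).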
state obj namegen(mutable int i) {
    fn next(str prefix) -> str {
        i += 1;
        ret prefix + istr(i);
    }
}

type glue_fns = rec(ValueRef activate_glue,
                    ValueRef yield_glue,
                    ValueRef exit_task_glue,
                    vec[ValueRef] native_glues_rust,
                    vec[ValueRef] native_glues_cdecl,
                    ValueRef no_op_type_glue,
                    ValueRef memcpy_glue,
                    ValueRef bzero_glue,
                    ValueRef vec_append_glue);

type tydesc_info = rec(ValueRef tydesc,
                       ValueRef take_glue,
                       ValueRef drop_glue);

/*
 * A note on nomenclature of linking: "upcall", "extern" and "native".
 *
 * An "extern" is an LLVM symbol we wind up emitting an undefined external
 * reference to. This means "we don't have the thing in this compilation unit,
 * please make sure you link it in at runtime". This could be a reference to
 * C code found in a C library, or rust code found in a rust crate.
 *
 * A "native" is a combination of an extern that references C code, plus a
 * glue-code stub that "looks like" a rust function, emitted here, plus a
 * generic N-ary bit of asm glue (found over in back/x86.rs) that performs a
 * control transfer into C from rust. Natives may be normal C library code.
 *
 * An upcall is a native call generated by the compiler (not corresponding to
 * any user-written call in the code) into librustrt, to perform some helper
 * task such as bringing a task to life, allocating memory, etc.
 *
 */
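// For example, the calls issued through trans_upcall() below ("upcall_malloc",
// "upcall_free", "upcall_get_type_desc", ...) are upcalls in this sense:
// compiler-generated native calls into librustrt.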

state type crate_ctxt = rec(session.session sess,
                            ModuleRef llmod,
                            target_data td,
                            type_names tn,
                            ValueRef crate_ptr,
                            hashmap[str, ValueRef] externs,
                            hashmap[str, ValueRef] intrinsics,
                            hashmap[ast.def_id, ValueRef] item_ids,
                            hashmap[ast.def_id, @ast.item] items,
                            hashmap[ast.def_id,
                                    @ast.native_item] native_items,
                            ty.type_cache type_cache,
                            hashmap[ast.def_id, str] item_symbols,
                            // TODO: hashmap[tup(tag_id,subtys), @tag_info]
                            hashmap[@ty.t, uint] tag_sizes,
                            hashmap[ast.def_id, ValueRef] discrims,
                            hashmap[ast.def_id, str] discrim_symbols,
                            hashmap[ast.def_id, ValueRef] fn_pairs,
                            hashmap[ast.def_id, ValueRef] consts,
                            hashmap[ast.def_id,()] obj_methods,
                            hashmap[@ty.t, @tydesc_info] tydescs,
                            vec[ast.ty_param] obj_typarams,
                            vec[ast.obj_field] obj_fields,
                            @glue_fns glues,
                            namegen names,
                            vec[str] path,
                            std.sha1.sha1 sha);

type self_vt = rec(ValueRef v, @ty.t t);

state type fn_ctxt = rec(ValueRef llfn,
                         ValueRef lltaskptr,
                         ValueRef llenv,
                         ValueRef llretptr,
                         mutable BasicBlockRef llallocas,
                         mutable option.t[self_vt] llself,
                         mutable option.t[ValueRef] lliterbody,
                         hashmap[ast.def_id, ValueRef] llargs,
                         hashmap[ast.def_id, ValueRef] llobjfields,
                         hashmap[ast.def_id, ValueRef] lllocals,
                         hashmap[ast.def_id, ValueRef] llupvars,
                         hashmap[ast.def_id, ValueRef] lltydescs,
                         @crate_ctxt ccx);

tag cleanup {
    clean(fn(@block_ctxt cx) -> result);
}


tag block_kind {
    SCOPE_BLOCK;
    LOOP_SCOPE_BLOCK(option.t[@block_ctxt], @block_ctxt);
    NON_SCOPE_BLOCK;
}

state type block_ctxt = rec(BasicBlockRef llbb,
                            builder build,
                            block_parent parent,
                            block_kind kind,
                            mutable vec[cleanup] cleanups,
                            @fn_ctxt fcx);

// FIXME: we should be able to use option.t[@block_parent] here but
// the infinite-tag check in rustboot gets upset.

tag block_parent {
    parent_none;
    parent_some(@block_ctxt);
}


state type result = rec(mutable @block_ctxt bcx,
                        mutable ValueRef val);

fn sep() -> str {
    ret "_";
}

fn extend_path(@crate_ctxt cx, str name) -> @crate_ctxt {
  ret @rec(path = cx.path + vec(name) with *cx);
}

fn path_name(vec[str] path) -> str {
    ret _str.connect(path, sep());
}


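// Mangling scheme: "_rust_" + the first 16 characters of the SHA-1 of the
// type's metadata encoding + "_" + the item path joined with "_".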
fn mangle_name_by_type(@crate_ctxt cx, @ty.t t) -> str {
    cx.sha.reset();
    auto f = metadata.def_to_str;
    cx.sha.input_str(metadata.ty_str(t, f));
    ret sep() + "rust" + sep()
        + _str.substr(cx.sha.result_str(), 0u, 16u) + sep()
        + path_name(cx.path);
}

fn mangle_name_by_seq(@crate_ctxt cx, str flav) -> str {
    ret sep() + "rust" + sep()
        + cx.names.next(flav) + sep()
        + path_name(cx.path);
}

fn res(@block_ctxt bcx, ValueRef val) -> result {
    ret rec(mutable bcx = bcx,
            mutable val = val);
}

fn ty_str(type_names tn, TypeRef t) -> str {
    ret lib.llvm.type_to_str(tn, t);
}

fn val_ty(ValueRef v) -> TypeRef {
    ret llvm.LLVMTypeOf(v);
}

fn val_str(type_names tn, ValueRef v) -> str {
    ret ty_str(tn, val_ty(v));
}


// LLVM type constructors.

fn T_void() -> TypeRef {
    // Note: For the time being llvm is kinda busted here, it has the notion
    // of a 'void' type that can only occur as part of the signature of a
    // function, but no general unit type of 0-sized value. This is, afaict,
    // vestigial from its C heritage, and we'll be attempting to submit a
    // patch upstream to fix it. In the mean time we only model function
    // outputs (Rust functions and C functions) using T_void, and model the
    // Rust general purpose nil type you can construct as 1-bit (always
    // zero). This makes the result incorrect for now -- things like a tuple
    // of 10 nil values will have 10-bit size -- but it doesn't seem like we
    // have any other options until it's fixed upstream.
    ret llvm.LLVMVoidType();
}

fn T_nil() -> TypeRef {
    // NB: See above in T_void().
    ret llvm.LLVMInt1Type();
}

fn T_i1() -> TypeRef {
    ret llvm.LLVMInt1Type();
}

fn T_i8() -> TypeRef {
    ret llvm.LLVMInt8Type();
}

fn T_i16() -> TypeRef {
    ret llvm.LLVMInt16Type();
}

fn T_i32() -> TypeRef {
    ret llvm.LLVMInt32Type();
}

fn T_i64() -> TypeRef {
    ret llvm.LLVMInt64Type();
}

fn T_f32() -> TypeRef {
    ret llvm.LLVMFloatType();
}

fn T_f64() -> TypeRef {
    ret llvm.LLVMDoubleType();
}

fn T_bool() -> TypeRef {
    ret T_i1();
}

fn T_int() -> TypeRef {
    // FIXME: switch on target type.
    ret T_i32();
}

fn T_float() -> TypeRef {
    // FIXME: switch on target type.
    ret T_f64();
}

fn T_char() -> TypeRef {
    ret T_i32();
}

fn T_fn(vec[TypeRef] inputs, TypeRef output) -> TypeRef {
    ret llvm.LLVMFunctionType(output,
                              _vec.buf[TypeRef](inputs),
                              _vec.len[TypeRef](inputs),
                              False);
}

fn T_fn_pair(type_names tn, TypeRef tfn) -> TypeRef {
    ret T_struct(vec(T_ptr(tfn),
                     T_opaque_closure_ptr(tn)));
}

fn T_ptr(TypeRef t) -> TypeRef {
    ret llvm.LLVMPointerType(t, 0u);
}

fn T_struct(vec[TypeRef] elts) -> TypeRef {
    ret llvm.LLVMStructType(_vec.buf[TypeRef](elts),
                            _vec.len[TypeRef](elts),
                            False);
}

fn T_opaque() -> TypeRef {
    ret llvm.LLVMOpaqueType();
}

fn T_task(type_names tn) -> TypeRef {
    auto s = "task";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }

    auto t = T_struct(vec(T_int(),      // Refcount
                          T_int(),      // Delegate pointer
                          T_int(),      // Stack segment pointer
                          T_int(),      // Runtime SP
                          T_int(),      // Rust SP
                          T_int(),      // GC chain
                          T_int(),      // Domain pointer
                          T_int()       // Crate cache pointer
                          ));
    tn.associate(s, t);
    ret t;
}

fn T_glue_fn(type_names tn) -> TypeRef {
    auto s = "glue_fn";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }

    // Bit of a kludge: pick the fn typeref out of the tydesc..
    let vec[TypeRef] tydesc_elts = _vec.init_elt[TypeRef](T_nil(), 10u);
    llvm.LLVMGetStructElementTypes(T_tydesc(tn),
                                   _vec.buf[TypeRef](tydesc_elts));
    auto t =
        llvm.LLVMGetElementType
        (tydesc_elts.(abi.tydesc_field_drop_glue_off));
    tn.associate(s, t);
    ret t;
}

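// Layout of a tydesc record: a first_param slot (tydesc**), the size and
// alignment of the described type, then the glue-function slots (take, drop,
// free, sever, mark, obj_drop, is_stateful). It is built through an LLVM type
// handle so that the glue-function type can refer back to the tydesc itself.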
fn T_tydesc(type_names tn) -> TypeRef {

    auto s = "tydesc";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }

    auto th = mk_type_handle();
    auto abs_tydesc = llvm.LLVMResolveTypeHandle(th.llth);
    auto tydescpp = T_ptr(T_ptr(abs_tydesc));
    auto pvoid = T_ptr(T_i8());
    auto glue_fn_ty = T_ptr(T_fn(vec(T_ptr(T_nil()),
                                     T_taskptr(tn),
                                     T_ptr(T_nil()),
                                     tydescpp,
                                     pvoid), T_void()));
    auto tydesc = T_struct(vec(tydescpp,          // first_param
                               T_int(),           // size
                               T_int(),           // align
                               glue_fn_ty,        // take_glue_off
                               glue_fn_ty,        // drop_glue_off
                               glue_fn_ty,        // free_glue_off
                               glue_fn_ty,        // sever_glue_off
                               glue_fn_ty,        // mark_glue_off
                               glue_fn_ty,        // obj_drop_glue_off
                               glue_fn_ty));      // is_stateful

    llvm.LLVMRefineType(abs_tydesc, tydesc);
    auto t = llvm.LLVMResolveTypeHandle(th.llth);
    tn.associate(s, t);
    ret t;
}

fn T_array(TypeRef t, uint n) -> TypeRef {
    ret llvm.LLVMArrayType(t, n);
}

fn T_vec(TypeRef t) -> TypeRef {
    ret T_struct(vec(T_int(),       // Refcount
                     T_int(),       // Alloc
                     T_int(),       // Fill
                     T_int(),       // Pad
                     T_array(t, 0u) // Body elements
                     ));
}

fn T_opaque_vec_ptr() -> TypeRef {
    ret T_ptr(T_vec(T_int()));
}

fn T_str() -> TypeRef {
    ret T_vec(T_i8());
}

fn T_box(TypeRef t) -> TypeRef {
    ret T_struct(vec(T_int(), t));
}

fn T_port(TypeRef t) -> TypeRef {
    ret T_struct(vec(T_int())); // Refcount
}

fn T_chan(TypeRef t) -> TypeRef {
    ret T_struct(vec(T_int())); // Refcount
}

fn T_crate(type_names tn) -> TypeRef {
    auto s = "crate";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }

    auto t = T_struct(vec(T_int(),      // ptrdiff_t image_base_off
                          T_int(),      // uintptr_t self_addr
                          T_int(),      // ptrdiff_t debug_abbrev_off
                          T_int(),      // size_t debug_abbrev_sz
                          T_int(),      // ptrdiff_t debug_info_off
                          T_int(),      // size_t debug_info_sz
                          T_int(),      // size_t activate_glue_off
                          T_int(),      // size_t yield_glue_off
                          T_int(),      // size_t unwind_glue_off
                          T_int(),      // size_t gc_glue_off
                          T_int(),      // size_t main_exit_task_glue_off
                          T_int(),      // int n_rust_syms
                          T_int(),      // int n_c_syms
                          T_int(),      // int n_libs
                          T_int()       // uintptr_t abi_tag
                          ));
    tn.associate(s, t);
    ret t;
}

fn T_taskptr(type_names tn) -> TypeRef {
    ret T_ptr(T_task(tn));
}

// This type must never be used directly; it must always be cast away.
fn T_typaram(type_names tn) -> TypeRef {
    auto s = "typaram";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }

    auto t = T_i8();
    tn.associate(s, t);
    ret t;
}

fn T_typaram_ptr(type_names tn) -> TypeRef {
    ret T_ptr(T_typaram(tn));
}

fn T_closure_ptr(type_names tn,
                 TypeRef lltarget_ty,
                 TypeRef llbindings_ty,
                 uint n_ty_params) -> TypeRef {

    // NB: keep this in sync with code in trans_bind; we're making
    // an LLVM typeref structure that has the same "shape" as the ty.t
    // it constructs.
    ret T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc(tn)),
                                 lltarget_ty,
                                 llbindings_ty,
                                 T_captured_tydescs(tn, n_ty_params))
                             )));
}

fn T_opaque_closure_ptr(type_names tn) -> TypeRef {
    auto s = "*closure";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }
    auto t = T_closure_ptr(tn, T_struct(vec(T_ptr(T_nil()),
                                            T_ptr(T_nil()))),
                           T_nil(),
                           0u);
    tn.associate(s, t);
    ret t;
}

fn T_tag(type_names tn, uint size) -> TypeRef {
    auto s = "tag_" + _uint.to_str(size, 10u);
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }
    auto t = T_struct(vec(T_int(), T_array(T_i8(), size)));
    tn.associate(s, t);
    ret t;
}

fn T_opaque_tag(type_names tn) -> TypeRef {
    auto s = "tag";
    if (tn.name_has_type(s)) {
        ret tn.get_type(s);
    }
    auto t = T_struct(vec(T_int(), T_i8()));
    tn.associate(s, t);
    ret t;
}

fn T_opaque_tag_ptr(type_names tn) -> TypeRef {
    ret T_ptr(T_opaque_tag(tn));
}

fn T_captured_tydescs(type_names tn, uint n) -> TypeRef {
    ret T_struct(_vec.init_elt[TypeRef](T_ptr(T_tydesc(tn)), n));
}

fn T_obj_ptr(type_names tn, uint n_captured_tydescs) -> TypeRef {
    // This function is not publicly exposed because it returns an incomplete
    // type. The dynamically-sized fields follow the captured tydescs.
    fn T_obj(type_names tn, uint n_captured_tydescs) -> TypeRef {
        ret T_struct(vec(T_ptr(T_tydesc(tn)),
                         T_captured_tydescs(tn, n_captured_tydescs)));
    }

    ret T_ptr(T_box(T_obj(tn, n_captured_tydescs)));
}

fn T_opaque_obj_ptr(type_names tn) -> TypeRef {
    ret T_obj_ptr(tn, 0u);
}


// This function now fails if called on a type with dynamic size (as its
// return value was always meaningless in that case anyhow). Beware!
//
// TODO: Enforce via a predicate.
fn type_of(@crate_ctxt cx, @ty.t t) -> TypeRef {
    if (ty.type_has_dynamic_size(t)) {
        log "type_of() called on a type with dynamic size: " +
            ty.ty_to_str(t);
        fail;
    }

    ret type_of_inner(cx, t, false);
}

fn type_of_explicit_args(@crate_ctxt cx,
                     vec[ty.arg] inputs) -> vec[TypeRef] {
    let vec[TypeRef] atys = vec();
    for (ty.arg arg in inputs) {
        if (ty.type_has_dynamic_size(arg.ty)) {
            check (arg.mode == ast.alias);
            atys += vec(T_typaram_ptr(cx.tn));
        } else {
            let TypeRef t;
            alt (arg.mode) {
                case (ast.alias) {
                    t = T_ptr(type_of_inner(cx, arg.ty, true));
                }
                case (_) {
                    t = type_of_inner(cx, arg.ty, false);
                }
            }
            atys += vec(t);
        }
    }
    ret atys;
}

// NB: must keep 4 fns in sync:
//
//  - type_of_fn_full
//  - create_llargs_for_fn_args.
//  - new_fn_ctxt
//  - trans_args

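// The implicit argument layout produced here (and assumed by the other three)
// is: arg 0 = output pointer, arg 1 = task pointer, arg 2 = environment
// (closure bindings or self-obj), then one tydesc pointer per type parameter,
// then, for iters, the iter-body function pair, and finally the explicit args.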
fn type_of_fn_full(@crate_ctxt cx,
                   ast.proto proto,
                   option.t[TypeRef] obj_self,
                   vec[ty.arg] inputs,
                   @ty.t output,
                   uint ty_param_count) -> TypeRef {
    let vec[TypeRef] atys = vec();

    // Arg 0: Output pointer.
    if (ty.type_has_dynamic_size(output)) {
        atys += vec(T_typaram_ptr(cx.tn));
    } else {
        atys += vec(T_ptr(type_of_inner(cx, output, false)));
    }

    // Arg 1: Task pointer.
    atys += vec(T_taskptr(cx.tn));

    // Arg 2: Env (closure-bindings / self-obj)
    alt (obj_self) {
        case (some[TypeRef](?t)) {
            check (t as int != 0);
            atys += vec(t);
        }
        case (_) {
            atys += vec(T_opaque_closure_ptr(cx.tn));
        }
    }

    // Args >3: ty params, if not acquired via capture...
    if (obj_self == none[TypeRef]) {
        auto i = 0u;
        while (i < ty_param_count) {
            atys += vec(T_ptr(T_tydesc(cx.tn)));
            i += 1u;
        }
    }

    if (proto == ast.proto_iter) {
        // If it's an iter, the 'output' type of the iter is actually the
        // *input* type of the function we're given as our iter-block
        // argument.
        atys +=
            vec(T_fn_pair(cx.tn,
                          type_of_fn_full(cx, ast.proto_fn, none[TypeRef],
                                          vec(rec(mode=ast.val, ty=output)),
                                          plain_ty(ty.ty_nil), 0u)));
    }

    // ... then explicit args.
    atys += type_of_explicit_args(cx, inputs);

    ret T_fn(atys, llvm.LLVMVoidType());
}

fn type_of_fn(@crate_ctxt cx,
              ast.proto proto,
              vec[ty.arg] inputs,
              @ty.t output,
              uint ty_param_count) -> TypeRef {
    ret type_of_fn_full(cx, proto, none[TypeRef], inputs, output,
                        ty_param_count);
}

fn type_of_native_fn(@crate_ctxt cx, ast.native_abi abi,
                     vec[ty.arg] inputs,
                     @ty.t output,
                     uint ty_param_count) -> TypeRef {
    let vec[TypeRef] atys = vec();
    if (abi == ast.native_abi_rust) {
        atys += vec(T_taskptr(cx.tn));
        auto t = ty.ty_native_fn(abi, inputs, output);
        auto i = 0u;
        while (i < ty_param_count) {
            atys += vec(T_ptr(T_tydesc(cx.tn)));
            i += 1u;
        }
    }
    atys += type_of_explicit_args(cx, inputs);
    ret T_fn(atys, type_of_inner(cx, output, false));
}

fn type_of_inner(@crate_ctxt cx, @ty.t t, bool boxed) -> TypeRef {
    let TypeRef llty = 0 as TypeRef;

    alt (t.struct) {
        case (ty.ty_native) { llty = T_ptr(T_i8()); }
        case (ty.ty_nil) { llty = T_nil(); }
        case (ty.ty_bool) { llty = T_bool(); }
        case (ty.ty_int) { llty = T_int(); }
        case (ty.ty_float) { llty = T_float(); }
        case (ty.ty_uint) { llty = T_int(); }
        case (ty.ty_machine(?tm)) {
            alt (tm) {
                case (common.ty_i8) { llty = T_i8(); }
                case (common.ty_u8) { llty = T_i8(); }
                case (common.ty_i16) { llty = T_i16(); }
                case (common.ty_u16) { llty = T_i16(); }
                case (common.ty_i32) { llty = T_i32(); }
                case (common.ty_u32) { llty = T_i32(); }
                case (common.ty_i64) { llty = T_i64(); }
                case (common.ty_u64) { llty = T_i64(); }
                case (common.ty_f32) { llty = T_f32(); }
                case (common.ty_f64) { llty = T_f64(); }
            }
        }
        case (ty.ty_char) { llty = T_char(); }
        case (ty.ty_str) { llty = T_ptr(T_str()); }
        case (ty.ty_tag(_, _)) {
            if (boxed) {
                llty = T_opaque_tag(cx.tn);
            } else {
                auto size = static_size_of_tag(cx, t);
                llty = T_tag(cx.tn, size);
            }
        }
        case (ty.ty_box(?mt)) {
            llty = T_ptr(T_box(type_of_inner(cx, mt.ty, true)));
        }
        case (ty.ty_vec(?mt)) {
            llty = T_ptr(T_vec(type_of_inner(cx, mt.ty, true)));
        }
        case (ty.ty_port(?t)) {
            llty = T_ptr(T_port(type_of_inner(cx, t, true)));
        }
        case (ty.ty_chan(?t)) {
            llty = T_ptr(T_chan(type_of_inner(cx, t, true)));
        }
        case (ty.ty_tup(?elts)) {
            let vec[TypeRef] tys = vec();
            for (ty.mt elt in elts) {
                tys += vec(type_of_inner(cx, elt.ty, boxed));
            }
            llty = T_struct(tys);
        }
        case (ty.ty_rec(?fields)) {
            let vec[TypeRef] tys = vec();
            for (ty.field f in fields) {
                tys += vec(type_of_inner(cx, f.mt.ty, boxed));
            }
            llty = T_struct(tys);
        }
        case (ty.ty_fn(?proto, ?args, ?out)) {
            llty = T_fn_pair(cx.tn, type_of_fn(cx, proto, args, out, 0u));
        }
        case (ty.ty_native_fn(?abi, ?args, ?out)) {
            auto nft = native_fn_wrapper_type(cx, 0u, t);
            llty = T_fn_pair(cx.tn, nft);
        }
        case (ty.ty_obj(?meths)) {
            auto th = mk_type_handle();
            auto self_ty = llvm.LLVMResolveTypeHandle(th.llth);

            let vec[TypeRef] mtys = vec();
            for (ty.method m in meths) {
                let TypeRef mty =
                    type_of_fn_full(cx, m.proto,
                                    some[TypeRef](self_ty),
                                    m.inputs, m.output, 0u);
                mtys += vec(T_ptr(mty));
            }
            let TypeRef vtbl = T_struct(mtys);
            let TypeRef pair = T_struct(vec(T_ptr(vtbl),
                                            T_opaque_obj_ptr(cx.tn)));

            auto abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
            llvm.LLVMRefineType(abs_pair, pair);
            abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
            llty = abs_pair;
        }
        case (ty.ty_var(_)) {
            log "ty_var in trans.type_of";
            fail;
        }
        case (ty.ty_param(_)) {
            llty = T_i8();
        }
        case (ty.ty_type) { llty = T_ptr(T_tydesc(cx.tn)); }
    }

    check (llty as int != 0);
    llvm.LLVMAddTypeName(cx.llmod, _str.buf(ty.ty_to_str(t)), llty);
    ret llty;
}

fn type_of_arg(@crate_ctxt cx, &ty.arg arg) -> TypeRef {
    alt (arg.ty.struct) {
        case (ty.ty_param(_)) {
            if (arg.mode == ast.alias) {
                ret T_typaram_ptr(cx.tn);
            }
        }
        case (_) {
            // fall through
        }
    }

    auto typ;
    if (arg.mode == ast.alias) {
        typ = T_ptr(type_of_inner(cx, arg.ty, true));
    } else {
        typ = type_of_inner(cx, arg.ty, false);
    }
    ret typ;
}

fn type_of_ty_params_opt_and_ty(@crate_ctxt ccx, ty.ty_params_opt_and_ty tpt)
        -> TypeRef {
    alt (tpt._1.struct) {
        case (ty.ty_fn(?proto, ?inputs, ?output)) {
            auto ty_params = option.get[vec[ast.def_id]](tpt._0);
            auto ty_param_count = _vec.len[ast.def_id](ty_params);
            auto llfnty = type_of_fn(ccx, proto, inputs, output,
                                     ty_param_count);
            ret T_fn_pair(ccx.tn, llfnty);
        }
        case (_) {
            // fall through
        }
    }
    ret type_of(ccx, tpt._1);
}


// Name sanitation. LLVM will happily accept identifiers with weird names, but
// gas doesn't!

fn sanitize(str s) -> str {
    auto result = "";
    for (u8 c in s) {
        if (c == ('@' as u8)) {
            result += "boxed_";
        } else {
            if (c == (',' as u8)) {
                result += "_";
            } else {
                if (c == ('{' as u8) || c == ('(' as u8)) {
                    result += "_of_";
                } else {
                    if (c != 10u8 && c != ('}' as u8) && c != (')' as u8) &&
                        c != (' ' as u8) && c != ('\t' as u8) &&
                        c != (';' as u8)) {
                        auto v = vec(c);
                        result += _str.from_bytes(v);
                    }
                }
            }
        }
    }
    ret result;
}

// LLVM constant constructors.

fn C_null(TypeRef t) -> ValueRef {
    ret llvm.LLVMConstNull(t);
}

fn C_integral(int i, TypeRef t) -> ValueRef {
    // FIXME. We can't use LLVM.ULongLong with our existing minimal native
    // API, which only knows word-sized args.  Lucky for us LLVM has a "take a
    // string encoding" version.  Hilarious. Please fix to handle:
    //
    // ret llvm.LLVMConstInt(T_int(), t as LLVM.ULongLong, False);
    //
    ret llvm.LLVMConstIntOfString(t, _str.buf(istr(i)), 10);
}

fn C_float(str s) -> ValueRef {
    ret llvm.LLVMConstRealOfString(T_float(), _str.buf(s));
}

fn C_floating(str s, TypeRef t) -> ValueRef {
    ret llvm.LLVMConstRealOfString(t, _str.buf(s));
}

fn C_nil() -> ValueRef {
    // NB: See comment above in T_void().
    ret C_integral(0, T_i1());
}

fn C_bool(bool b) -> ValueRef {
    if (b) {
        ret C_integral(1, T_bool());
    } else {
        ret C_integral(0, T_bool());
    }
}

fn C_int(int i) -> ValueRef {
    ret C_integral(i, T_int());
}

// This is a 'c-like' raw string, which differs from
// our boxed-and-length-annotated strings.
fn C_cstr(@crate_ctxt cx, str s) -> ValueRef {
    auto sc = llvm.LLVMConstString(_str.buf(s), _str.byte_len(s), False);
    auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(sc),
                                _str.buf(cx.names.next("str")));
    llvm.LLVMSetInitializer(g, sc);
    llvm.LLVMSetGlobalConstant(g, True);
    llvm.LLVMSetLinkage(g, lib.llvm.LLVMInternalLinkage
                        as llvm.Linkage);
    ret g;
}

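// The constant mirrors the T_vec layout above: refcount, alloc, fill, pad,
// then the string bytes; the global is then cast to the usual T_ptr(T_str()).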
// A rust boxed-and-length-annotated string.
fn C_str(@crate_ctxt cx, str s) -> ValueRef {
    auto len = _str.byte_len(s);
    auto box = C_struct(vec(C_int(abi.const_refcount as int),
                            C_int(len + 1u as int), // 'alloc'
                            C_int(len + 1u as int), // 'fill'
                            C_int(0),               // 'pad'
                            llvm.LLVMConstString(_str.buf(s),
                                                 len, False)));
    auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(box),
                                _str.buf(cx.names.next("str")));
    llvm.LLVMSetInitializer(g, box);
    llvm.LLVMSetGlobalConstant(g, True);
    llvm.LLVMSetLinkage(g, lib.llvm.LLVMInternalLinkage
                        as llvm.Linkage);
    ret llvm.LLVMConstPointerCast(g, T_ptr(T_str()));
}

fn C_zero_byte_arr(uint size) -> ValueRef {
    auto i = 0u;
    let vec[ValueRef] elts = vec();
    while (i < size) {
        elts += vec(C_integral(0, T_i8()));
        i += 1u;
    }
    ret llvm.LLVMConstArray(T_i8(), _vec.buf[ValueRef](elts),
                            _vec.len[ValueRef](elts));
}

fn C_struct(vec[ValueRef] elts) -> ValueRef {
    ret llvm.LLVMConstStruct(_vec.buf[ValueRef](elts),
                             _vec.len[ValueRef](elts),
                             False);
}

fn decl_fn(ModuleRef llmod, str name, uint cc, TypeRef llty) -> ValueRef {
    let ValueRef llfn =
        llvm.LLVMAddFunction(llmod, _str.buf(name), llty);
    llvm.LLVMSetFunctionCallConv(llfn, cc);
    ret llfn;
}

fn decl_cdecl_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
    ret decl_fn(llmod, name, lib.llvm.LLVMCCallConv, llty);
}

fn decl_fastcall_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
    ret decl_fn(llmod, name, lib.llvm.LLVMFastCallConv, llty);
}

fn decl_internal_fastcall_fn(ModuleRef llmod,
                            str name, TypeRef llty) -> ValueRef {
    auto llfn = decl_fn(llmod, name, lib.llvm.LLVMFastCallConv, llty);
    llvm.LLVMSetLinkage(llfn, lib.llvm.LLVMInternalLinkage as llvm.Linkage);
    ret llfn;
}

fn decl_glue(ModuleRef llmod, type_names tn, str s) -> ValueRef {
    ret decl_cdecl_fn(llmod, s, T_fn(vec(T_taskptr(tn)), T_void()));
}

fn decl_native_glue(ModuleRef llmod, type_names tn,
                    bool pass_task, uint _n) -> ValueRef {
    // It doesn't actually matter what type we come up with here, at the
    // moment, as we cast the native function pointers to int before passing
    // them to the indirect native-invocation glue.  But eventually we'd like
    // to call them directly, once we have a calling convention worked out.
    let int n = _n as int;
    let str s = abi.native_glue_name(n, pass_task);
    let vec[TypeRef] args = vec(T_int()); // callee
    if (!pass_task) {
        args += vec(T_int()); // taskptr, will not be passed
    }
    args += _vec.init_elt[TypeRef](T_int(), n as uint);

    ret decl_fastcall_fn(llmod, s, T_fn(args, T_int()));
}

fn get_extern_fn(&hashmap[str, ValueRef] externs,
                 ModuleRef llmod, str name,
                 uint cc, TypeRef ty) -> ValueRef {
    if (externs.contains_key(name)) {
        ret externs.get(name);
    }
    auto f = decl_fn(llmod, name, cc, ty);
    externs.insert(name, f);
    ret f;
}

fn get_extern_const(&hashmap[str, ValueRef] externs,
                    ModuleRef llmod, str name, TypeRef ty) -> ValueRef {
    if (externs.contains_key(name)) {
        ret externs.get(name);
    }
    auto c = llvm.LLVMAddGlobal(llmod, ty, _str.buf(name));
    externs.insert(name, c);
    ret c;
}

fn get_simple_extern_fn(&hashmap[str, ValueRef] externs,
                     ModuleRef llmod, str name, int n_args) -> ValueRef {
    auto inputs = _vec.init_elt[TypeRef](T_int(), n_args as uint);
    auto output = T_int();
    auto t = T_fn(inputs, output);
    ret get_extern_fn(externs, llmod, name, lib.llvm.LLVMCCallConv, t);
}

fn trans_upcall(@block_ctxt cx, str name, vec[ValueRef] args) -> result {
    auto cxx = cx.fcx.ccx;
    auto lltaskptr = cx.build.PtrToInt(cx.fcx.lltaskptr, T_int());
    auto args2 = vec(lltaskptr) + args;
    auto t = trans_native_call(cx.build, cxx.glues, lltaskptr,
                               cxx.externs, cxx.tn, cxx.llmod, name,
                               true, args2);
    ret res(cx, t);
}

fn trans_native_call(builder b, @glue_fns glues, ValueRef lltaskptr,
                     &hashmap[str, ValueRef] externs,
                     type_names tn, ModuleRef llmod, str name,
                     bool pass_task, vec[ValueRef] args) -> ValueRef {
    let int n = (_vec.len[ValueRef](args) as int);
    let ValueRef llnative = get_simple_extern_fn(externs, llmod, name, n);
    llnative = llvm.LLVMConstPointerCast(llnative, T_int());

    let ValueRef llglue;
    if (pass_task) {
        llglue = glues.native_glues_rust.(n);
    } else {
        llglue = glues.native_glues_cdecl.(n);
    }
    let vec[ValueRef] call_args = vec(llnative);

    if (!pass_task) {
        call_args += vec(lltaskptr);
    }

    for (ValueRef a in args) {
        call_args += vec(b.ZExtOrBitCast(a, T_int()));
    }

    ret b.FastCall(llglue, call_args);
}

fn trans_non_gc_free(@block_ctxt cx, ValueRef v) -> result {
    ret trans_upcall(cx, "upcall_free", vec(vp2i(cx, v),
                                            C_int(0)));
}

fn find_scope_cx(@block_ctxt cx) -> @block_ctxt {
    if (cx.kind != NON_SCOPE_BLOCK) {
        ret cx;
    }
    alt (cx.parent) {
        case (parent_some(?b)) {
            be find_scope_cx(b);
        }
        case (parent_none) {
            fail;
        }
    }
}

fn find_outer_scope_cx(@block_ctxt cx) -> @block_ctxt {
    auto scope_cx = find_scope_cx(cx);
    alt (scope_cx.parent) {
        case (parent_some(?b)) {
            be find_scope_cx(b);
        }
        case (parent_none) {
            fail;
        }
    }
}

fn umax(@block_ctxt cx, ValueRef a, ValueRef b) -> ValueRef {
    auto cond = cx.build.ICmp(lib.llvm.LLVMIntULT, a, b);
    ret cx.build.Select(cond, b, a);
}

fn umin(@block_ctxt cx, ValueRef a, ValueRef b) -> ValueRef {
    auto cond = cx.build.ICmp(lib.llvm.LLVMIntULT, a, b);
    ret cx.build.Select(cond, a, b);
}

fn align_to(@block_ctxt cx, ValueRef off, ValueRef align) -> ValueRef {
    auto mask = cx.build.Sub(align, C_int(1));
    auto bumped = cx.build.Add(off, mask);
    ret cx.build.And(bumped, cx.build.Not(mask));
}

// Returns the real size of the given type for the current target.
fn llsize_of_real(@crate_ctxt cx, TypeRef t) -> uint {
    ret llvm.LLVMStoreSizeOfType(cx.td.lltd, t);
}

fn llsize_of(TypeRef t) -> ValueRef {
    ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMSizeOf(t), T_int(), False);
}

fn llalign_of(TypeRef t) -> ValueRef {
    ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMAlignOf(t), T_int(), False);
}

fn size_of(@block_ctxt cx, @ty.t t) -> result {
    if (!ty.type_has_dynamic_size(t)) {
        ret res(cx, llsize_of(type_of(cx.fcx.ccx, t)));
    }
    ret dynamic_size_of(cx, t);
}

fn align_of(@block_ctxt cx, @ty.t t) -> result {
    if (!ty.type_has_dynamic_size(t)) {
        ret res(cx, llalign_of(type_of(cx.fcx.ccx, t)));
    }
    ret dynamic_align_of(cx, t);
}

fn alloca(@block_ctxt cx, TypeRef t) -> ValueRef {
    ret new_builder(cx.fcx.llallocas).Alloca(t);
}

fn array_alloca(@block_ctxt cx, TypeRef t, ValueRef n) -> ValueRef {
    ret new_builder(cx.fcx.llallocas).ArrayAlloca(t, n);
}


// Computes the size of the data part of a non-dynamically-sized tag.
fn static_size_of_tag(@crate_ctxt cx, @ty.t t) -> uint {
    if (ty.type_has_dynamic_size(t)) {
        log "dynamically sized type passed to static_size_of_tag()";
        fail;
    }

    if (cx.tag_sizes.contains_key(t)) {
        ret cx.tag_sizes.get(t);
    }

    auto tid;
    let vec[@ty.t] subtys;
    alt (t.struct) {
        case (ty.ty_tag(?tid_, ?subtys_)) {
            tid = tid_;
            subtys = subtys_;
        }
        case (_) {
            log "non-tag passed to static_size_of_tag()";
            fail;
        }
    }

    // Pull the type parameters out of the corresponding tag item.
    let vec[ast.def_id] ty_params = tag_ty_params(cx, tid);

    // Compute max(variant sizes).
    auto max_size = 0u;
    auto variants = tag_variants(cx, tid);
    for (variant_info variant in variants) {
        auto tup_ty = ty.plain_tup_ty(variant.args);

        // Perform any type parameter substitutions.
        tup_ty = ty.substitute_ty_params(ty_params, subtys, tup_ty);

        // Here we possibly do a recursive call.
        auto this_size = llsize_of_real(cx, type_of(cx, tup_ty));

        if (max_size < this_size) {
            max_size = this_size;
        }
    }

    cx.tag_sizes.insert(t, max_size);
    ret max_size;
}

fn dynamic_size_of(@block_ctxt cx, @ty.t t) -> result {
    fn align_elements(@block_ctxt cx, vec[@ty.t] elts) -> result {
        //
        // C padding rules:
        //
        //
        //   - Pad after each element so that next element is aligned.
        //   - Pad after final structure member so that whole structure
        //     is aligned to max alignment of interior.
        //
        auto off = C_int(0);
        auto max_align = C_int(1);
        auto bcx = cx;
        for (@ty.t e in elts) {
            auto elt_align = align_of(bcx, e);
            bcx = elt_align.bcx;
            auto elt_size = size_of(bcx, e);
            bcx = elt_size.bcx;
            auto aligned_off = align_to(bcx, off, elt_align.val);
            off = cx.build.Add(aligned_off, elt_size.val);
            max_align = umax(bcx, max_align, elt_align.val);
        }
        off = align_to(bcx, off, max_align);
        ret res(bcx, off);
    }

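    // For example, for a tuple of (i8, i32) on a target where i32 is 4-byte
    // aligned, the loop above yields off = 1 after the i8, aligns to 4 and
    // adds 4 for the i32 (off = 8), and the final align_to(8, 4) leaves a
    // total dynamic size of 8.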
    alt (t.struct) {
        case (ty.ty_param(?p)) {
            auto szptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
            ret res(szptr.bcx, szptr.bcx.build.Load(szptr.val));
        }
        case (ty.ty_tup(?elts)) {
            let vec[@ty.t] tys = vec();
            for (ty.mt mt in elts) {
                tys += vec(mt.ty);
            }
            ret align_elements(cx, tys);
        }
        case (ty.ty_rec(?flds)) {
            let vec[@ty.t] tys = vec();
            for (ty.field f in flds) {
                tys += vec(f.mt.ty);
            }
            ret align_elements(cx, tys);
        }
        case (ty.ty_tag(?tid, ?tps)) {
            auto bcx = cx;

            // Compute max(variant sizes).
            let ValueRef max_size = alloca(bcx, T_int());
            bcx.build.Store(C_int(0), max_size);

            auto ty_params = tag_ty_params(bcx.fcx.ccx, tid);
            auto variants = tag_variants(bcx.fcx.ccx, tid);
            for (variant_info variant in variants) {
                // Perform type substitution on the raw argument types.
                let vec[@ty.t] raw_tys = variant.args;
                let vec[@ty.t] tys = vec();
                for (@ty.t raw_ty in raw_tys) {
                    auto t = ty.substitute_ty_params(ty_params, tps, raw_ty);
                    tys += vec(t);
                }

                auto rslt = align_elements(bcx, tys);
                bcx = rslt.bcx;

                auto this_size = rslt.val;
                auto old_max_size = bcx.build.Load(max_size);
                bcx.build.Store(umax(bcx, this_size, old_max_size), max_size);
            }

            auto max_size_val = bcx.build.Load(max_size);
            auto total_size = bcx.build.Add(max_size_val, llsize_of(T_int()));
            ret res(bcx, total_size);
        }
    }
}

fn dynamic_align_of(@block_ctxt cx, @ty.t t) -> result {
    alt (t.struct) {
        case (ty.ty_param(?p)) {
            auto aptr = field_of_tydesc(cx, t, abi.tydesc_field_align);
            ret res(aptr.bcx, aptr.bcx.build.Load(aptr.val));
        }
        case (ty.ty_tup(?elts)) {
            auto a = C_int(1);
            auto bcx = cx;
            for (ty.mt e in elts) {
                auto align = align_of(bcx, e.ty);
                bcx = align.bcx;
                a = umax(bcx, a, align.val);
            }
            ret res(bcx, a);
        }
        case (ty.ty_rec(?flds)) {
            auto a = C_int(1);
            auto bcx = cx;
            for (ty.field f in flds) {
                auto align = align_of(bcx, f.mt.ty);
                bcx = align.bcx;
                a = umax(bcx, a, align.val);
            }
            ret res(bcx, a);
        }
        case (ty.ty_tag(_, _)) {
            ret res(cx, C_int(1)); // FIXME: stub
        }
    }
}

// Replacement for the LLVM 'GEP' instruction when field-indexing into a
// tuple-like structure (tup, rec) with a static index. This one is driven off
// ty.struct and knows what to do when it runs into a ty_param stuck in the
// middle of the thing it's GEP'ing into. Much like size_of and align_of,
// above.

fn GEP_tup_like(@block_ctxt cx, @ty.t t,
                ValueRef base, vec[int] ixs) -> result {

    check (ty.type_is_tup_like(t));

    // It might be a static-known type. Handle this.

    if (! ty.type_has_dynamic_size(t)) {
        let vec[ValueRef] v = vec();
        for (int i in ixs) {
            v += vec(C_int(i));
        }
        ret res(cx, cx.build.GEP(base, v));
    }

    // It is a dynamic-containing type that, if we convert directly to an LLVM
    // TypeRef, will be all wrong; there's no proper LLVM type to represent
    // it, and the lowering function will stick in i8* values for each
    // ty_param, which is not right; the ty_params are all of some dynamic
    // size.
    //
    // What we must do instead is sadder. We must look through the indices
    // manually and split the input type into a prefix and a target. We then
    // measure the prefix size, bump the input pointer by that amount, and
    // cast to a pointer-to-target type.


    // Given a type, an index vector and an element number N in that vector,
    // calculate index X and the type that results by taking the first X-1
    // elements of the type and splitting the Xth off. Return the prefix as
    // well as the innermost Xth type.

    fn split_type(@ty.t t, vec[int] ixs, uint n)
        -> rec(vec[@ty.t] prefix, @ty.t target) {

        let uint len = _vec.len[int](ixs);

        // We don't support 0-index or 1-index GEPs. The former is nonsense
        // and the latter would only be meaningful if we supported non-0
        // values for the 0th index (we don't).

        check (len > 1u);

        if (n == 0u) {
            // Since we're starting from a value that's a pointer to a
            // *single* structure, the first index (in GEP-ese) should just be
            // 0, to yield the pointee.
            check (ixs.(n) == 0);
            ret split_type(t, ixs, n+1u);
        }

        check (n < len);

        let int ix = ixs.(n);
        let vec[@ty.t] prefix = vec();
        let int i = 0;
        while (i < ix) {
            _vec.push[@ty.t](prefix, ty.get_element_type(t, i as uint));
            i += 1;
        }

        auto selected = ty.get_element_type(t, i as uint);

        if (n == len-1u) {
            // We are at the innermost index.
            ret rec(prefix=prefix, target=selected);

        } else {
            // Not the innermost index; call self recursively to dig deeper.
            // Once we get an inner result, append it current prefix and
            // return to caller.
            auto inner = split_type(selected, ixs, n+1u);
            prefix += inner.prefix;
            ret rec(prefix=prefix with inner);
        }
    }

    // We make a fake prefix tuple-type here; luckily for measuring sizes
    // the tuple parens are associative so it doesn't matter that we've
    // flattened the incoming structure.

    auto s = split_type(t, ixs, 0u);
    auto prefix_ty = ty.plain_tup_ty(s.prefix);
    auto bcx = cx;
    auto sz = size_of(bcx, prefix_ty);
    bcx = sz.bcx;
    auto raw = bcx.build.PointerCast(base, T_ptr(T_i8()));
    auto bumped = bcx.build.GEP(raw, vec(sz.val));

    if (ty.type_has_dynamic_size(s.target)) {
        ret res(bcx, bumped);
    }

    auto typ = T_ptr(type_of(bcx.fcx.ccx, s.target));
    ret res(bcx, bcx.build.PointerCast(bumped, typ));
}

// Replacement for the LLVM 'GEP' instruction when field indexing into a tag.
// This function uses GEP_tup_like() above and automatically performs casts as
// appropriate. @llblobptr is the data part of a tag value; its actual type is
// meaningless, as it will be cast away.
fn GEP_tag(@block_ctxt cx,
           ValueRef llblobptr,
           &ast.def_id tag_id,
           &ast.def_id variant_id,
           vec[@ty.t] ty_substs,
           int ix)
        -> result {
    auto ty_params = tag_ty_params(cx.fcx.ccx, tag_id);
    auto variant = tag_variant_with_id(cx.fcx.ccx, tag_id, variant_id);

    // Synthesize a tuple type so that GEP_tup_like() can work its magic.
    // Separately, store the type of the element we're interested in.
    auto arg_tys = variant.args;
    auto elem_ty = ty.plain_ty(ty.ty_nil);  // typestate infelicity
    auto i = 0;
    let vec[@ty.t] true_arg_tys = vec();
    for (@ty.t aty in arg_tys) {
        auto arg_ty = ty.substitute_ty_params(ty_params, ty_substs, aty);
        true_arg_tys += vec(arg_ty);
        if (i == ix) {
            elem_ty = arg_ty;
        }

        i += 1;
    }

    auto tup_ty = ty.plain_tup_ty(true_arg_tys);

    // Cast the blob pointer to the appropriate type, if we need to (i.e. if
    // the blob pointer isn't dynamically sized).
    let ValueRef llunionptr;
    if (!ty.type_has_dynamic_size(tup_ty)) {
        auto llty = type_of(cx.fcx.ccx, tup_ty);
        llunionptr = cx.build.TruncOrBitCast(llblobptr, T_ptr(llty));
    } else {
        llunionptr = llblobptr;
    }

    // Do the GEP_tup_like().
    auto rslt = GEP_tup_like(cx, tup_ty, llunionptr, vec(0, ix));

    // Cast the result to the appropriate type, if necessary.
    auto val;
    if (!ty.type_has_dynamic_size(elem_ty)) {
        auto llelemty = type_of(rslt.bcx.fcx.ccx, elem_ty);
        val = rslt.bcx.build.PointerCast(rslt.val, T_ptr(llelemty));
    } else {
        val = rslt.val;
    }

    ret res(rslt.bcx, val);
}


fn trans_raw_malloc(@block_ctxt cx, TypeRef llptr_ty, ValueRef llsize)
        -> result {
    // FIXME: need a table to collect tydesc globals.
    auto tydesc = C_int(0);
    auto rslt = trans_upcall(cx, "upcall_malloc", vec(llsize, tydesc));
    rslt = res(rslt.bcx, vi2p(cx, rslt.val, llptr_ty));
    ret rslt;
}

fn trans_malloc_boxed(@block_ctxt cx, @ty.t t) -> result {
    // Synthesize a fake box type structurally so we have something
    // to measure the size of.
    auto boxed_body = ty.plain_tup_ty(vec(plain_ty(ty.ty_int), t));
    auto box_ptr = ty.plain_box_ty(t, ast.imm);
    auto sz = size_of(cx, boxed_body);
    auto llty = type_of(cx.fcx.ccx, box_ptr);
    ret trans_raw_malloc(sz.bcx, llty, sz.val);
}


// Type descriptor and type glue stuff

// Given a type and a field index into its corresponding type descriptor,
// returns an LLVM ValueRef of that field from the tydesc, generating the
// tydesc if necessary.
fn field_of_tydesc(@block_ctxt cx, @ty.t t, int field) -> result {
    auto tydesc = get_tydesc(cx, t);
    ret res(tydesc.bcx,
            tydesc.bcx.build.GEP(tydesc.val, vec(C_int(0), C_int(field))));
}

// Given a type containing ty params, build a vector containing a ValueRef for
// each of the ty params it uses (from the current frame), as well as a vec
// containing a def_id for each such param. This is used solely for
// constructing derived tydescs.
fn linearize_ty_params(@block_ctxt cx, @ty.t t)
    -> tup(vec[ast.def_id], vec[ValueRef]) {
    let vec[ValueRef] param_vals = vec();
    let vec[ast.def_id] param_defs = vec();
    type rr = rec(@block_ctxt cx,
                  mutable vec[ValueRef] vals,
                  mutable vec[ast.def_id] defs);

    state obj folder(@rr r) {
        fn fold_simple_ty(@ty.t t) -> @ty.t {
            alt(t.struct) {
                case (ty.ty_param(?pid)) {
                    let bool seen = false;
                    for (ast.def_id d in r.defs) {
                        if (d == pid) {
                            seen = true;
                        }
                    }
                    if (!seen) {
                        r.vals += vec(r.cx.fcx.lltydescs.get(pid));
                        r.defs += vec(pid);
                    }
                }
                case (_) { }
            }
            ret t;
        }
    }


    auto x = @rec(cx = cx,
                  mutable vals = param_vals,
                  mutable defs = param_defs);

    ty.fold_ty(folder(x), t);

    ret tup(x.defs, x.vals);
}

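// Fetch (or build) the runtime type descriptor for t. A bare type parameter
// reuses the tydesc passed in by the caller; a type that mentions type
// parameters gets a "derived" tydesc computed at runtime via
// upcall_get_type_desc over the root tydesc plus the linearized parameter
// tydescs; any other type uses the static tydesc declared/defined below.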
fn get_tydesc(&@block_ctxt cx, @ty.t t) -> result {
    // Is the supplied type a type param? If so, return the passed-in tydesc.
    alt (ty.type_param(t)) {
        case (some[ast.def_id](?id)) {
            check (cx.fcx.lltydescs.contains_key(id));
            ret res(cx, cx.fcx.lltydescs.get(id));
        }
        case (none[ast.def_id])      { /* fall through */ }
    }

    // Does it contain a type param? If so, generate a derived tydesc.
    let uint n_params = ty.count_ty_params(t);

    if (ty.count_ty_params(t) > 0u) {
        auto tys = linearize_ty_params(cx, t);

        check (n_params == _vec.len[ast.def_id](tys._0));
        check (n_params == _vec.len[ValueRef](tys._1));

        if (!cx.fcx.ccx.tydescs.contains_key(t)) {
            declare_tydesc(cx.fcx.ccx, t);
            define_tydesc(cx.fcx.ccx, t, tys._0);
        }

        auto root = cx.fcx.ccx.tydescs.get(t).tydesc;

        auto tydescs = alloca(cx, T_array(T_ptr(T_tydesc(cx.fcx.ccx.tn)),
                                          1u /* for root*/ + n_params));

        auto i = 0;
        auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
        cx.build.Store(root, tdp);
        i += 1;
        for (ValueRef td in tys._1) {
            auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
            cx.build.Store(td, tdp);
            i += 1;
        }

        auto bcx = cx;
        auto sz = size_of(bcx, t);
        bcx = sz.bcx;
        auto align = align_of(bcx, t);
        bcx = align.bcx;

        auto v = trans_upcall(bcx, "upcall_get_type_desc",
                              vec(p2i(bcx.fcx.ccx.crate_ptr),
                                  sz.val,
                                  align.val,
                                  C_int((1u + n_params) as int),
                                  vp2i(bcx, tydescs)));

        ret res(v.bcx, vi2p(v.bcx, v.val,
                            T_ptr(T_tydesc(cx.fcx.ccx.tn))));
    }

    // Otherwise, generate a tydesc if necessary, and return it.
    if (!cx.fcx.ccx.tydescs.contains_key(t)) {
        let vec[ast.def_id] defs = vec();
        declare_tydesc(cx.fcx.ccx, t);
        define_tydesc(cx.fcx.ccx, t, defs);
    }
    ret res(cx, cx.fcx.ccx.tydescs.get(t).tydesc);
}

// Generates the declaration for (but doesn't fill in) a type descriptor. This
// needs to be separate from define_tydesc() below, because sometimes type glue
// functions need to refer to their own type descriptors.
fn declare_tydesc(@crate_ctxt cx, @ty.t t) {
    auto take_glue = declare_generic_glue(cx, t, "take");
    auto drop_glue = declare_generic_glue(cx, t, "drop");

    auto llsize;
    auto llalign;
    if (!ty.type_has_dynamic_size(t)) {
        auto llty = type_of(cx, t);
        llsize = llsize_of(llty);
        llalign = llalign_of(llty);
    } else {
        // These will be overwritten as the derived tydesc is generated, so
        // we create placeholder values.
        llsize = C_int(0);
        llalign = C_int(0);
    }

    auto glue_fn_ty = T_ptr(T_glue_fn(cx.tn));

    // FIXME: this adjustment has to do with the ridiculous encoding of
    // glue-pointer-constants in the tydesc records: They are tydesc-relative
    // displacements.  This is purely for compatibility with rustboot and
    // should go when it is discarded.
    fn off(ValueRef tydescp,
           ValueRef gluefn) -> ValueRef {
        ret i2p(llvm.LLVMConstSub(p2i(gluefn), p2i(tydescp)),
                val_ty(gluefn));
    }

    auto name = sanitize(cx.names.next("tydesc_" + ty.ty_to_str(t)));
    auto gvar = llvm.LLVMAddGlobal(cx.llmod, T_tydesc(cx.tn),
                                   _str.buf(name));
    auto tydesc = C_struct(vec(C_null(T_ptr(T_ptr(T_tydesc(cx.tn)))),
                               llsize,
                               llalign,
                               off(gvar, take_glue),  // take_glue_off
                               off(gvar, drop_glue),  // drop_glue_off
                               C_null(glue_fn_ty),    // free_glue_off
                               C_null(glue_fn_ty),    // sever_glue_off
                               C_null(glue_fn_ty),    // mark_glue_off
                               C_null(glue_fn_ty),    // obj_drop_glue_off
                               C_null(glue_fn_ty)));  // is_stateful

    llvm.LLVMSetInitializer(gvar, tydesc);
    llvm.LLVMSetGlobalConstant(gvar, True);
    llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMInternalLinkage
                        as llvm.Linkage);

    auto info = rec(
        tydesc=gvar,
        take_glue=take_glue,
        drop_glue=drop_glue
    );

    cx.tydescs.insert(t, @info);
}

// declare_tydesc() above must have been called first.
fn define_tydesc(@crate_ctxt cx, @ty.t t, vec[ast.def_id] typaram_defs) {
    auto info = cx.tydescs.get(t);
    auto gvar = info.tydesc;

    auto tg = make_take_glue;
    auto take_glue = make_generic_glue(cx, t, info.take_glue, tg,
                                       typaram_defs);
    auto dg = make_drop_glue;
    auto drop_glue = make_generic_glue(cx, t, info.drop_glue, dg,
                                       typaram_defs);
}

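// Declares (without defining) a single glue function for t, under a mangled
// and sanitized name derived from the type and the glue kind ("take" or
// "drop").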
fn declare_generic_glue(@crate_ctxt cx, @ty.t t, str name) -> ValueRef {
    auto llfnty = T_glue_fn(cx.tn);

    auto gcx = @rec(path=vec("glue", name) with *cx);
    auto fn_name = mangle_name_by_type(gcx, t);
    fn_name = sanitize(fn_name);
    auto llfn = decl_internal_fastcall_fn(cx.llmod, fn_name, llfnty);
    ret llfn;
}

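// Fills in the body of a glue function: binds the type-parameter tydescs
// passed by the caller, casts the value argument to the appropriate LLVM
// type, and runs the supplied helper (the take or drop worker) over it.
// Scalar types need no glue body.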
fn make_generic_glue(@crate_ctxt cx, @ty.t t, ValueRef llfn,
                     val_and_ty_fn helper,
                     vec[ast.def_id] typaram_defs) -> ValueRef {
    auto fcx = new_fn_ctxt(cx, llfn);
    auto bcx = new_top_block_ctxt(fcx);
    auto lltop = bcx.llbb;

    auto re;
    if (!ty.type_is_scalar(t)) {
        auto llty;
        if (ty.type_has_dynamic_size(t)) {
            llty = T_ptr(T_i8());
        } else if (ty.type_is_structural(t)) {
            llty = T_ptr(type_of(cx, t));
        } else {
            llty = type_of(cx, t);
        }

        auto lltyparams = llvm.LLVMGetParam(llfn, 3u);
        auto p = 0;
        for (ast.def_id d in typaram_defs) {
            auto llparam = bcx.build.GEP(lltyparams, vec(C_int(p)));
            llparam = bcx.build.Load(llparam);
            bcx.fcx.lltydescs.insert(d, llparam);
            p += 1;
        }

        auto llrawptr = llvm.LLVMGetParam(llfn, 4u);
        auto llval = bcx.build.BitCast(llrawptr, llty);

        re = helper(bcx, llval, t);
    } else {
        re = res(bcx, C_nil());
    }

    re.bcx.build.RetVoid();

    // Tie up the llallocas -> lltop edge.
    new_builder(fcx.llallocas).Br(lltop);

    ret llfn;
}

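// Take glue: increment the refcount of boxed values and recurse through
// structural ones; other types need no work.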
fn make_take_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
    if (ty.type_is_boxed(t)) {
        ret incr_refcnt_of_boxed(cx, v);

    } else if (ty.type_is_structural(t)) {
        ret iter_structural_ty(cx, v, t,
                               bind take_ty(_, _, _));
    }
    ret res(cx, C_nil());
}

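// Bumps the refcount of a box, unless the box carries the const_refcount
// sentinel, in which case it is left alone.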
fn incr_refcnt_of_boxed(@block_ctxt cx, ValueRef box_ptr) -> result {
    auto rc_ptr = cx.build.GEP(box_ptr, vec(C_int(0),
                                            C_int(abi.box_rc_field_refcnt)));
    auto rc = cx.build.Load(rc_ptr);

    auto rc_adj_cx = new_sub_block_ctxt(cx, "rc++");
    auto next_cx = new_sub_block_ctxt(cx, "next");

    auto const_test = cx.build.ICmp(lib.llvm.LLVMIntEQ,
                                    C_int(abi.const_refcount as int), rc);
    cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);

    rc = rc_adj_cx.build.Add(rc, C_int(1));
    rc_adj_cx.build.Store(rc, rc_ptr);
    rc_adj_cx.build.Br(next_cx.llbb);

    ret res(next_cx, C_nil());
}

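// Drop glue: decrements refcounts and frees heap allocations, dispatching on
// the type (strings, vectors, boxes, ports, chans, objs and closures) and
// recursing through structural types.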
fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
    alt (t.struct) {
        case (ty.ty_str) {
            ret decr_refcnt_and_if_zero
                (cx, v, bind trans_non_gc_free(_, v),
                 "free string",
                 T_int(), C_int(0));
        }

        case (ty.ty_vec(_)) {
            fn hit_zero(@block_ctxt cx, ValueRef v,
                        @ty.t t) -> result {
                auto res = iter_sequence(cx, v, t,
                                         bind drop_ty(_,_,_));
                // FIXME: switch gc/non-gc on layer of the type.
                ret trans_non_gc_free(res.bcx, v);
            }
            ret decr_refcnt_and_if_zero(cx, v,
                                        bind hit_zero(_, v, t),
                                        "free vector",
                                        T_int(), C_int(0));
        }

        case (ty.ty_box(?body_mt)) {
            fn hit_zero(@block_ctxt cx, ValueRef v,
                        @ty.t body_ty) -> result {
                auto body = cx.build.GEP(v,
                                         vec(C_int(0),
                                             C_int(abi.box_rc_field_body)));

                auto body_val = load_scalar_or_boxed(cx, body, body_ty);
                auto res = drop_ty(cx, body_val, body_ty);
                // FIXME: switch gc/non-gc on layer of the type.
                ret trans_non_gc_free(res.bcx, v);
            }
            ret decr_refcnt_and_if_zero(cx, v,
                                        bind hit_zero(_, v, body_mt.ty),
                                        "free box",
                                        T_int(), C_int(0));
        }

        case (ty.ty_port(_)) {
            fn hit_zero(@block_ctxt cx, ValueRef v) -> result {
                ret trans_upcall(cx, "upcall_del_port",
                                 vec(vp2i(cx, v)));
            }
            ret decr_refcnt_and_if_zero(cx, v,
                                        bind hit_zero(_, v),
                                        "free port",
                                        T_int(), C_int(0));
        }

        case (ty.ty_chan(_)) {
            fn hit_zero(@block_ctxt cx, ValueRef v) -> result {
                ret trans_upcall(cx, "upcall_del_chan",
                                 vec(vp2i(cx, v)));
            }
            ret decr_refcnt_and_if_zero(cx, v,
                                        bind hit_zero(_, v),
                                        "free chan",
                                        T_int(), C_int(0));
        }

        case (ty.ty_obj(_)) {
            fn hit_zero(@block_ctxt cx, ValueRef v) -> result {

                // Call through the obj's own fields-drop glue first.
                auto body =
                    cx.build.GEP(v,
                                 vec(C_int(0),
                                     C_int(abi.box_rc_field_body)));

                auto tydescptr =
                    cx.build.GEP(body,
                                 vec(C_int(0),
                                     C_int(abi.obj_body_elt_tydesc)));

                call_tydesc_glue_full(cx, body, cx.build.Load(tydescptr),
                                      abi.tydesc_field_drop_glue_off);

                // Then free the body.
                // FIXME: switch gc/non-gc on layer of the type.
                ret trans_non_gc_free(cx, v);
            }
            auto box_cell =
                cx.build.GEP(v,
                             vec(C_int(0),
                                 C_int(abi.obj_field_box)));

            auto boxptr = cx.build.Load(box_cell);

            ret decr_refcnt_and_if_zero(cx, boxptr,
                                        bind hit_zero(_, boxptr),
                                        "free obj",
                                        T_int(), C_int(0));
        }

        case (ty.ty_fn(_,_,_)) {
            fn hit_zero(@block_ctxt cx, ValueRef v) -> result {

                // Call through the closure's own fields-drop glue first.
                auto body =
                    cx.build.GEP(v,
                                 vec(C_int(0),
                                     C_int(abi.box_rc_field_body)));
                auto bindings =
                    cx.build.GEP(body,
                                 vec(C_int(0),
                                     C_int(abi.closure_elt_bindings)));

                auto tydescptr =
                    cx.build.GEP(body,
                                 vec(C_int(0),
                                     C_int(abi.closure_elt_tydesc)));

                call_tydesc_glue_full(cx, bindings, cx.build.Load(tydescptr),
                                      abi.tydesc_field_drop_glue_off);


                // Then free the body.
                // FIXME: switch gc/non-gc on layer of the type.
                ret trans_non_gc_free(cx, v);
            }
            auto box_cell =
                cx.build.GEP(v,
                             vec(C_int(0),
                                 C_int(abi.fn_field_box)));

            auto boxptr = cx.build.Load(box_cell);

            ret decr_refcnt_and_if_zero(cx, boxptr,
                                        bind hit_zero(_, boxptr),
                                        "free fn",
                                        T_int(), C_int(0));
        }

        case (_) {
            if (ty.type_is_structural(t)) {
                ret iter_structural_ty(cx, v, t,
                                       bind drop_ty(_, _, _));

            } else if (ty.type_is_scalar(t) ||
                       ty.type_is_native(t) ||
                       ty.type_is_nil(t)) {
                ret res(cx, C_nil());
            }
        }
    }
    cx.fcx.ccx.sess.bug("bad type in trans.make_drop_glue_inner: " +
                        ty.ty_to_str(t));
    fail;
}

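// Emits the common refcount-decrement pattern: null pointers and const
// refcounts are skipped; otherwise the count is decremented and 'inner'
// (typically a free) runs when it reaches zero. The returned phi yields
// v_else on every path where 'inner' did not run.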
fn decr_refcnt_and_if_zero(@block_ctxt cx,
                           ValueRef box_ptr,
                           fn(@block_ctxt cx) -> result inner,
                           str inner_name,
                           TypeRef t_else, ValueRef v_else) -> result {

    auto load_rc_cx = new_sub_block_ctxt(cx, "load rc");
    auto rc_adj_cx = new_sub_block_ctxt(cx, "rc--");
    auto inner_cx = new_sub_block_ctxt(cx, inner_name);
    auto next_cx = new_sub_block_ctxt(cx, "next");

    auto null_test = cx.build.IsNull(box_ptr);
    cx.build.CondBr(null_test, next_cx.llbb, load_rc_cx.llbb);


    auto rc_ptr = load_rc_cx.build.GEP(box_ptr,
                                       vec(C_int(0),
                                           C_int(abi.box_rc_field_refcnt)));

    auto rc = load_rc_cx.build.Load(rc_ptr);
    auto const_test =
        load_rc_cx.build.ICmp(lib.llvm.LLVMIntEQ,
                              C_int(abi.const_refcount as int), rc);
    load_rc_cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);

    rc = rc_adj_cx.build.Sub(rc, C_int(1));
    rc_adj_cx.build.Store(rc, rc_ptr);
    auto zero_test = rc_adj_cx.build.ICmp(lib.llvm.LLVMIntEQ, C_int(0), rc);
    rc_adj_cx.build.CondBr(zero_test, inner_cx.llbb, next_cx.llbb);

    auto inner_res = inner(inner_cx);
    inner_res.bcx.build.Br(next_cx.llbb);

    auto phi = next_cx.build.Phi(t_else,
                                 vec(v_else, v_else, v_else, inner_res.val),
                                 vec(cx.llbb,
                                     load_rc_cx.llbb,
                                     rc_adj_cx.llbb,
                                     inner_res.bcx.llbb));

    ret res(next_cx, phi);
}

// Tag information

// Returns the type parameters of a tag.
fn tag_ty_params(@crate_ctxt cx, ast.def_id id) -> vec[ast.def_id] {
    ret ty.lookup_generic_item_type(cx.sess, cx.type_cache, id)._0;
}

type variant_info = rec(vec[@ty.t] args, @ty.t ctor_ty, ast.def_id id);

// Returns information about the variants in a tag.
fn tag_variants(@crate_ctxt cx, ast.def_id id) -> vec[variant_info] {
    if (cx.sess.get_targ_crate_num() != id._0) {
        ret creader.get_tag_variants(cx.sess, id);
    }

    check (cx.items.contains_key(id));
    alt (cx.items.get(id).node) {
        case (ast.item_tag(_, ?variants, _, _, _)) {
            let vec[variant_info] result = vec();
            for (ast.variant variant in variants) {
                auto ctor_ty = node_ann_type(cx, variant.node.ann);
                let vec[@ty.t] arg_tys = vec();
                if (_vec.len[ast.variant_arg](variant.node.args) > 0u) {
                    for (ty.arg a in ty.ty_fn_args(ctor_ty)) {
                        arg_tys += vec(a.ty);
                    }
                }
                auto did = variant.node.id;
                result += vec(rec(args=arg_tys, ctor_ty=ctor_ty, id=did));
            }
            ret result;
        }
    }
    fail;   // not reached
}

// Returns information about the tag variant with the given ID.
fn tag_variant_with_id(@crate_ctxt cx,
                       &ast.def_id tag_id,
                       &ast.def_id variant_id) -> variant_info {
    auto variants = tag_variants(cx, tag_id);

    auto i = 0u;
    while (i < _vec.len[variant_info](variants)) {
        auto variant = variants.(i);
        if (common.def_eq(variant.id, variant_id)) {
            ret variant;
        }
        i += 1u;
    }

    log "tag_variant_with_id(): no variant exists with that ID";
    fail;
}

// Returns a new plain tag type of the given ID with no type parameters. Don't
// use this function in new code; it's a hack to keep things working for now.
fn mk_plain_tag(ast.def_id tid) -> @ty.t {
    let vec[@ty.t] tps = vec();
    ret ty.plain_ty(ty.ty_tag(tid, tps));
}


type val_pair_fn = fn(@block_ctxt cx, ValueRef dst, ValueRef src) -> result;

type val_and_ty_fn = fn(@block_ctxt cx, ValueRef v, @ty.t t) -> result;

type val_pair_and_ty_fn =
    fn(@block_ctxt cx, ValueRef av, ValueRef bv, @ty.t t) -> result;

// Iterates through the elements of a structural type.
fn iter_structural_ty(@block_ctxt cx,
                      ValueRef v,
                      @ty.t t,
                      val_and_ty_fn f)
    -> result {
    fn adaptor_fn(val_and_ty_fn f,
                  @block_ctxt cx,
                  ValueRef av,
                  ValueRef bv,
                  @ty.t t) -> result {
        ret f(cx, av, t);
    }
    be iter_structural_ty_full(cx, v, v, t,
                               bind adaptor_fn(f, _, _, _, _));
}


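// Walks two values of the same structural type in lockstep, applying f to
// each corresponding pair of elements.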
fn iter_structural_ty_full(@block_ctxt cx,
                           ValueRef av,
                           ValueRef bv,
                           @ty.t t,
                           val_pair_and_ty_fn f)
    -> result {
    let result r = res(cx, C_nil());

    fn iter_boxpp(@block_ctxt cx,
                  ValueRef box_a_cell,
                  ValueRef box_b_cell,
                  val_pair_and_ty_fn f) -> result {
        auto box_a_ptr = cx.build.Load(box_a_cell);
        auto box_b_ptr = cx.build.Load(box_b_cell);
        auto tnil = plain_ty(ty.ty_nil);
        auto tbox = ty.plain_box_ty(tnil, ast.imm);

        auto inner_cx = new_sub_block_ctxt(cx, "iter box");
        auto next_cx = new_sub_block_ctxt(cx, "next");
        auto null_test = cx.build.IsNull(box_a_ptr);
        cx.build.CondBr(null_test, next_cx.llbb, inner_cx.llbb);

        auto r = f(inner_cx, box_a_ptr, box_b_ptr, tbox);
        r.bcx.build.Br(next_cx.llbb);
        ret res(next_cx, r.val);
    }

    alt (t.struct) {
        case (ty.ty_tup(?args)) {
            let int i = 0;
            for (ty.mt arg in args) {
                r = GEP_tup_like(r.bcx, t, av, vec(0, i));
                auto elt_a = r.val;
                r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
                auto elt_b = r.val;
                r = f(r.bcx,
                      load_scalar_or_boxed(r.bcx, elt_a, arg.ty),
                      load_scalar_or_boxed(r.bcx, elt_b, arg.ty),
                      arg.ty);
                i += 1;
            }
        }
        case (ty.ty_rec(?fields)) {
            let int i = 0;
            for (ty.field fld in fields) {
                r = GEP_tup_like(r.bcx, t, av, vec(0, i));
                auto llfld_a = r.val;
                r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
                auto llfld_b = r.val;
                r = f(r.bcx,
                      load_scalar_or_boxed(r.bcx, llfld_a, fld.mt.ty),
                      load_scalar_or_boxed(r.bcx, llfld_b, fld.mt.ty),
                      fld.mt.ty);
                i += 1;
            }
        }
        case (ty.ty_tag(?tid, ?tps)) {
            auto variants = tag_variants(cx.fcx.ccx, tid);
            auto n_variants = _vec.len[variant_info](variants);

            // Cast the tags to types we can GEP into.
            auto lltagty = T_opaque_tag_ptr(cx.fcx.ccx.tn);
            auto av_tag = cx.build.PointerCast(av, lltagty);
            auto bv_tag = cx.build.PointerCast(bv, lltagty);

            auto lldiscrim_a_ptr = cx.build.GEP(av_tag,
                                                vec(C_int(0), C_int(0)));
            auto llunion_a_ptr = cx.build.GEP(av_tag,
                                              vec(C_int(0), C_int(1)));
            auto lldiscrim_a = cx.build.Load(lldiscrim_a_ptr);

            auto lldiscrim_b_ptr = cx.build.GEP(bv_tag,
                                                vec(C_int(0), C_int(0)));
            auto llunion_b_ptr = cx.build.GEP(bv_tag,
                                              vec(C_int(0), C_int(1)));
            auto lldiscrim_b = cx.build.Load(lldiscrim_b_ptr);

            // NB: we must hit the discriminant first so that structural
            // comparisons know not to proceed when the discriminants differ.
            auto bcx = cx;
            bcx = f(bcx, lldiscrim_a, lldiscrim_b,
                    plain_ty(ty.ty_int)).bcx;

            auto unr_cx = new_sub_block_ctxt(bcx, "tag-iter-unr");
            unr_cx.build.Unreachable();

            auto llswitch = bcx.build.Switch(lldiscrim_a, unr_cx.llbb,
                                             n_variants);

            auto next_cx = new_sub_block_ctxt(bcx, "tag-iter-next");

            auto ty_params = tag_ty_params(bcx.fcx.ccx, tid);

            auto i = 0u;
            for (variant_info variant in variants) {
                auto variant_cx = new_sub_block_ctxt(bcx,
                                                     "tag-iter-variant-" +
                                                     _uint.to_str(i, 10u));
                llvm.LLVMAddCase(llswitch, C_int(i as int), variant_cx.llbb);

                if (_vec.len[@ty.t](variant.args) > 0u) {
                    // N-ary variant.
                    auto fn_ty = variant.ctor_ty;
                    alt (fn_ty.struct) {
                        case (ty.ty_fn(_, ?args, _)) {
                            auto j = 0;
                            for (ty.arg a in args) {
                                auto v = vec(C_int(0), C_int(j as int));

                                auto rslt = GEP_tag(variant_cx, llunion_a_ptr,
                                    tid, variant.id, tps, j);
                                auto llfldp_a = rslt.val;
                                variant_cx = rslt.bcx;

                                rslt = GEP_tag(variant_cx, llunion_b_ptr, tid,
                                    variant.id, tps, j);
                                auto llfldp_b = rslt.val;
                                variant_cx = rslt.bcx;

                                auto ty_subst = ty.substitute_ty_params(
                                    ty_params, tps, a.ty);

                                auto llfld_a =
                                    load_scalar_or_boxed(variant_cx,
                                                         llfldp_a,
                                                         ty_subst);

                                auto llfld_b =
                                    load_scalar_or_boxed(variant_cx,
                                                         llfldp_b,
                                                         ty_subst);

                                auto res = f(variant_cx,
                                             llfld_a, llfld_b, ty_subst);
                                variant_cx = res.bcx;
                                j += 1;
                            }
                        }
                        case (_) { fail; }
                    }

                    variant_cx.build.Br(next_cx.llbb);
                } else {
                    // Nullary variant; nothing to do.
                    variant_cx.build.Br(next_cx.llbb);
                }

                i += 1u;
            }

            ret res(next_cx, C_nil());
        }
        case (ty.ty_fn(_,_,_)) {
            auto box_cell_a =
                cx.build.GEP(av,
                             vec(C_int(0),
                                 C_int(abi.fn_field_box)));
            auto box_cell_b =
                cx.build.GEP(bv,
                             vec(C_int(0),
                                 C_int(abi.fn_field_box)));
            ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
        }
        case (ty.ty_obj(_)) {
            auto box_cell_a =
                cx.build.GEP(av,
                             vec(C_int(0),
                                 C_int(abi.obj_field_box)));
            auto box_cell_b =
                cx.build.GEP(bv,
                             vec(C_int(0),
                                 C_int(abi.obj_field_box)));
            ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
        }
        case (_) {
            cx.fcx.ccx.sess.unimpl("type in iter_structural_ty_full");
        }
    }
    ret r;
}

// Iterates through a pointer range, until the src* hits the src_lim*.
fn iter_sequence_raw(@block_ctxt cx,
                     ValueRef dst,     // elt*
                     ValueRef src,     // elt*
                     ValueRef src_lim, // elt*
                     ValueRef elt_sz,
                     val_pair_fn f) -> result {

    auto bcx = cx;

    let ValueRef dst_int = vp2i(bcx, dst);
    let ValueRef src_int = vp2i(bcx, src);
    let ValueRef src_lim_int = vp2i(bcx, src_lim);

    auto cond_cx = new_scope_block_ctxt(cx, "sequence-iter cond");
    auto body_cx = new_scope_block_ctxt(cx, "sequence-iter body");
    auto next_cx = new_sub_block_ctxt(cx, "next");

    bcx.build.Br(cond_cx.llbb);

    let ValueRef dst_curr = cond_cx.build.Phi(T_int(),
                                              vec(dst_int), vec(bcx.llbb));
    let ValueRef src_curr = cond_cx.build.Phi(T_int(),
                                              vec(src_int), vec(bcx.llbb));

    auto end_test = cond_cx.build.ICmp(lib.llvm.LLVMIntULT,
                                       src_curr, src_lim_int);

    cond_cx.build.CondBr(end_test, body_cx.llbb, next_cx.llbb);

    auto dst_curr_ptr = vi2p(body_cx, dst_curr, T_ptr(T_i8()));
    auto src_curr_ptr = vi2p(body_cx, src_curr, T_ptr(T_i8()));

    auto body_res = f(body_cx, dst_curr_ptr, src_curr_ptr);
    body_cx = body_res.bcx;

    auto dst_next = body_cx.build.Add(dst_curr, elt_sz);
    auto src_next = body_cx.build.Add(src_curr, elt_sz);
    body_cx.build.Br(cond_cx.llbb);

    cond_cx.build.AddIncomingToPhi(dst_curr, vec(dst_next),
                                   vec(body_cx.llbb));
    cond_cx.build.AddIncomingToPhi(src_curr, vec(src_next),
                                   vec(body_cx.llbb));

    ret res(next_cx, C_nil());
}


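// Applies f to every element in the half-open range [src, src_lim), passing
// each element through load_scalar_or_boxed before the call.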
fn iter_sequence_inner(@block_ctxt cx,
                       ValueRef src,     // elt*
                       ValueRef src_lim, // elt*
                       @ty.t elt_ty,
                       val_and_ty_fn f) -> result {
    fn adaptor_fn(val_and_ty_fn f,
                  @ty.t elt_ty,
                  @block_ctxt cx,
                  ValueRef dst,
                  ValueRef src) -> result {
        auto llptrty;
        if (!ty.type_has_dynamic_size(elt_ty)) {
            auto llty = type_of(cx.fcx.ccx, elt_ty);
            llptrty = T_ptr(llty);
        } else {
            llptrty = T_ptr(T_ptr(T_i8()));
        }

        auto p = cx.build.PointerCast(src, llptrty);
        ret f(cx, load_scalar_or_boxed(cx, p, elt_ty), elt_ty);
    }

    auto elt_sz = size_of(cx, elt_ty);
    be iter_sequence_raw(elt_sz.bcx, src, src, src_lim, elt_sz.val,
                         bind adaptor_fn(f, elt_ty, _, _, _));
}


// Iterates through the elements of a vec or str.
fn iter_sequence(@block_ctxt cx,
                 ValueRef v,
                 @ty.t t,
                 val_and_ty_fn f) -> result {

    fn iter_sequence_body(@block_ctxt cx,
                          ValueRef v,
                          @ty.t elt_ty,
                          val_and_ty_fn f,
                          bool trailing_null) -> result {

        auto p0 = cx.build.GEP(v, vec(C_int(0),
                                      C_int(abi.vec_elt_data)));
        auto lenptr = cx.build.GEP(v, vec(C_int(0),
                                          C_int(abi.vec_elt_fill)));

        auto llunit_ty;
        if (ty.type_has_dynamic_size(elt_ty)) {
            llunit_ty = T_i8();
        } else {
            llunit_ty = type_of(cx.fcx.ccx, elt_ty);
        }

        auto bcx = cx;

        auto len = bcx.build.Load(lenptr);
        if (trailing_null) {
            auto unit_sz = size_of(bcx, elt_ty);
            bcx = unit_sz.bcx;
            len = bcx.build.Sub(len, unit_sz.val);
        }

        auto p1 = vi2p(bcx, bcx.build.Add(vp2i(bcx, p0), len),
                       T_ptr(llunit_ty));

        ret iter_sequence_inner(cx, p0, p1, elt_ty, f);
    }

    alt (t.struct) {
        case (ty.ty_vec(?elt)) {
            ret iter_sequence_body(cx, v, elt.ty, f, false);
        }
        case (ty.ty_str) {
            auto et = plain_ty(ty.ty_machine(common.ty_u8));
            ret iter_sequence_body(cx, v, et, f, true);
        }
        case (_) { fail; }
    }
    cx.fcx.ccx.sess.bug("bad type in trans.iter_sequence");
    fail;
}

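// Calls the glue function stored at the given field offset of a tydesc,
// passing along the tydesc's type-parameter descriptors and the value cast
// to an i8*.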
fn call_tydesc_glue_full(@block_ctxt cx, ValueRef v,
                         ValueRef tydesc, int field) {
    auto llrawptr = cx.build.BitCast(v, T_ptr(T_i8()));
    auto lltydescs = cx.build.GEP(tydesc,
                                  vec(C_int(0),
                                      C_int(abi.tydesc_field_first_param)));
    lltydescs = cx.build.Load(lltydescs);
    auto llfnptr = cx.build.GEP(tydesc, vec(C_int(0), C_int(field)));
    auto llfn = cx.build.Load(llfnptr);

    // FIXME: this adjustment has to do with the ridiculous encoding of
    // glue-pointer-constants in the tydesc records: They are tydesc-relative
    // displacements.  This is purely for compatibility with rustboot and
    // should go when it is discarded.
    llfn = vi2p(cx, cx.build.Add(vp2i(cx, llfn),
                                 vp2i(cx, tydesc)),
                val_ty(llfn));

    cx.build.FastCall(llfn, vec(C_null(T_ptr(T_nil())),
                                cx.fcx.lltaskptr,
                                C_null(T_ptr(T_nil())),
                                lltydescs,
                                llrawptr));
}

fn call_tydesc_glue(@block_ctxt cx, ValueRef v, @ty.t t, int field) {
    auto td = get_tydesc(cx, t);
    call_tydesc_glue_full(td.bcx, v, td.val, field);
}

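// Runs the take glue for a value; scalar types need no work.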
fn take_ty(@block_ctxt cx,
                    ValueRef v,
                    @ty.t t) -> result {
    if (!ty.type_is_scalar(t)) {
        call_tydesc_glue(cx, v, t, abi.tydesc_field_take_glue_off);
    }
    ret res(cx, C_nil());
}

fn drop_slot(@block_ctxt cx,
             ValueRef slot,
             @ty.t t) -> result {
    auto llptr = load_scalar_or_boxed(cx, slot, t);
    auto re = drop_ty(cx, llptr, t);

    auto llty = val_ty(slot);
    auto llelemty = lib.llvm.llvm.LLVMGetElementType(llty);
    re.bcx.build.Store(C_null(llelemty), slot);
    ret re;
}

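// Runs the drop glue for a value; scalar types need no work.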
fn drop_ty(@block_ctxt cx,
           ValueRef v,
           @ty.t t) -> result {

    if (!ty.type_is_scalar(t)) {
        call_tydesc_glue(cx, v, t, abi.tydesc_field_drop_glue_off);
    }
    ret res(cx, C_nil());
}

fn call_memcpy(@block_ctxt cx,
               ValueRef dst,
               ValueRef src,
               ValueRef n_bytes) -> result {
    auto src_ptr = cx.build.PointerCast(src, T_ptr(T_i8()));
    auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
    auto size = cx.build.IntCast(n_bytes, T_int());
    ret res(cx, cx.build.FastCall(cx.fcx.ccx.glues.memcpy_glue,
                                  vec(dst_ptr, src_ptr, size)));
}

fn call_bzero(@block_ctxt cx,
              ValueRef dst,
              ValueRef n_bytes) -> result {
    auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
    auto size = cx.build.IntCast(n_bytes, T_int());
    ret res(cx, cx.build.FastCall(cx.fcx.ccx.glues.bzero_glue,
                                  vec(dst_ptr, size)));
}

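// Copies a value of type t from src to dst, consulting the type's tydesc for
// the size when it is not known statically.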
fn memcpy_ty(@block_ctxt cx,
             ValueRef dst,
             ValueRef src,
             @ty.t t) -> result {
    if (ty.type_has_dynamic_size(t)) {
        auto llszptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
        auto llsz = llszptr.bcx.build.Load(llszptr.val);
        ret call_memcpy(llszptr.bcx, dst, src, llsz);

    } else {
        ret res(cx, cx.build.Store(cx.build.Load(src), dst));
    }
}

tag copy_action {
    INIT;
    DROP_EXISTING;
}

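// Copies src into dst with refcount bookkeeping: scalars, native types and
// nil are stored directly; boxes are taken first (and the old dst dropped
// when action is DROP_EXISTING); structural and dynamically sized values are
// taken and then copied with memcpy_ty.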
fn copy_ty(@block_ctxt cx,
           copy_action action,
           ValueRef dst,
           ValueRef src,
           @ty.t t) -> result {
    if (ty.type_is_scalar(t) || ty.type_is_native(t)) {
        ret res(cx, cx.build.Store(src, dst));

    } else if (ty.type_is_nil(t)) {
        ret res(cx, C_nil());

    } else if (ty.type_is_boxed(t)) {
        auto r = take_ty(cx, src, t);
        if (action == DROP_EXISTING) {
            r = drop_ty(r.bcx, r.bcx.build.Load(dst), t);
        }
        ret res(r.bcx, r.bcx.build.Store(src, dst));

    } else if (ty.type_is_structural(t) ||
               ty.type_has_dynamic_size(t)) {
        auto r = take_ty(cx, src, t);
        if (action == DROP_EXISTING) {
            r = drop_ty(r.bcx, dst, t);
        }
        ret memcpy_ty(r.bcx, dst, src, t);
    }

    cx.fcx.ccx.sess.bug("unexpected type in trans.copy_ty: " +
                        ty.ty_to_str(t));
    fail;
}

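// Translates a literal into an LLVM constant.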
fn trans_lit(@crate_ctxt cx, &ast.lit lit, &ast.ann ann) -> ValueRef {
    alt (lit.node) {
        case (ast.lit_int(?i)) {
            ret C_int(i);
        }
        case (ast.lit_uint(?u)) {
            ret C_int(u as int);
        }
        case (ast.lit_mach_int(?tm, ?i)) {
            // FIXME: the entire handling of mach types falls apart
            // if target int width is larger than host, at the moment;
            // re-do the mach-int types using 'big' when that works.
            auto t = T_int();
            alt (tm) {
                case (common.ty_u8) { t = T_i8(); }
                case (common.ty_u16) { t = T_i16(); }
                case (common.ty_u32) { t = T_i32(); }
                case (common.ty_u64) { t = T_i64(); }

                case (common.ty_i8) { t = T_i8(); }
                case (common.ty_i16) { t = T_i16(); }
                case (common.ty_i32) { t = T_i32(); }
                case (common.ty_i64) { t = T_i64(); }
            }
            ret C_integral(i, t);
        }
        case(ast.lit_float(?fs)) {
            ret C_float(fs);
        }
        case(ast.lit_mach_float(?tm, ?s)) {
            auto t = T_float();
            alt(tm) {
                case(common.ty_f32) { t = T_f32(); }
                case(common.ty_f64) { t = T_f64(); }
            }
            ret C_floating(s, t);
        }
        case (ast.lit_char(?c)) {
            ret C_integral(c as int, T_char());
        }
        case (ast.lit_bool(?b)) {
            ret C_bool(b);
        }
        case (ast.lit_nil) {
            ret C_nil();
        }
        case (ast.lit_str(?s)) {
            ret C_str(cx, s);
        }
    }
}

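// Replaces ty_int / ty_uint with the corresponding machine type of the
// target, leaving all other types unchanged.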
fn target_type(@crate_ctxt cx, @ty.t t) -> @ty.t {
    alt (t.struct) {
        case (ty.ty_int) {
            auto tm = ty.ty_machine(cx.sess.get_targ_cfg().int_type);
            ret @rec(struct=tm with *t);
        }
        case (ty.ty_uint) {
            auto tm = ty.ty_machine(cx.sess.get_targ_cfg().uint_type);
            ret @rec(struct=tm with *t);
        }
        case (_) { /* fall through */ }
    }
    ret t;
}

fn node_ann_type(@crate_ctxt cx, &ast.ann a) -> @ty.t {
    alt (a) {
        case (ast.ann_none) {
            cx.sess.bug("missing type annotation");
        }
        case (ast.ann_type(?t, _, _)) {
            ret target_type(cx, t);
        }
    }
}

fn node_ann_ty_params(&ast.ann a) -> vec[@ty.t] {
    alt (a) {
        case (ast.ann_none) {
            log "missing type annotation";
            fail;
        }
        case (ast.ann_type(_, ?tps_opt, _)) {
            alt (tps_opt) {
                case (none[vec[@ty.t]]) {
                    log "type annotation has no ty params";
                    fail;
                }
                case (some[vec[@ty.t]](?tps)) { ret tps; }
            }
        }
    }
}

fn node_type(@crate_ctxt cx, &ast.ann a) -> TypeRef {
    ret type_of(cx, node_ann_type(cx, a));
}

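// Translates unary operators: bitwise and boolean negation, arithmetic
// negation, boxing and dereference.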
fn trans_unary(@block_ctxt cx, ast.unop op,
               @ast.expr e, &ast.ann a) -> result {

    auto sub = trans_expr(cx, e);
    auto e_ty = ty.expr_ty(e);

    alt (op) {
        case (ast.bitnot) {
            sub = autoderef(sub.bcx, sub.val, ty.expr_ty(e));
            ret res(sub.bcx, sub.bcx.build.Not(sub.val));
        }
        case (ast.not) {
            sub = autoderef(sub.bcx, sub.val, ty.expr_ty(e));
            ret res(sub.bcx, sub.bcx.build.Not(sub.val));
        }
        case (ast.neg) {
            sub = autoderef(sub.bcx, sub.val, ty.expr_ty(e));
            if(e_ty.struct == ty.ty_float) {
                ret res(sub.bcx, sub.bcx.build.FNeg(sub.val));
            }
            else {
                ret res(sub.bcx, sub.bcx.build.Neg(sub.val));
            }
        }
        case (ast.box(_)) {
            auto e_ty = ty.expr_ty(e);
            auto e_val = sub.val;
            auto box_ty = node_ann_type(sub.bcx.fcx.ccx, a);
            sub = trans_malloc_boxed(sub.bcx, e_ty);
            find_scope_cx(cx).cleanups +=
                vec(clean(bind drop_ty(_, sub.val, box_ty)));

            auto box = sub.val;
            auto rc = sub.bcx.build.GEP(box,
                                        vec(C_int(0),
                                            C_int(abi.box_rc_field_refcnt)));
            auto body = sub.bcx.build.GEP(box,
                                          vec(C_int(0),
                                              C_int(abi.box_rc_field_body)));
            sub.bcx.build.Store(C_int(1), rc);

            // Cast the body type to the type of the value. This is needed to
            // make tags work, since tags have a different LLVM type depending
            // on whether they're boxed or not.
            if (!ty.type_has_dynamic_size(e_ty)) {
                auto llety = T_ptr(type_of(sub.bcx.fcx.ccx, e_ty));
                body = sub.bcx.build.PointerCast(body, llety);
            }

            sub = copy_ty(sub.bcx, INIT, body, e_val, e_ty);
            ret res(sub.bcx, box);
        }
        case (ast.deref) {
            auto val = sub.bcx.build.GEP(sub.val,
                                         vec(C_int(0),
                                             C_int(abi.box_rc_field_body)));
            auto e_ty = node_ann_type(sub.bcx.fcx.ccx, a);
            if (ty.type_is_scalar(e_ty) ||
                ty.type_is_nil(e_ty)) {
                val = sub.bcx.build.Load(val);
            }
            ret res(sub.bcx, val);
        }
    }
    fail;
}

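// Translates a comparison. Scalars are compared directly; structural types
// and sequences are compared lexicographically, element by element (see the
// comment block below).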
fn trans_compare(@block_ctxt cx0, ast.binop op, @ty.t t0,
                 ValueRef lhs0, ValueRef rhs0) -> result {

    auto cx = cx0;

    auto lhs_r = autoderef(cx, lhs0, t0);
    auto lhs = lhs_r.val;
    cx = lhs_r.bcx;

    auto rhs_r = autoderef(cx, rhs0, t0);
    auto rhs = rhs_r.val;
    cx = rhs_r.bcx;

    auto t = autoderefed_ty(t0);

    if (ty.type_is_scalar(t)) {
        ret res(cx, trans_scalar_compare(cx, op, t, lhs, rhs));

    } else if (ty.type_is_structural(t)
               || ty.type_is_sequence(t)) {

        auto scx = new_sub_block_ctxt(cx, "structural compare start");
        auto next = new_sub_block_ctxt(cx, "structural compare end");
        cx.build.Br(scx.llbb);

        /*
         * We're doing lexicographic comparison here. We start with the
         * assumption that the two input elements are equal. Depending on
         * operator, this means that the result is either true or false;
         * equality produces 'true' for ==, <= and >=. It produces 'false' for
         * !=, < and >.
         *
         * We then move one element at a time through the structure checking
         * for pairwise element equality. If we have equality, our assumption
         * about overall sequence equality is not modified, so we have to move
         * to the next element.
         *
         * If we do not have pairwise element equality, we have reached an
         * element that 'decides' the lexicographic comparison. So we exit the
         * loop with a flag that indicates the true/false sense of that
         * decision, by testing the element again with the operator we're
         * interested in.
         *
         * When we're lucky, LLVM should be able to fold some of these two
         * tests together (as they're applied to the same operands and are
         * sometimes redundant). But we don't bother trying to
         * optimize combinations like that, at this level.
         */

        auto flag = alloca(scx, T_i1());

        if (ty.type_is_sequence(t)) {

            // If we hit == all the way through the minimum-shared-length
            // section, default to judging the relative sequence lengths.
            auto len_cmp =
                trans_integral_compare(scx, op, plain_ty(ty.ty_uint),
                                       vec_fill(scx, lhs),
                                       vec_fill(scx, rhs));
            scx.build.Store(len_cmp, flag);

        } else {
            auto T = C_integral(1, T_i1());
            auto F = C_integral(0, T_i1());

            alt (op) {
                // ==, <= and >= default to true if they find == all the way.
                case (ast.eq) { scx.build.Store(T, flag); }
                case (ast.le) { scx.build.Store(T, flag); }
                case (ast.ge) { scx.build.Store(T, flag); }
                case (_) {
                    // < > default to false if they find == all the way.
                    scx.build.Store(F, flag);
                }

            }
        }

        fn inner(@block_ctxt last_cx,
                 bool load_inner,
                 ValueRef flag,
                 ast.binop op,
                 @block_ctxt cx,
                 ValueRef av0,
                 ValueRef bv0,
                 @ty.t t) -> result {

            auto cnt_cx = new_sub_block_ctxt(cx, "continue comparison");
            auto stop_cx = new_sub_block_ctxt(cx, "stop comparison");

            auto av = av0;
            auto bv = bv0;
            if (load_inner) {
                av = load_scalar_or_boxed(cx, av, t);
                bv = load_scalar_or_boxed(cx, bv, t);
            }

            // First 'eq' comparison: if so, continue to next elts.
            auto eq_r = trans_compare(cx, ast.eq, t, av, bv);
            eq_r.bcx.build.CondBr(eq_r.val, cnt_cx.llbb, stop_cx.llbb);

            // Second 'op' comparison: find out how this elt-pair decides.
            auto stop_r = trans_compare(stop_cx, op, t, av, bv);
            stop_r.bcx.build.Store(stop_r.val, flag);
            stop_r.bcx.build.Br(last_cx.llbb);
            ret res(cnt_cx, C_nil());
        }

        auto r;
        if (ty.type_is_structural(t)) {
            r = iter_structural_ty_full(scx, lhs, rhs, t,
                                        bind inner(next, false, flag, op,
                                                   _, _, _, _));
        } else {
            auto lhs_p0 = vec_p0(scx, lhs);
            auto rhs_p0 = vec_p0(scx, rhs);
            auto min_len = umin(scx, vec_fill(scx, lhs), vec_fill(scx, rhs));
            auto rhs_lim = scx.build.GEP(rhs_p0, vec(min_len));
            auto elt_ty = ty.sequence_element_type(t);
            auto elt_llsz_r = size_of(scx, elt_ty);
            scx = elt_llsz_r.bcx;
            r = iter_sequence_raw(scx, lhs_p0, rhs_p0, rhs_lim,
                                  elt_llsz_r.val,
                                  bind inner(next, true, flag, op,
                                             _, _, _, elt_ty));
        }

        r.bcx.build.Br(next.llbb);
        auto v = next.build.Load(flag);
        ret res(next, v);


    } else {
        // FIXME: compare obj, fn by pointer?
        cx.fcx.ccx.sess.unimpl("type in trans_compare");
        ret res(cx, C_bool(false));
    }
}

fn trans_scalar_compare(@block_ctxt cx, ast.binop op, @ty.t t,
                        ValueRef lhs, ValueRef rhs) -> ValueRef {
    if (ty.type_is_fp(t)) {
        ret trans_fp_compare(cx, op, t, lhs, rhs);
    } else {
        ret trans_integral_compare(cx, op, t, lhs, rhs);
    }
}

fn trans_fp_compare(@block_ctxt cx, ast.binop op, @ty.t fptype,
                    ValueRef lhs, ValueRef rhs) -> ValueRef {

    auto cmp = lib.llvm.LLVMIntEQ;
    alt (op) {
        // FIXME: possibly use the unordered-or-< predicates here,
        // for now we're only going with ordered-and-< style (no NaNs).
        case (ast.eq) { cmp = lib.llvm.LLVMRealOEQ; }
        case (ast.ne) { cmp = lib.llvm.LLVMRealONE; }
        case (ast.lt) { cmp = lib.llvm.LLVMRealOLT; }
        case (ast.gt) { cmp = lib.llvm.LLVMRealOGT; }
        case (ast.le) { cmp = lib.llvm.LLVMRealOLE; }
        case (ast.ge) { cmp = lib.llvm.LLVMRealOGE; }
    }

    ret cx.build.FCmp(cmp, lhs, rhs);
}

fn trans_integral_compare(@block_ctxt cx, ast.binop op, @ty.t intype,
                          ValueRef lhs, ValueRef rhs) -> ValueRef {
    auto cmp = lib.llvm.LLVMIntEQ;
    alt (op) {
        case (ast.eq) { cmp = lib.llvm.LLVMIntEQ; }
        case (ast.ne) { cmp = lib.llvm.LLVMIntNE; }

        case (ast.lt) {
            if (ty.type_is_signed(intype)) {
                cmp = lib.llvm.LLVMIntSLT;
            } else {
                cmp = lib.llvm.LLVMIntULT;
            }
        }
        case (ast.le) {
            if (ty.type_is_signed(intype)) {
                cmp = lib.llvm.LLVMIntSLE;
            } else {
                cmp = lib.llvm.LLVMIntULE;
            }
        }
        case (ast.gt) {
            if (ty.type_is_signed(intype)) {
                cmp = lib.llvm.LLVMIntSGT;
            } else {
                cmp = lib.llvm.LLVMIntUGT;
            }
        }
        case (ast.ge) {
            if (ty.type_is_signed(intype)) {
                cmp = lib.llvm.LLVMIntSGE;
            } else {
                cmp = lib.llvm.LLVMIntUGE;
            }
        }
    }
    ret cx.build.ICmp(cmp, lhs, rhs);
}

fn trans_vec_append(@block_ctxt cx, @ty.t t,
                    ValueRef lhs, ValueRef rhs) -> result {

    auto elt_ty = ty.sequence_element_type(t);

    auto skip_null = C_bool(false);
    alt (t.struct) {
        case (ty.ty_str) { skip_null = C_bool(true); }
        case (_) { }
    }

    auto bcx = cx;

    auto llvec_tydesc = get_tydesc(bcx, t);
    bcx = llvec_tydesc.bcx;

    auto llelt_tydesc = get_tydesc(bcx, elt_ty);
    bcx = llelt_tydesc.bcx;

    auto dst = bcx.build.PointerCast(lhs, T_ptr(T_opaque_vec_ptr()));
    auto src = bcx.build.PointerCast(rhs, T_opaque_vec_ptr());

    ret res(bcx, bcx.build.FastCall(cx.fcx.ccx.glues.vec_append_glue,
                                    vec(cx.fcx.lltaskptr,
                                        llvec_tydesc.val,
                                        llelt_tydesc.val,
                                        dst, src, skip_null)));
}

fn trans_vec_add(@block_ctxt cx, @ty.t t,
                 ValueRef lhs, ValueRef rhs) -> result {
    auto r = alloc_ty(cx, t);
    auto tmp = r.val;
    r = copy_ty(r.bcx, INIT, tmp, lhs, t);
    auto bcx = trans_vec_append(r.bcx, t, tmp, rhs).bcx;
    tmp = load_scalar_or_boxed(bcx, tmp, t);
    find_scope_cx(cx).cleanups +=
        vec(clean(bind drop_ty(_, tmp, t)));
    ret res(bcx, tmp);
}


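// Translates the eager (non-short-circuiting) binary operators, selecting
// float, signed or unsigned instructions as appropriate. '+' on sequences
// becomes a vector append, and comparison operators fall through to
// trans_compare.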
fn trans_eager_binop(@block_ctxt cx, ast.binop op, @ty.t intype,
                     ValueRef lhs, ValueRef rhs) -> result {

    auto is_float = false;
    alt (intype.struct) {
        case (ty.ty_float) {
            is_float = true;
        }
        case (_) {
            is_float = false;
        }
    }

    alt (op) {
        case (ast.add) {
            if (ty.type_is_sequence(intype)) {
                ret trans_vec_add(cx, intype, lhs, rhs);
            }
            if (is_float) {
                ret res(cx, cx.build.FAdd(lhs, rhs));
            }
            else {
                ret res(cx, cx.build.Add(lhs, rhs));
            }
        }
        case (ast.sub) {
            if (is_float) {
                ret res(cx, cx.build.FSub(lhs, rhs));
            }
            else {
                ret res(cx, cx.build.Sub(lhs, rhs));
            }
        }

        case (ast.mul) {
            if (is_float) {
                ret res(cx, cx.build.FMul(lhs, rhs));
            }
            else {
                ret res(cx, cx.build.Mul(lhs, rhs));
            }
        }

        case (ast.div) {
            if (is_float) {
                ret res(cx, cx.build.FDiv(lhs, rhs));
            }
            if (ty.type_is_signed(intype)) {
                ret res(cx, cx.build.SDiv(lhs, rhs));
            } else {
                ret res(cx, cx.build.UDiv(lhs, rhs));
            }
        }
        case (ast.rem) {
            if (is_float) {
                ret res(cx, cx.build.FRem(lhs, rhs));
            }
            if (ty.type_is_signed(intype)) {
                ret res(cx, cx.build.SRem(lhs, rhs));
            } else {
                ret res(cx, cx.build.URem(lhs, rhs));
            }
        }

        case (ast.bitor) { ret res(cx, cx.build.Or(lhs, rhs)); }
        case (ast.bitand) { ret res(cx, cx.build.And(lhs, rhs)); }
        case (ast.bitxor) { ret res(cx, cx.build.Xor(lhs, rhs)); }
        case (ast.lsl) { ret res(cx, cx.build.Shl(lhs, rhs)); }
        case (ast.lsr) { ret res(cx, cx.build.LShr(lhs, rhs)); }
        case (ast.asr) { ret res(cx, cx.build.AShr(lhs, rhs)); }
        case (_) {
            ret trans_compare(cx, op, intype, lhs, rhs);
        }
    }
    fail;
}

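// Follows box types down to the underlying value, loading through each level
// of indirection.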
fn autoderef(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
    let ValueRef v1 = v;
    let @ty.t t1 = t;

    while (true) {
        alt (t1.struct) {
            case (ty.ty_box(?mt)) {
                auto body = cx.build.GEP(v1,
                                         vec(C_int(0),
                                             C_int(abi.box_rc_field_body)));
                t1 = mt.ty;
                v1 = load_scalar_or_boxed(cx, body, t1);
            }
            case (_) {
                ret res(cx, v1);
            }
        }
    }
}

fn autoderefed_ty(@ty.t t) -> @ty.t {
    let @ty.t t1 = t;

    while (true) {
        alt (t1.struct) {
            case (ty.ty_box(?mt)) {
                t1 = mt.ty;
            }
            case (_) {
                ret t1;
            }
        }
    }
}

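// Translates binary operators: && and || are evaluated lazily with their own
// blocks; everything else is translated eagerly.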
fn trans_binary(@block_ctxt cx, ast.binop op,
                @ast.expr a, @ast.expr b) -> result {

    // First couple cases are lazy:

    alt (op) {
        case (ast.and) {
            // Lazy-eval and
            auto lhs_res = trans_expr(cx, a);
            lhs_res = autoderef(lhs_res.bcx, lhs_res.val, ty.expr_ty(a));

            auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
            auto rhs_res = trans_expr(rhs_cx, b);
            rhs_res = autoderef(rhs_res.bcx, rhs_res.val, ty.expr_ty(b));
2981

2982
            auto lhs_false_cx = new_scope_block_ctxt(cx, "lhs false");
2983
            auto lhs_false_res = res(lhs_false_cx, C_bool(false));
2984 2985 2986

            lhs_res.bcx.build.CondBr(lhs_res.val,
                                     rhs_cx.llbb,
2987 2988 2989 2990
                                     lhs_false_cx.llbb);

            ret join_results(cx, T_bool(),
                             vec(lhs_false_res, rhs_res));
2991 2992 2993 2994 2995
        }

        case (ast.or) {
            // Lazy-eval or
            auto lhs_res = trans_expr(cx, a);
            lhs_res = autoderef(lhs_res.bcx, lhs_res.val, ty.expr_ty(a));

            auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
            auto rhs_res = trans_expr(rhs_cx, b);
            rhs_res = autoderef(rhs_res.bcx, rhs_res.val, ty.expr_ty(b));

            auto lhs_true_cx = new_scope_block_ctxt(cx, "lhs true");
            auto lhs_true_res = res(lhs_true_cx, C_bool(true));

            lhs_res.bcx.build.CondBr(lhs_res.val,
                                     lhs_true_cx.llbb,
                                     rhs_cx.llbb);

            ret join_results(cx, T_bool(),
                             vec(lhs_true_res, rhs_res));
        }

        case (_) {
            // Remaining cases are eager:
            auto lhs = trans_expr(cx, a);
            auto lhty = ty.expr_ty(a);
            lhs = autoderef(lhs.bcx, lhs.val, lhty);
            auto rhs = trans_expr(lhs.bcx, b);
            auto rhty = ty.expr_ty(b);
            rhs = autoderef(rhs.bcx, rhs.val, rhty);
            ret trans_eager_binop(rhs.bcx, op,
                                  autoderefed_ty(lhty),
                                  lhs.val, rhs.val);
        }
    }
    fail;
}

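// Merge the results flowing out of several predecessor blocks (e.g. the two
// sides of a lazy boolean operator, or the arms of an 'if') into a fresh
// "join" block, using a phi node to select the incoming value. Terminated
// (dead) predecessors are skipped entirely.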
fn join_results(@block_ctxt parent_cx,
                TypeRef t,
                vec[result] ins)
    -> result {

    let vec[result] live = vec();
    let vec[ValueRef] vals = vec();
    let vec[BasicBlockRef] bbs = vec();

    for (result r in ins) {
        if (! is_terminated(r.bcx)) {
            live += vec(r);
            vals += vec(r.val);
            bbs += vec(r.bcx.llbb);
        }
    }

    alt (_vec.len[result](live)) {
        case (0u) {
            // No incoming edges are live, so we're in dead-code-land.
            // Arbitrarily pick the first dead edge, since the caller
            // is just going to propagate it outward.
            check (_vec.len[result](ins) >= 1u);
            ret ins.(0);
        }

        case (_) { /* fall through */ }
    }

    // We have >1 incoming edges. Make a join block and br+phi them into it.
    auto join_cx = new_sub_block_ctxt(parent_cx, "join");
    for (result r in live) {
        r.bcx.build.Br(join_cx.llbb);
    }
    auto phi = join_cx.build.Phi(t, vals, bbs);
    ret res(join_cx, phi);
}

fn trans_if(@block_ctxt cx, @ast.expr cond,
            &ast.block thn, &option.t[@ast.expr] els) -> result {

    auto cond_res = trans_expr(cx, cond);

    auto then_cx = new_scope_block_ctxt(cx, "then");
    auto then_res = trans_block(then_cx, thn);

    auto else_cx = new_scope_block_ctxt(cx, "else");

    auto else_res;
    auto expr_llty;
    alt (els) {
        case (some[@ast.expr](?elexpr)) {
            alt (elexpr.node) {
                case (ast.expr_if(_, _, _, _)) {
                    else_res = trans_expr(else_cx, elexpr);
                }
                case (ast.expr_block(?blk, _)) {
                    // Calling trans_block directly instead of trans_expr
                    // because trans_expr will create another scope block
                    // context for the block, but we've already got the
                    // 'else' context
                    else_res = trans_block(else_cx, blk);
                }
            }

            // If we have an else expression, then the entire
            // if expression can have a non-nil type.
            // FIXME: This isn't quite right, particularly re: dynamic types
            auto expr_ty = ty.expr_ty(elexpr);
            if (ty.type_has_dynamic_size(expr_ty)) {
                expr_llty = T_typaram_ptr(cx.fcx.ccx.tn);
            } else {
                expr_llty = type_of(else_res.bcx.fcx.ccx, expr_ty);
                if (ty.type_is_structural(expr_ty)) {
                    expr_llty = T_ptr(expr_llty);
                }
            }
        }
        case (_) {
            else_res = res(else_cx, C_nil());
            expr_llty = T_nil();
        }
    }

    cond_res.bcx.build.CondBr(cond_res.val,
                              then_cx.llbb,
                              else_cx.llbb);

    ret join_results(cx, expr_llty,
                     vec(then_res, else_res));
}

fn trans_for(@block_ctxt cx,
             @ast.decl decl,
             @ast.expr seq,
             &ast.block body) -> result {
    fn inner(@block_ctxt cx,
             @ast.local local, ValueRef curr,
             @ty.t t, ast.block body,
             @block_ctxt outer_next_cx) -> result {

        auto next_cx = new_sub_block_ctxt(cx, "next");
        auto scope_cx =
            new_loop_scope_block_ctxt(cx, option.some[@block_ctxt](next_cx),
                                      outer_next_cx, "for loop scope");

        cx.build.Br(scope_cx.llbb);
        auto local_res = alloc_local(scope_cx, local);
        auto bcx = copy_ty(local_res.bcx, INIT, local_res.val, curr, t).bcx;
        scope_cx.cleanups +=
            vec(clean(bind drop_slot(_, local_res.val, t)));
        bcx = trans_block(bcx, body).bcx;
        bcx.build.Br(next_cx.llbb);
        ret res(next_cx, C_nil());
    }


    let @ast.local local;
    alt (decl.node) {
        case (ast.decl_local(?loc)) {
            local = loc;
        }
    }

    auto next_cx = new_sub_block_ctxt(cx, "next");
    auto seq_ty = ty.expr_ty(seq);
    auto seq_res = trans_expr(cx, seq);
    auto it = iter_sequence(seq_res.bcx, seq_res.val, seq_ty,
                            bind inner(_, local, _, _, body, next_cx));
    it.bcx.build.Br(next_cx.llbb);
    ret res(next_cx, it.val);
}


// Iterator translation

// Searches through a block for all references to locals or upvars in this
// frame and returns the list of definition IDs thus found.
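//
// For example, in the (hypothetical) source:
//
//     let int x = 10;
//     foreach (int i in foo()) { log x + i; }
//
// the body refers to `x`, which is declared outside it, so x's def_id is
// reported; `i`, declared by the loop itself, is not.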
fn collect_upvars(@block_ctxt cx, &ast.block bloc, &ast.def_id initial_decl)
        -> vec[ast.def_id] {
    type env = @rec(
        mutable vec[ast.def_id] refs,
        hashmap[ast.def_id,()] decls
    );

    fn fold_expr_path(&env e, &common.span sp, &ast.path p,
                      &option.t[ast.def] d, ast.ann a) -> @ast.expr {
        alt (option.get[ast.def](d)) {
            case (ast.def_arg(?did))    { e.refs += vec(did);   }
            case (ast.def_local(?did))  { e.refs += vec(did);   }
            case (ast.def_upvar(?did))  { e.refs += vec(did);   }
            case (_)                    { /* ignore */          }
        }

        ret @fold.respan[ast.expr_](sp, ast.expr_path(p, d, a));
    }

    fn fold_decl_local(&env e, &common.span sp, @ast.local local)
            -> @ast.decl {
        e.decls.insert(local.id, ());
        ret @fold.respan[ast.decl_](sp, ast.decl_local(local));
    }

    auto fep = fold_expr_path;
    auto fdl = fold_decl_local;
    auto fld = @rec(
        fold_expr_path=fep,
        fold_decl_local=fdl
        with *fold.new_identity_fold[env]()
    );

    let vec[ast.def_id] refs = vec();
    let hashmap[ast.def_id,()] decls = new_def_hash[()]();
    decls.insert(initial_decl, ());
    let env e = @rec(mutable refs=refs, decls=decls);

    fold.fold_block[env](e, fld, bloc);

    // Calculate (refs - decls). This is the set of captured upvars.
    let vec[ast.def_id] result = vec();
    for (ast.def_id ref_id in e.refs) {
        if (!decls.contains_key(ref_id)) {
            result += vec(ref_id);
        }
    }

    ret result;
}

fn trans_for_each(@block_ctxt cx,
                  @ast.decl decl,
                  @ast.expr seq,
                  &ast.block body) -> result {

    /*
     * The translation is a little .. complex here. Code like:
     *
     *    let ty1 p = ...;
     *
     *    let ty2 q = ...;
     *
     *    foreach (ty v in foo(a,b)) { body(p,q,v) }
     *
     *
     * Turns into something like so (C/Rust mishmash):
     *
     *    type env = { *ty1 p, *ty2 q, ... };
     *
     *    let env e = { &p, &q, ... };
     *
     *    fn foreach123_body(env* e, ty v) { body(*(e->p),*(e->q),v) }
     *
     *    foo([foreach123_body, env*], a, b);
     *
     */

    // Step 1: walk body and figure out which references it makes
    // escape. This could be determined upstream, and probably ought
    // to be so, eventually. For a first cut, skip this. Null env.

    // FIXME: possibly support alias-mode here?
    auto decl_ty = plain_ty(ty.ty_nil);
    auto decl_id;
    alt (decl.node) {
        case (ast.decl_local(?local)) {
            decl_ty = node_ann_type(cx.fcx.ccx, local.ann);
            decl_id = local.id;
        }
    }

    auto upvars = collect_upvars(cx, body, decl_id);
    auto upvar_count = _vec.len[ast.def_id](upvars);

    auto llbindingsptr;
    if (upvar_count > 0u) {
        // Gather up the upvars.
        let vec[ValueRef] llbindings = vec();
        let vec[TypeRef] llbindingtys = vec();
        for (ast.def_id did in upvars) {
            auto llbinding;
            alt (cx.fcx.lllocals.find(did)) {
                case (none[ValueRef]) {
                    alt (cx.fcx.llupvars.find(did)) {
                        case (none[ValueRef]) {
                            llbinding = cx.fcx.llargs.get(did);
                        }
                        case (some[ValueRef](?llval)) { llbinding = llval; }
                    }
                }
                case (some[ValueRef](?llval)) { llbinding = llval; }
            }
            llbindings += vec(llbinding);
            llbindingtys += vec(val_ty(llbinding));
        }

        // Create an array of bindings and copy in aliases to the upvars.
        llbindingsptr = alloca(cx, T_struct(llbindingtys));
        auto i = 0u;
        while (i < upvar_count) {
            auto llbindingptr = cx.build.GEP(llbindingsptr,
                                             vec(C_int(0), C_int(i as int)));
            cx.build.Store(llbindings.(i), llbindingptr);
            i += 1u;
        }
    } else {
        // Null bindings.
        llbindingsptr = C_null(T_ptr(T_i8()));
    }

    // Create an environment and populate it with the bindings.
    auto llenvptrty = T_closure_ptr(cx.fcx.ccx.tn, T_ptr(T_nil()),
                                    val_ty(llbindingsptr), 0u);
    auto llenvptr = alloca(cx, llvm.LLVMGetElementType(llenvptrty));

    auto llbindingsptrptr = cx.build.GEP(llenvptr,
                                         vec(C_int(0),
                                             C_int(abi.box_rc_field_body),
                                             C_int(2)));
    cx.build.Store(llbindingsptr, llbindingsptrptr);

    // Step 2: Declare foreach body function.

    let str s = mangle_name_by_seq(cx.fcx.ccx, "foreach");

    // The 'env' arg entering the body function is a fake env member (as in
    // the env-part of the normal rust calling convention) that actually
    // points to a stack allocated env in this frame. We bundle that env
    // pointer along with the foreach-body-fn pointer into a 'normal' fn pair
    // and pass it in as a first class fn-arg to the iterator.

    auto iter_body_llty = type_of_fn_full(cx.fcx.ccx, ast.proto_fn,
                                          none[TypeRef],
                                          vec(rec(mode=ast.val, ty=decl_ty)),
                                          plain_ty(ty.ty_nil), 0u);

    let ValueRef lliterbody = decl_internal_fastcall_fn(cx.fcx.ccx.llmod,
                                                       s, iter_body_llty);

    // FIXME: handle ty params properly.
    let vec[ast.ty_param] ty_params = vec();

    auto fcx = new_fn_ctxt(cx.fcx.ccx, lliterbody);
    auto bcx = new_top_block_ctxt(fcx);
    auto lltop = bcx.llbb;

    // Populate the upvars from the environment.
    auto llremoteenvptr = bcx.build.PointerCast(fcx.llenv, llenvptrty);
    auto llremotebindingsptrptr = bcx.build.GEP(llremoteenvptr,
        vec(C_int(0), C_int(abi.box_rc_field_body), C_int(2)));
    auto llremotebindingsptr = bcx.build.Load(llremotebindingsptrptr);

    auto i = 0u;
    while (i < upvar_count) {
        auto upvar_id = upvars.(i);
        auto llupvarptrptr = bcx.build.GEP(llremotebindingsptr,
                                           vec(C_int(0), C_int(i as int)));
        auto llupvarptr = bcx.build.Load(llupvarptrptr);
        fcx.llupvars.insert(upvar_id, llupvarptr);

        i += 1u;
    }

    // Treat the loop variable as an upvar as well. We copy it to an alloca
    // as usual.
    auto lllvar = llvm.LLVMGetParam(fcx.llfn, 3u);
    auto lllvarptr = alloca(bcx, val_ty(lllvar));
    bcx.build.Store(lllvar, lllvarptr);
    fcx.llupvars.insert(decl_id, lllvarptr);

    auto res = trans_block(bcx, body);

    // Tie up the llallocas -> lltop edge.
    new_builder(fcx.llallocas).Br(lltop);

    res.bcx.build.RetVoid();


    // Step 3: Call iter passing [lliterbody, llenv], plus other args.

    alt (seq.node) {

        case (ast.expr_call(?f, ?args, ?ann)) {

            auto pair = alloca(cx, T_fn_pair(cx.fcx.ccx.tn,
                                             iter_body_llty));
            auto code_cell = cx.build.GEP(pair,
                                          vec(C_int(0),
                                              C_int(abi.fn_field_code)));
            cx.build.Store(lliterbody, code_cell);

            auto env_cell = cx.build.GEP(pair, vec(C_int(0),
                                                   C_int(abi.fn_field_box)));
            auto llenvblobptr = cx.build.PointerCast(llenvptr,
                T_opaque_closure_ptr(cx.fcx.ccx.tn));
            cx.build.Store(llenvblobptr, env_cell);

            // log "lliterbody: " + val_str(cx.fcx.ccx.tn, lliterbody);
            ret trans_call(cx, f,
                           some[ValueRef](cx.build.Load(pair)),
                           args,
                           ann);
        }
    }
    fail;
}


fn trans_while(@block_ctxt cx, @ast.expr cond,
               &ast.block body) -> result {

    auto cond_cx = new_scope_block_ctxt(cx, "while cond");
    auto next_cx = new_sub_block_ctxt(cx, "next");
    auto body_cx = new_loop_scope_block_ctxt(cx, option.none[@block_ctxt],
                                             next_cx, "while loop body");

    auto body_res = trans_block(body_cx, body);
    auto cond_res = trans_expr(cond_cx, cond);

    body_res.bcx.build.Br(cond_cx.llbb);

    auto cond_bcx = trans_block_cleanups(cond_res.bcx, cond_cx);
    cond_bcx.build.CondBr(cond_res.val, body_cx.llbb, next_cx.llbb);

    cx.build.Br(cond_cx.llbb);
    ret res(next_cx, C_nil());
}

fn trans_do_while(@block_ctxt cx, &ast.block body,
                  @ast.expr cond) -> result {

    auto next_cx = new_sub_block_ctxt(cx, "next");
    auto body_cx = new_loop_scope_block_ctxt(cx, option.none[@block_ctxt],
                                             next_cx, "do-while loop body");

    auto body_res = trans_block(body_cx, body);
    auto cond_res = trans_expr(body_res.bcx, cond);

    cond_res.bcx.build.CondBr(cond_res.val,
                              body_cx.llbb,
                              next_cx.llbb);
    cx.build.Br(body_cx.llbb);
    ret res(next_cx, body_res.val);
}

// Pattern matching translation
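//
// Each 'alt' arm is handled in two passes: trans_pat_match emits the tests
// that decide whether the arm applies (branching to next_cx on failure),
// and trans_pat_binding then allocates and initializes slots for the names
// the pattern binds before the arm's block is translated.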

fn trans_pat_match(@block_ctxt cx, @ast.pat pat, ValueRef llval,
                   @block_ctxt next_cx) -> result {
    alt (pat.node) {
        case (ast.pat_wild(_)) { ret res(cx, llval); }
        case (ast.pat_bind(_, _, _)) { ret res(cx, llval); }

        case (ast.pat_lit(?lt, ?ann)) {
            auto lllit = trans_lit(cx.fcx.ccx, *lt, ann);
            auto lltype = ty.ann_to_type(ann);
            auto lleq = trans_compare(cx, ast.eq, lltype, llval, lllit);

            auto matched_cx = new_sub_block_ctxt(lleq.bcx, "matched_cx");
            lleq.bcx.build.CondBr(lleq.val, matched_cx.llbb, next_cx.llbb);
            ret res(matched_cx, llval);
        }

        case (ast.pat_tag(?id, ?subpats, ?vdef_opt, ?ann)) {
            auto lltagptr = cx.build.PointerCast(llval,
                T_opaque_tag_ptr(cx.fcx.ccx.tn));

            auto lldiscrimptr = cx.build.GEP(lltagptr,
                                             vec(C_int(0), C_int(0)));
            auto lldiscrim = cx.build.Load(lldiscrimptr);

            auto vdef = option.get[ast.variant_def](vdef_opt);
            auto variant_id = vdef._1;
            auto variant_tag = 0;

            auto variants = tag_variants(cx.fcx.ccx, vdef._0);
            auto i = 0;
            for (variant_info v in variants) {
                auto this_variant_id = v.id;
                if (variant_id._0 == this_variant_id._0 &&
                    variant_id._1 == this_variant_id._1) {
                    variant_tag = i;
                }
                i += 1;
            }

            auto matched_cx = new_sub_block_ctxt(cx, "matched_cx");

            auto lleq = cx.build.ICmp(lib.llvm.LLVMIntEQ, lldiscrim,
                                      C_int(variant_tag));
            cx.build.CondBr(lleq, matched_cx.llbb, next_cx.llbb);

            auto ty_params = node_ann_ty_params(ann);

            if (_vec.len[@ast.pat](subpats) > 0u) {
                auto llblobptr = matched_cx.build.GEP(lltagptr,
                    vec(C_int(0), C_int(1)));
                auto i = 0;
                for (@ast.pat subpat in subpats) {
                    auto rslt = GEP_tag(matched_cx, llblobptr, vdef._0,
                                        vdef._1, ty_params, i);
                    auto llsubvalptr = rslt.val;
                    matched_cx = rslt.bcx;

                    auto llsubval = load_scalar_or_boxed(matched_cx,
                                                         llsubvalptr,
                                                         pat_ty(subpat));
                    auto subpat_res = trans_pat_match(matched_cx, subpat,
                                                      llsubval, next_cx);
                    matched_cx = subpat_res.bcx;
                }
            }

            ret res(matched_cx, llval);
        }
    }

    fail;
}

fn trans_pat_binding(@block_ctxt cx, @ast.pat pat, ValueRef llval)
    -> result {
    alt (pat.node) {
        case (ast.pat_wild(_)) { ret res(cx, llval); }
        case (ast.pat_lit(_, _)) { ret res(cx, llval); }
        case (ast.pat_bind(?id, ?def_id, ?ann)) {
            auto ty = node_ann_type(cx.fcx.ccx, ann);

            auto rslt = alloc_ty(cx, ty);
            auto dst = rslt.val;
            auto bcx = rslt.bcx;

            llvm.LLVMSetValueName(dst, _str.buf(id));
            bcx.fcx.lllocals.insert(def_id, dst);
            bcx.cleanups +=
                vec(clean(bind drop_slot(_, dst, ty)));

            ret copy_ty(bcx, INIT, dst, llval, ty);
        }
        case (ast.pat_tag(_, ?subpats, ?vdef_opt, ?ann)) {
            if (_vec.len[@ast.pat](subpats) == 0u) { ret res(cx, llval); }

            // Get the appropriate variant for this tag.
            auto vdef = option.get[ast.variant_def](vdef_opt);

            auto lltagptr = cx.build.PointerCast(llval,
                T_opaque_tag_ptr(cx.fcx.ccx.tn));
            auto llblobptr = cx.build.GEP(lltagptr, vec(C_int(0), C_int(1)));

            auto ty_param_substs = node_ann_ty_params(ann);

            auto this_cx = cx;
            auto i = 0;
            for (@ast.pat subpat in subpats) {
                auto rslt = GEP_tag(this_cx, llblobptr, vdef._0, vdef._1,
                                    ty_param_substs, i);
                this_cx = rslt.bcx;
                auto llsubvalptr = rslt.val;

                auto llsubval = load_scalar_or_boxed(this_cx, llsubvalptr,
                                                     pat_ty(subpat));
                auto subpat_res = trans_pat_binding(this_cx, subpat,
                                                    llsubval);
                this_cx = subpat_res.bcx;
                i += 1;
            }

            ret res(this_cx, llval);
        }
    }
}

fn trans_alt(@block_ctxt cx, @ast.expr expr,
             vec[ast.arm] arms, ast.ann ann) -> result {
    auto expr_res = trans_expr(cx, expr);

    auto this_cx = expr_res.bcx;
    let vec[result] arm_results = vec();
    for (ast.arm arm in arms) {
        auto next_cx = new_sub_block_ctxt(expr_res.bcx, "next");
        auto match_res = trans_pat_match(this_cx, arm.pat, expr_res.val,
                                         next_cx);

        auto binding_cx = new_scope_block_ctxt(match_res.bcx, "binding");
        match_res.bcx.build.Br(binding_cx.llbb);

        auto binding_res = trans_pat_binding(binding_cx, arm.pat,
                                             expr_res.val);

        auto block_res = trans_block(binding_res.bcx, arm.block);
        arm_results += vec(block_res);

        this_cx = next_cx;
    }

    auto default_cx = this_cx;
    auto default_res = trans_fail(default_cx, expr.span,
                                  "non-exhaustive match failure");

    // FIXME: This isn't quite right, particularly re: dynamic types
    auto expr_ty = ty.ann_to_type(ann);
    auto expr_llty;
    if (ty.type_has_dynamic_size(expr_ty)) {
        expr_llty = T_typaram_ptr(cx.fcx.ccx.tn);
    } else {
        expr_llty = type_of(cx.fcx.ccx, expr_ty);
        if (ty.type_is_structural(expr_ty)) {
            expr_llty = T_ptr(expr_llty);
        }
    }

    ret join_results(cx, expr_llty, arm_results);
P
Patrick Walton 已提交
3600 3601
}

3602
type generic_info = rec(@ty.t item_type,
3603 3604
                        vec[ValueRef] tydescs);

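// The result of translating an lvalue expression: the value itself (or its
// address, when is_mem is true), plus optional generic-instantiation info,
// an optional self-object pointer for method calls, and the method's type
// when a method was selected.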
type lval_result = rec(result res,
                       bool is_mem,
                       option.t[generic_info] generic,
                       option.t[ValueRef] llobj,
                       option.t[@ty.t] method_ty);

fn lval_mem(@block_ctxt cx, ValueRef val) -> lval_result {
    ret rec(res=res(cx, val),
            is_mem=true,
            generic=none[generic_info],
            llobj=none[ValueRef],
            method_ty=none[@ty.t]);
}

fn lval_val(@block_ctxt cx, ValueRef val) -> lval_result {
    ret rec(res=res(cx, val),
            is_mem=false,
            generic=none[generic_info],
            llobj=none[ValueRef],
            method_ty=none[@ty.t]);
}

fn trans_external_path(@block_ctxt cx, ast.def_id did,
                       ty.ty_params_opt_and_ty tpt) -> lval_result {
    auto ccx = cx.fcx.ccx;
    auto name = creader.get_symbol(ccx.sess, did);
    auto v = get_extern_const(ccx.externs, ccx.llmod,
                              name, type_of_ty_params_opt_and_ty(ccx, tpt));
    ret lval_val(cx, v);
}

fn lval_generic_fn(@block_ctxt cx,
                   ty.ty_params_and_ty tpt,
                   ast.def_id fn_id,
                   &ast.ann ann)
        -> lval_result {
    auto lv;
    if (cx.fcx.ccx.sess.get_targ_crate_num() == fn_id._0) {
        // Internal reference.
        check (cx.fcx.ccx.fn_pairs.contains_key(fn_id));
        lv = lval_val(cx, cx.fcx.ccx.fn_pairs.get(fn_id));
    } else {
        // External reference.
        auto tpot = tup(some[vec[ast.def_id]](tpt._0), tpt._1);
        lv = trans_external_path(cx, fn_id, tpot);
    }

    auto monoty;
    auto tys;
    alt (ann) {
        case (ast.ann_none) {
            cx.fcx.ccx.sess.bug("no type annotation for path!");
            fail;
        }
        case (ast.ann_type(?monoty_, ?tps, _)) {
            monoty = monoty_;
            tys = option.get[vec[@ty.t]](tps);
        }
    }

    if (_vec.len[@ty.t](tys) != 0u) {
        auto bcx = cx;
        let vec[ValueRef] tydescs = vec();
        for (@ty.t t in tys) {
            auto td = get_tydesc(bcx, t);
            bcx = td.bcx;
            _vec.push[ValueRef](tydescs, td.val);
        }
        auto gen = rec( item_type = tpt._1,
                        tydescs = tydescs );
        lv = rec(res = res(bcx, lv.res.val),
                 generic = some[generic_info](gen)
                 with lv);
    }
    ret lv;
}

fn lookup_discriminant(@crate_ctxt ccx, ast.def_id tid, ast.def_id vid)
        -> ValueRef {
    alt (ccx.discrims.find(vid)) {
        case (none[ValueRef]) {
            // It's an external discriminant that we haven't seen yet.
            check (ccx.sess.get_targ_crate_num() != vid._0);
            auto sym = creader.get_symbol(ccx.sess, vid);
            auto gvar = llvm.LLVMAddGlobal(ccx.llmod, T_int(), _str.buf(sym));
            llvm.LLVMSetLinkage(gvar,
                                lib.llvm.LLVMExternalLinkage as llvm.Linkage);
            llvm.LLVMSetGlobalConstant(gvar, True);
            ccx.discrims.insert(vid, gvar);
            ret gvar;
        }
        case (some[ValueRef](?llval)) { ret llval; }
    }
}

fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
              &ast.ann ann) -> lval_result {
    alt (dopt) {
        case (some[ast.def](?def)) {
            alt (def) {
                case (ast.def_arg(?did)) {
                    alt (cx.fcx.llargs.find(did)) {
                        case (none[ValueRef]) {
                            check (cx.fcx.llupvars.contains_key(did));
                            ret lval_mem(cx, cx.fcx.llupvars.get(did));
                        }
                        case (some[ValueRef](?llval)) {
                            ret lval_mem(cx, llval);
                        }
                    }
                }
                case (ast.def_local(?did)) {
                    alt (cx.fcx.lllocals.find(did)) {
                        case (none[ValueRef]) {
                            check (cx.fcx.llupvars.contains_key(did));
                            ret lval_mem(cx, cx.fcx.llupvars.get(did));
                        }
                        case (some[ValueRef](?llval)) {
                            ret lval_mem(cx, llval);
                        }
                    }
                }
                case (ast.def_binding(?did)) {
                    check (cx.fcx.lllocals.contains_key(did));
                    ret lval_mem(cx, cx.fcx.lllocals.get(did));
                }
                case (ast.def_obj_field(?did)) {
                    check (cx.fcx.llobjfields.contains_key(did));
                    ret lval_mem(cx, cx.fcx.llobjfields.get(did));
                }
                case (ast.def_fn(?did)) {
                    auto tyt = ty.lookup_generic_item_type(cx.fcx.ccx.sess,
                        cx.fcx.ccx.type_cache, did);
                    ret lval_generic_fn(cx, tyt, did, ann);
                }
                case (ast.def_obj(?did)) {
                    auto tyt = ty.lookup_generic_item_type(cx.fcx.ccx.sess,
                        cx.fcx.ccx.type_cache, did);
                    ret lval_generic_fn(cx, tyt, did, ann);
                }
                case (ast.def_variant(?tid, ?vid)) {
                    auto v_tyt = ty.lookup_generic_item_type(cx.fcx.ccx.sess,
                        cx.fcx.ccx.type_cache, vid);
                    alt (v_tyt._1.struct) {
                        case (ty.ty_fn(_, _, _)) {
                            // N-ary variant.
                            ret lval_generic_fn(cx, v_tyt, vid, ann);
                        }
                        case (_) {
                            // Nullary variant.
                            auto tag_ty = node_ann_type(cx.fcx.ccx, ann);
                            auto lldiscrim_gv =
                                lookup_discriminant(cx.fcx.ccx, tid, vid);
                            auto lldiscrim = cx.build.Load(lldiscrim_gv);

                            auto alloc_result = alloc_ty(cx, tag_ty);
                            auto lltagblob = alloc_result.val;

                            auto lltagty;
                            if (ty.type_has_dynamic_size(tag_ty)) {
                                lltagty = T_opaque_tag(cx.fcx.ccx.tn);
                            } else {
                                lltagty = type_of(cx.fcx.ccx, tag_ty);
                            }
                            auto lltagptr = alloc_result.bcx.build.
                                PointerCast(lltagblob, T_ptr(lltagty));

                            auto lldiscrimptr = alloc_result.bcx.build.GEP(
                                lltagptr, vec(C_int(0), C_int(0)));
                            alloc_result.bcx.build.Store(lldiscrim,
                                                         lldiscrimptr);

                            ret lval_val(alloc_result.bcx, lltagptr);
                        }
                    }
                }
                case (ast.def_const(?did)) {
                    // TODO: externals
                    check (cx.fcx.ccx.consts.contains_key(did));
                    ret lval_mem(cx, cx.fcx.ccx.consts.get(did));
                }
                case (ast.def_native_fn(?did)) {
                    auto tyt = ty.lookup_generic_item_type(cx.fcx.ccx.sess,
                        cx.fcx.ccx.type_cache, did);
                    ret lval_generic_fn(cx, tyt, did, ann);
                }
                case (_) {
                    cx.fcx.ccx.sess.unimpl("def variant in trans");
                }
            }
        }
        case (none[ast.def]) {
            cx.fcx.ccx.sess.err("unresolved expr_path in trans");
        }
    }
    fail;
}

fn trans_field(@block_ctxt cx, &ast.span sp, ValueRef v, @ty.t t0,
               &ast.ident field, &ast.ann ann) -> lval_result {

    auto r = autoderef(cx, v, t0);
    auto t = autoderefed_ty(t0);

    alt (t.struct) {
        case (ty.ty_tup(_)) {
            let uint ix = ty.field_num(cx.fcx.ccx.sess, sp, field);
            auto v = GEP_tup_like(r.bcx, t, r.val, vec(0, ix as int));
            ret lval_mem(v.bcx, v.val);
        }
        case (ty.ty_rec(?fields)) {
            let uint ix = ty.field_idx(cx.fcx.ccx.sess, sp, field, fields);
            auto v = GEP_tup_like(r.bcx, t, r.val, vec(0, ix as int));
            ret lval_mem(v.bcx, v.val);
        }
        case (ty.ty_obj(?methods)) {
            let uint ix = ty.method_idx(cx.fcx.ccx.sess, sp, field, methods);
            auto vtbl = r.bcx.build.GEP(r.val,
                                        vec(C_int(0),
                                            C_int(abi.obj_field_vtbl)));
            vtbl = r.bcx.build.Load(vtbl);
            auto v =  r.bcx.build.GEP(vtbl, vec(C_int(0),
                                                C_int(ix as int)));

            auto lvo = lval_mem(r.bcx, v);
            let @ty.t fn_ty = ty.method_ty_to_fn_ty(methods.(ix));
            ret rec(llobj = some[ValueRef](r.val),
                    method_ty = some[@ty.t](fn_ty)
                    with lvo);
        }
        case (_) { cx.fcx.ccx.sess.unimpl("field variant in trans_field"); }
    }
    fail;
}

fn trans_index(@block_ctxt cx, &ast.span sp, @ast.expr base,
               @ast.expr idx, &ast.ann ann) -> lval_result {

    auto lv = trans_expr(cx, base);
    lv = autoderef(lv.bcx, lv.val, ty.expr_ty(base));
    auto ix = trans_expr(lv.bcx, idx);
    auto v = lv.val;
    auto bcx = ix.bcx;

    // Cast to an LLVM integer. Rust is less strict than LLVM in this regard.
    auto ix_val;
    auto ix_size = llsize_of_real(cx.fcx.ccx, val_ty(ix.val));
    auto int_size = llsize_of_real(cx.fcx.ccx, T_int());
    if (ix_size < int_size) {
        ix_val = bcx.build.ZExt(ix.val, T_int());
    } else if (ix_size > int_size) {
        ix_val = bcx.build.Trunc(ix.val, T_int());
    } else {
        ix_val = ix.val;
    }

    auto unit_ty = node_ann_type(cx.fcx.ccx, ann);
    auto unit_sz = size_of(bcx, unit_ty);
    bcx = unit_sz.bcx;
    llvm.LLVMSetValueName(unit_sz.val, _str.buf("unit_sz"));

    auto scaled_ix = bcx.build.Mul(ix_val, unit_sz.val);
    llvm.LLVMSetValueName(scaled_ix, _str.buf("scaled_ix"));

    auto lim = bcx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_fill)));
    lim = bcx.build.Load(lim);

    auto bounds_check = bcx.build.ICmp(lib.llvm.LLVMIntULT,
                                       scaled_ix, lim);

    auto fail_cx = new_sub_block_ctxt(bcx, "fail");
    auto next_cx = new_sub_block_ctxt(bcx, "next");
    bcx.build.CondBr(bounds_check, next_cx.llbb, fail_cx.llbb);

    // fail: bad bounds check.
    auto fail_res = trans_fail(fail_cx, sp, "bounds check");

    auto body = next_cx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_data)));
    auto elt;
    if (ty.type_has_dynamic_size(unit_ty)) {
        body = next_cx.build.PointerCast(body, T_ptr(T_array(T_i8(), 0u)));
        elt = next_cx.build.GEP(body, vec(C_int(0), scaled_ix));
    } else {
        elt = next_cx.build.GEP(body, vec(C_int(0), ix_val));
    }

    ret lval_mem(next_cx, elt);
}

// The additional bool returned indicates whether it's mem (that is
// represented as an alloca or heap, hence needs a 'load' to be used as an
// immediate).

fn trans_lval(@block_ctxt cx, @ast.expr e) -> lval_result {
    alt (e.node) {
        case (ast.expr_path(?p, ?dopt, ?ann)) {
            ret trans_path(cx, p, dopt, ann);
        }
        case (ast.expr_field(?base, ?ident, ?ann)) {
            auto r = trans_expr(cx, base);
            auto t = ty.expr_ty(base);
            ret trans_field(r.bcx, e.span, r.val, t, ident, ann);
        }
        case (ast.expr_index(?base, ?idx, ?ann)) {
            ret trans_index(cx, e.span, base, idx, ann);
        }

        // Kind of bizarre to pass an *entire* self-call here...but let's try
        // it
        case (ast.expr_call_self(?ident, _, ?ann)) {
            alt (cx.fcx.llself) {
                case (some[self_vt](?s_vt)) {
                    auto r =  s_vt.v;
                    auto t =  s_vt.t;
                    ret trans_field(cx, e.span, r, t, ident, ann);
                }
                case (_) {
                    // Shouldn't happen.
                    fail;
                }

            }
        }
        case (_) { cx.fcx.ccx.sess.unimpl("expr variant in trans_lval"); }
    }
    fail;
}

fn trans_cast(@block_ctxt cx, @ast.expr e, &ast.ann ann) -> result {
    auto e_res = trans_expr(cx, e);
    auto llsrctype = val_ty(e_res.val);
    auto t = node_ann_type(cx.fcx.ccx, ann);
    auto lldsttype = type_of(cx.fcx.ccx, t);
    if (!ty.type_is_fp(t)) {
        // TODO: native-to-native casts
        if (ty.type_is_native(ty.expr_ty(e))) {
            e_res.val = e_res.bcx.build.PtrToInt(e_res.val, lldsttype);
        } else if (ty.type_is_native(t)) {
            e_res.val = e_res.bcx.build.IntToPtr(e_res.val, lldsttype);
        } else if (llvm.LLVMGetIntTypeWidth(lldsttype) >
            llvm.LLVMGetIntTypeWidth(llsrctype)) {
            if (ty.type_is_signed(t)) {
                // Widening signed cast.
                e_res.val =
                    e_res.bcx.build.SExtOrBitCast(e_res.val,
                                                  lldsttype);
            } else {
                // Widening unsigned cast.
                e_res.val =
                    e_res.bcx.build.ZExtOrBitCast(e_res.val,
                                                  lldsttype);
            }
        } else {
            // Narrowing cast.
            e_res.val =
                e_res.bcx.build.TruncOrBitCast(e_res.val,
                                               lldsttype);
        }
    } else {
        cx.fcx.ccx.sess.unimpl("fp cast");
    }
    ret e_res;
}

fn trans_bind_thunk(@crate_ctxt cx,
                    @ty.t incoming_fty,
                    @ty.t outgoing_fty,
                    vec[option.t[@ast.expr]] args,
                    @ty.t closure_ty,
                    vec[@ty.t] bound_tys,
                    uint ty_param_count) -> ValueRef {
    // Construct a thunk-call with signature incoming_fty, and that copies
    // args forward into a call to outgoing_fty.
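    //
    // For instance (hypothetical source), given `fn add(int x, int y) -> int`
    // and `bind add(10, _)`, the thunk's incoming signature takes a single
    // int; it loads the bound 10 back out of the closure, appends the
    // caller-supplied argument, and forwards both to `add` through the
    // target pair stored in the closure.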

    let str s = mangle_name_by_seq(cx, "thunk");
    let TypeRef llthunk_ty = get_pair_fn_ty(type_of(cx, incoming_fty));
    let ValueRef llthunk = decl_internal_fastcall_fn(cx.llmod, s, llthunk_ty);

    auto fcx = new_fn_ctxt(cx, llthunk);
    auto bcx = new_top_block_ctxt(fcx);
    auto lltop = bcx.llbb;

    auto llclosure_ptr_ty = type_of(cx, ty.plain_box_ty(closure_ty, ast.imm));
    auto llclosure = bcx.build.PointerCast(fcx.llenv, llclosure_ptr_ty);

    auto lltarget = GEP_tup_like(bcx, closure_ty, llclosure,
                                 vec(0,
                                     abi.box_rc_field_body,
                                     abi.closure_elt_target));
    bcx = lltarget.bcx;
    auto lltargetclosure = bcx.build.GEP(lltarget.val,
                                         vec(C_int(0),
                                             C_int(abi.fn_field_code)));
    lltargetclosure = bcx.build.Load(lltargetclosure);

    auto outgoing_ret_ty = ty.ty_fn_ret(outgoing_fty);
    auto outgoing_args = ty.ty_fn_args(outgoing_fty);

    auto llretptr = fcx.llretptr;
    if (ty.type_has_dynamic_size(outgoing_ret_ty)) {
        llretptr = bcx.build.PointerCast(llretptr, T_typaram_ptr(cx.tn));
    }

    let vec[ValueRef] llargs = vec(llretptr,
                                   fcx.lltaskptr,
                                   lltargetclosure);

    // Copy in the type parameters.
    let uint i = 0u;
    while (i < ty_param_count) {
        auto lltyparam_ptr =
            GEP_tup_like(bcx, closure_ty, llclosure,
                         vec(0,
                             abi.box_rc_field_body,
                             abi.closure_elt_ty_params,
                             (i as int)));
        bcx = lltyparam_ptr.bcx;
        llargs += vec(bcx.build.Load(lltyparam_ptr.val));
        i += 1u;
    }

    let uint a = 3u;    // retptr, task ptr, env come first
    let int b = 0;
    let uint outgoing_arg_index = 0u;
    let vec[TypeRef] llout_arg_tys =
        type_of_explicit_args(cx, outgoing_args);

    for (option.t[@ast.expr] arg in args) {

        auto out_arg = outgoing_args.(outgoing_arg_index);
        auto llout_arg_ty = llout_arg_tys.(outgoing_arg_index);

        alt (arg) {

            // Arg provided at binding time; thunk copies it from closure.
            case (some[@ast.expr](_)) {
                auto bound_arg =
                    GEP_tup_like(bcx, closure_ty, llclosure,
                                 vec(0,
                                     abi.box_rc_field_body,
                                     abi.closure_elt_bindings,
                                     b));

                bcx = bound_arg.bcx;
                auto val = bound_arg.val;

                if (out_arg.mode == ast.val) {
                    val = bcx.build.Load(val);
                } else if (ty.count_ty_params(out_arg.ty) > 0u) {
                    check (out_arg.mode == ast.alias);
                    val = bcx.build.PointerCast(val, llout_arg_ty);
                }

                llargs += vec(val);
                b += 1;
            }

            // Arg will be provided when the thunk is invoked.
            case (none[@ast.expr]) {
                let ValueRef passed_arg = llvm.LLVMGetParam(llthunk, a);

                if (ty.count_ty_params(out_arg.ty) > 0u) {
                    check (out_arg.mode == ast.alias);
                    passed_arg = bcx.build.PointerCast(passed_arg,
                                                       llout_arg_ty);
                }

                llargs += vec(passed_arg);
                a += 1u;
            }
        }

        outgoing_arg_index += 1u;
    }

    // FIXME: turn this call + ret into a tail call.
    auto lltargetfn = bcx.build.GEP(lltarget.val,
                                    vec(C_int(0),
                                        C_int(abi.fn_field_code)));

    // Cast the outgoing function to the appropriate type (see the comments in
    // trans_bind below for why this is necessary).
    auto lltargetty = type_of_fn(bcx.fcx.ccx,
                                 ty.ty_fn_proto(outgoing_fty),
                                 outgoing_args,
                                 outgoing_ret_ty,
                                 ty_param_count);
    lltargetfn = bcx.build.PointerCast(lltargetfn, T_ptr(T_ptr(lltargetty)));

    lltargetfn = bcx.build.Load(lltargetfn);

    auto r = bcx.build.FastCall(lltargetfn, llargs);
    bcx.build.RetVoid();

    // Tie up the llallocas -> lltop edge.
    new_builder(fcx.llallocas).Br(lltop);

    ret llthunk;
}

fn trans_bind(@block_ctxt cx, @ast.expr f,
              vec[option.t[@ast.expr]] args,
              &ast.ann ann) -> result {
    auto f_res = trans_lval(cx, f);
    if (f_res.is_mem) {
        cx.fcx.ccx.sess.unimpl("re-binding existing function");
    } else {
        let vec[@ast.expr] bound = vec();

        for (option.t[@ast.expr] argopt in args) {
            alt (argopt) {
                case (none[@ast.expr]) {
                }
                case (some[@ast.expr](?e)) {
                    _vec.push[@ast.expr](bound, e);
                }
            }
        }

        // Figure out which tydescs we need to pass, if any.
        let @ty.t outgoing_fty;
        let vec[ValueRef] lltydescs;
        alt (f_res.generic) {
            case (none[generic_info]) {
                outgoing_fty = ty.expr_ty(f);
                lltydescs = vec();
            }
            case (some[generic_info](?ginfo)) {
                outgoing_fty = ginfo.item_type;
                lltydescs = ginfo.tydescs;
            }
        }
        auto ty_param_count = _vec.len[ValueRef](lltydescs);

        if (_vec.len[@ast.expr](bound) == 0u && ty_param_count == 0u) {
            // Trivial 'binding': just return the static pair-ptr.
            ret f_res.res;
        } else {
            auto bcx = f_res.res.bcx;
            auto pair_t = node_type(cx.fcx.ccx, ann);
            auto pair_v = alloca(bcx, pair_t);

            // Translate the bound expressions.
            let vec[@ty.t] bound_tys = vec();
            let vec[ValueRef] bound_vals = vec();
            auto i = 0u;
            for (@ast.expr e in bound) {
                auto arg = trans_expr(bcx, e);
                bcx = arg.bcx;

                _vec.push[ValueRef](bound_vals, arg.val);
                _vec.push[@ty.t](bound_tys, ty.expr_ty(e));

                i += 1u;
            }

            // Synthesize a closure type.
            let @ty.t bindings_ty = ty.plain_tup_ty(bound_tys);

            // NB: keep this in sync with T_closure_ptr; we're making
            // a ty.t structure that has the same "shape" as the LLVM type
            // it constructs.
            let @ty.t tydesc_ty = plain_ty(ty.ty_type);

            let vec[@ty.t] captured_tys =
                _vec.init_elt[@ty.t](tydesc_ty, ty_param_count);

            let vec[@ty.t] closure_tys =
                vec(tydesc_ty,
                    outgoing_fty,
                    bindings_ty,
                    ty.plain_tup_ty(captured_tys));

            let @ty.t closure_ty = ty.plain_tup_ty(closure_tys);

            auto r = trans_malloc_boxed(bcx, closure_ty);
            auto box = r.val;
            bcx = r.bcx;
            auto rc = bcx.build.GEP(box,
                                    vec(C_int(0),
                                        C_int(abi.box_rc_field_refcnt)));
            auto closure =
                bcx.build.GEP(box,
                              vec(C_int(0),
                                  C_int(abi.box_rc_field_body)));
            bcx.build.Store(C_int(1), rc);

            // Store bindings tydesc.
            auto bound_tydesc =
                bcx.build.GEP(closure,
                              vec(C_int(0),
                                  C_int(abi.closure_elt_tydesc)));
            auto bindings_tydesc = get_tydesc(bcx, bindings_ty);
            bcx = bindings_tydesc.bcx;
            bcx.build.Store(bindings_tydesc.val, bound_tydesc);

            // Determine the LLVM type for the outgoing function type. This
            // may be different from the type returned by trans_malloc_boxed()
            // since we have more information than that function does;
            // specifically, we know how many type descriptors the outgoing
            // function has, which type_of() doesn't, as only we know which
            // item the function refers to.
            auto llfnty = type_of_fn(bcx.fcx.ccx,
                                     ty.ty_fn_proto(outgoing_fty),
                                     ty.ty_fn_args(outgoing_fty),
                                     ty.ty_fn_ret(outgoing_fty),
                                     ty_param_count);
            auto llclosurety = T_ptr(T_fn_pair(bcx.fcx.ccx.tn, llfnty));

            // Store thunk-target.
            auto bound_target =
                bcx.build.GEP(closure,
                              vec(C_int(0),
                                  C_int(abi.closure_elt_target)));
            auto src = bcx.build.Load(f_res.res.val);
            bound_target = bcx.build.PointerCast(bound_target, llclosurety);
            bcx.build.Store(src, bound_target);

            // Copy expr values into boxed bindings.
            i = 0u;
            auto bindings =
                bcx.build.GEP(closure,
                              vec(C_int(0),
                                  C_int(abi.closure_elt_bindings)));
            for (ValueRef v in bound_vals) {
                auto bound = bcx.build.GEP(bindings,
                                           vec(C_int(0), C_int(i as int)));
                bcx = copy_ty(r.bcx, INIT, bound, v, bound_tys.(i)).bcx;
                i += 1u;
            }

            // If necessary, copy tydescs describing type parameters into the
            // appropriate slot in the closure.
            alt (f_res.generic) {
                case (none[generic_info]) { /* nothing to do */ }
                case (some[generic_info](?ginfo)) {
                    auto ty_params_slot =
                        bcx.build.GEP(closure,
                                      vec(C_int(0),
                                          C_int(abi.closure_elt_ty_params)));
                    auto i = 0;
                    for (ValueRef td in ginfo.tydescs) {
                        auto ty_param_slot = bcx.build.GEP(ty_params_slot,
                                                           vec(C_int(0),
                                                               C_int(i)));
                        bcx.build.Store(td, ty_param_slot);
                        i += 1;
                    }

                    outgoing_fty = ginfo.item_type;
                }
            }

            // Make thunk and store thunk-ptr in outer pair's code slot.
            auto pair_code = bcx.build.GEP(pair_v,
                                           vec(C_int(0),
                                               C_int(abi.fn_field_code)));

            let @ty.t pair_ty = node_ann_type(cx.fcx.ccx, ann);

            let ValueRef llthunk =
                trans_bind_thunk(cx.fcx.ccx, pair_ty, outgoing_fty,
                                 args, closure_ty, bound_tys,
                                 ty_param_count);

            bcx.build.Store(llthunk, pair_code);

            // Store box ptr in outer pair's box slot.
            auto pair_box = bcx.build.GEP(pair_v,
                                          vec(C_int(0),
                                              C_int(abi.fn_field_box)));
            bcx.build.Store
                (bcx.build.PointerCast
                 (box,
                  T_opaque_closure_ptr(bcx.fcx.ccx.tn)),
                 pair_box);

            find_scope_cx(cx).cleanups +=
                vec(clean(bind drop_slot(_, pair_v, pair_ty)));

            ret res(bcx, pair_v);
        }
    }
}

// NB: must keep 4 fns in sync:
//
//  - type_of_fn_full
//  - create_llargs_for_fn_args.
//  - new_fn_ctxt
//  - trans_args
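//
// The outgoing argument list for a call is assembled in this order: (0) the
// return-value slot, (1) the task pointer, (2) the closure env or self-obj,
// then any type-descriptor args, then an optional iter-body fn, and finally
// the translated explicit arguments.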

fn trans_args(@block_ctxt cx,
              ValueRef llenv,
              option.t[ValueRef] llobj,
              option.t[generic_info] gen,
              option.t[ValueRef] lliterbody,
              &vec[@ast.expr] es,
              @ty.t fn_ty)
    -> tup(@block_ctxt, vec[ValueRef], ValueRef) {

    let vec[ty.arg] args = ty.ty_fn_args(fn_ty);
    let vec[ValueRef] llargs = vec();
    let vec[ValueRef] lltydescs = vec();
    let @block_ctxt bcx = cx;


    // Arg 0: Output pointer.
    auto retty = ty.ty_fn_ret(fn_ty);
    auto llretslot_res = alloc_ty(bcx, retty);
    bcx = llretslot_res.bcx;
    auto llretslot = llretslot_res.val;

    alt (gen) {
        case (some[generic_info](?g)) {
            lltydescs = g.tydescs;
            args = ty.ty_fn_args(g.item_type);
            retty = ty.ty_fn_ret(g.item_type);
        }
        case (_) {
        }
    }
    if (ty.type_has_dynamic_size(retty)) {
4328 4329
        llargs += vec(bcx.build.PointerCast(llretslot,
                                            T_typaram_ptr(cx.fcx.ccx.tn)));
    } else if (ty.count_ty_params(retty) != 0u) {
        // It's possible that the callee has some generic-ness somewhere in
        // its return value -- say a method signature within an obj or a fn
        // type deep in a structure -- which the caller has a concrete view
        // of. If so, cast the caller's view of the retslot to the callee's
        // view, for the sake of making a type-compatible call.
        llargs +=
            vec(cx.build.PointerCast(llretslot,
                                     T_ptr(type_of(bcx.fcx.ccx, retty))));
    } else {
        llargs += vec(llretslot);
    }


    // Arg 1: Task pointer.
    llargs += vec(bcx.fcx.lltaskptr);

    // Arg 2: Env (closure-bindings / self-obj)
    alt (llobj) {
        case (some[ValueRef](?ob)) {
            // Every object is always found in memory,
            // and not-yet-loaded (as part of an lval x.y
            // dotted method-call).
            llargs += vec(bcx.build.Load(ob));
        }
        case (_) {
            llargs += vec(llenv);
        }
    }

    // Args >3: ty_params ...
    llargs += lltydescs;

    // ... then possibly an lliterbody argument.
    alt (lliterbody) {
        case (none[ValueRef]) {}
        case (some[ValueRef](?lli)) {
            llargs += vec(lli);
        }
    }

    // ... then explicit args.

    // First we figure out the caller's view of the types of the arguments.
    // This will be needed if this is a generic call, because the callee has
    // to cast her view of the arguments to the caller's view.
    auto arg_tys = type_of_explicit_args(cx.fcx.ccx, args);

    auto i = 0u;
    for (@ast.expr e in es) {
        auto mode = args.(i).mode;

        auto val;
        if (ty.type_is_structural(ty.expr_ty(e))) {
            auto re = trans_expr(bcx, e);
            val = re.val;
            bcx = re.bcx;
        } else if (mode == ast.alias) {
            let lval_result lv;
            if (ty.is_lval(e)) {
                lv = trans_lval(bcx, e);
            } else {
                auto r = trans_expr(bcx, e);
                lv = lval_val(r.bcx, r.val);
            }
            bcx = lv.res.bcx;

            if (lv.is_mem) {
                val = lv.res.val;
            } else {
                // Non-mem but we're trying to alias; synthesize an
                // alloca, spill to it and pass its address.
                auto llty = val_ty(lv.res.val);
                auto llptr = alloca(lv.res.bcx, llty);
                lv.res.bcx.build.Store(lv.res.val, llptr);
                val = llptr;
            }

        } else {
            auto re = trans_expr(bcx, e);
            val = re.val;
            bcx = re.bcx;
        }

        if (ty.count_ty_params(args.(i).ty) > 0u) {
            auto lldestty = arg_tys.(i);
            if (mode == ast.val) {
                // FIXME: we'd prefer to use &&, but rustboot doesn't like it
                if (ty.type_is_structural(ty.expr_ty(e))) {
                    lldestty = T_ptr(lldestty);
                }
            }

            val = bcx.build.PointerCast(val, lldestty);
        } else if (mode == ast.alias) {
            auto lldestty = arg_tys.(i);
            val = bcx.build.PointerCast(val, lldestty);
        }

        if (mode == ast.val) {
            // FIXME: we'd prefer to use &&, but rustboot doesn't like it
            if (ty.type_is_structural(ty.expr_ty(e))) {
                // Until here we've been treating structures by pointer;
                // we are now passing it as an arg, so need to load it.
                val = bcx.build.Load(val);
            }
        }

        llargs += vec(val);
        i += 1u;
    }

    ret tup(bcx, llargs, llretslot);
}

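// Translates a call expression: resolves the callee to a code pointer and
// environment (vtbl entry vs. closure pair), marshals arguments through
// trans_args, emits a FastCall, and registers a cleanup to drop any
// non-nil return value at the end of the enclosing scope.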
fn trans_call(@block_ctxt cx, @ast.expr f,
              option.t[ValueRef] lliterbody,
              vec[@ast.expr] args,
              &ast.ann ann) -> result {

    // NB: 'f' isn't necessarily a function; it might be an entire self-call
    // expression because of the hack that allows us to process self-calls
    // with trans_call.

    auto f_res = trans_lval(cx, f);
    auto faddr = f_res.res.val;
    auto llenv = C_null(T_opaque_closure_ptr(cx.fcx.ccx.tn));

    alt (f_res.llobj) {
        case (some[ValueRef](_)) {
            // It's a vtbl entry.
            faddr = f_res.res.bcx.build.Load(faddr);
        }
        case (none[ValueRef]) {
            // It's a closure.
            auto bcx = f_res.res.bcx;
            auto pair = faddr;
            faddr = bcx.build.GEP(pair, vec(C_int(0),
                                            C_int(abi.fn_field_code)));
            faddr = bcx.build.Load(faddr);

            auto llclosure = bcx.build.GEP(pair,
                                           vec(C_int(0),
                                               C_int(abi.fn_field_box)));
            llenv = bcx.build.Load(llclosure);
        }
    }

    let @ty.t fn_ty;
    alt (f_res.method_ty) {
        case (some[@ty.t](?meth)) {
            // self-call
            fn_ty = meth;
        }
        
        case (_) {
            fn_ty = ty.expr_ty(f);

        }

    }

    auto ret_ty = ty.ann_to_type(ann);
    auto args_res = trans_args(f_res.res.bcx,
                               llenv, f_res.llobj,
                               f_res.generic,
                               lliterbody,
                               args, fn_ty);

    auto bcx = args_res._0;
    auto llargs = args_res._1;
    auto llretslot = args_res._2;

    /*
    log "calling: " + val_str(cx.fcx.ccx.tn, faddr);

    for (ValueRef arg in llargs) {
        log "arg: " + val_str(cx.fcx.ccx.tn, arg);
    }
    */

    bcx.build.FastCall(faddr, llargs);
    auto retval = C_nil();

    if (!ty.type_is_nil(ret_ty)) {
        retval = load_scalar_or_boxed(bcx, llretslot, ret_ty);
        // Retval doesn't correspond to anything really tangible in the frame,
        // but it's a ref all the same, so we put a note here to drop it when
        // we're done in this scope.
        find_scope_cx(cx).cleanups +=
            vec(clean(bind drop_ty(_, retval, ret_ty)));
    }

    ret res(bcx, retval);
}

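// Translates a tuple literal: allocates a slot of the annotated tuple type,
// registers a drop cleanup for it, and copy-initializes each element in
// order via GEP_tup_like.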
fn trans_tup(@block_ctxt cx, vec[ast.elt] elts,
             &ast.ann ann) -> result {
    auto bcx = cx;
    auto t = node_ann_type(bcx.fcx.ccx, ann);
    auto tup_res = alloc_ty(bcx, t);
    auto tup_val = tup_res.val;
    bcx = tup_res.bcx;

    find_scope_cx(cx).cleanups +=
        vec(clean(bind drop_ty(_, tup_val, t)));
    let int i = 0;

    for (ast.elt e in elts) {
        auto e_ty = ty.expr_ty(e.expr);
        auto src_res = trans_expr(bcx, e.expr);
        bcx = src_res.bcx;
        auto dst_res = GEP_tup_like(bcx, t, tup_val, vec(0, i));
        bcx = dst_res.bcx;
        bcx = copy_ty(src_res.bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
        i += 1;
    }
    ret res(bcx, tup_val);
}

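// Translates a vector literal: upcalls to allocate the vector, then
// copy-initializes each element through a synthesized tuple view of the
// vector body, finally recording the fill size.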
fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
             &ast.ann ann) -> result {
    auto t = node_ann_type(cx.fcx.ccx, ann);
    auto unit_ty = t;
    alt (t.struct) {
        case (ty.ty_vec(?mt)) {
            unit_ty = mt.ty;
        }
        case (_) {
            cx.fcx.ccx.sess.bug("non-vec type in trans_vec");
        }
    }

    auto bcx = cx;
    auto unit_sz = size_of(bcx, unit_ty);
    bcx = unit_sz.bcx;
    auto data_sz = bcx.build.Mul(C_int(_vec.len[@ast.expr](args) as int),
                                 unit_sz.val);

    // FIXME: pass tydesc properly.
    auto sub = trans_upcall(bcx, "upcall_new_vec", vec(data_sz, C_int(0)));
    bcx = sub.bcx;

    auto llty = type_of(bcx.fcx.ccx, t);
    auto vec_val = vi2p(bcx, sub.val, llty);
    find_scope_cx(bcx).cleanups +=
        vec(clean(bind drop_ty(_, vec_val, t)));

    auto body = bcx.build.GEP(vec_val, vec(C_int(0),
                                           C_int(abi.vec_elt_data)));

    auto pseudo_tup_ty =
        ty.plain_tup_ty(_vec.init_elt[@ty.t](unit_ty,
                                             _vec.len[@ast.expr](args)));
    let int i = 0;

    for (@ast.expr e in args) {
        auto src_res = trans_expr(bcx, e);
        bcx = src_res.bcx;
        auto dst_res = GEP_tup_like(bcx, pseudo_tup_ty, body, vec(0, i));
        bcx = dst_res.bcx;

        // Cast the destination type to the source type. This is needed to
        // make tags work, for a subtle combination of reasons:
        //
        // (1) "dst_res" above is derived from "body", which is in turn
        //     derived from "vec_val".
        // (2) "vec_val" has the LLVM type "llty".
        // (3) "llty" is the result of calling type_of() on a vector type.
        // (4) For tags, type_of() returns a different type depending on
        //     whether the tag is behind a box or not. Vector types are
        //     considered boxes.
        // (5) "src_res" is derived from "unit_ty", which is not behind a box.

        auto dst_val;
        if (!ty.type_has_dynamic_size(unit_ty)) {
            auto llunit_ty = type_of(cx.fcx.ccx, unit_ty);
            dst_val = bcx.build.PointerCast(dst_res.val, T_ptr(llunit_ty));
        } else {
            dst_val = dst_res.val;
        }

        bcx = copy_ty(bcx, INIT, dst_val, src_res.val, unit_ty).bcx;
        i += 1;
    }
    auto fill = bcx.build.GEP(vec_val,
                              vec(C_int(0), C_int(abi.vec_elt_fill)));
    bcx.build.Store(data_sz, fill);

    ret res(bcx, vec_val);
}

fn trans_rec(@block_ctxt cx, vec[ast.field] fields,
             option.t[@ast.expr] base, &ast.ann ann) -> result {

    auto bcx = cx;
    auto t = node_ann_type(bcx.fcx.ccx, ann);
    auto llty = type_of(bcx.fcx.ccx, t);
    auto rec_res = alloc_ty(bcx, t);
    auto rec_val = rec_res.val;
    bcx = rec_res.bcx;

    find_scope_cx(cx).cleanups +=
        vec(clean(bind drop_ty(_, rec_val, t)));
    let int i = 0;

    auto base_val = C_nil();

    alt (base) {
        case (none[@ast.expr]) { }
        case (some[@ast.expr](?bexp)) {
            auto base_res = trans_expr(bcx, bexp);
            bcx = base_res.bcx;
            base_val = base_res.val;
        }
    }

    let vec[ty.field] ty_fields = vec();
    alt (t.struct) {
        case (ty.ty_rec(?flds)) { ty_fields = flds; }
    }

    for (ty.field tf in ty_fields) {
        auto e_ty = tf.mt.ty;
        auto dst_res = GEP_tup_like(bcx, t, rec_val, vec(0, i));
        bcx = dst_res.bcx;

        auto expr_provided = false;
        auto src_res = res(bcx, C_nil());

        for (ast.field f in fields) {
            if (_str.eq(f.ident, tf.ident)) {
                expr_provided = true;
                src_res = trans_expr(bcx, f.expr);
            }
        }
        if (!expr_provided) {
            src_res = GEP_tup_like(bcx, t, base_val, vec(0, i));
            src_res = res(src_res.bcx,
                          load_scalar_or_boxed(bcx, src_res.val, e_ty));
        }

        bcx = src_res.bcx;
        bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
        i += 1;
    }
    ret res(bcx, rec_val);
}



fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
    alt (e.node) {
        case (ast.expr_lit(?lit, ?ann)) {
            ret res(cx, trans_lit(cx.fcx.ccx, *lit, ann));
        }

        case (ast.expr_unary(?op, ?x, ?ann)) {
            ret trans_unary(cx, op, x, ann);
        }

        case (ast.expr_binary(?op, ?x, ?y, _)) {
            ret trans_binary(cx, op, x, y);
        }

        case (ast.expr_if(?cond, ?thn, ?els, _)) {
            ret trans_if(cx, cond, thn, els);
        }

        case (ast.expr_for(?decl, ?seq, ?body, _)) {
            ret trans_for(cx, decl, seq, body);
        }

        case (ast.expr_for_each(?decl, ?seq, ?body, _)) {
            ret trans_for_each(cx, decl, seq, body);
        }

        case (ast.expr_while(?cond, ?body, _)) {
            ret trans_while(cx, cond, body);
        }

        case (ast.expr_do_while(?body, ?cond, _)) {
            ret trans_do_while(cx, body, cond);
        }

        case (ast.expr_alt(?expr, ?arms, ?ann)) {
            ret trans_alt(cx, expr, arms, ann);
        }

        case (ast.expr_block(?blk, _)) {
            auto sub_cx = new_scope_block_ctxt(cx, "block-expr body");
            auto next_cx = new_sub_block_ctxt(cx, "next");
            auto sub = trans_block(sub_cx, blk);

            cx.build.Br(sub_cx.llbb);
            sub.bcx.build.Br(next_cx.llbb);

            ret res(next_cx, sub.val);
        }

        case (ast.expr_assign(?dst, ?src, ?ann)) {
            auto lhs_res = trans_lval(cx, dst);
            check (lhs_res.is_mem);
            auto rhs_res = trans_expr(lhs_res.res.bcx, src);
            auto t = node_ann_type(cx.fcx.ccx, ann);
            // FIXME: calculate copy init-ness in typestate.
            ret copy_ty(rhs_res.bcx, DROP_EXISTING,
                        lhs_res.res.val, rhs_res.val, t);
        }

        case (ast.expr_assign_op(?op, ?dst, ?src, ?ann)) {
            auto t = node_ann_type(cx.fcx.ccx, ann);
            auto lhs_res = trans_lval(cx, dst);
            check (lhs_res.is_mem);
            auto lhs_val = load_scalar_or_boxed(lhs_res.res.bcx,
                                                lhs_res.res.val, t);
            auto rhs_res = trans_expr(lhs_res.res.bcx, src);
            auto v = trans_eager_binop(rhs_res.bcx, op, t,
                                       lhs_val, rhs_res.val);
            // FIXME: calculate copy init-ness in typestate.
            ret copy_ty(v.bcx, DROP_EXISTING,
                        lhs_res.res.val, v.val, t);
        }

        case (ast.expr_bind(?f, ?args, ?ann)) {
            ret trans_bind(cx, f, args, ann);
        }

        case (ast.expr_call(?f, ?args, ?ann)) {
            ret trans_call(cx, f, none[ValueRef], args, ann);
        }

        case (ast.expr_call_self(?ident, ?args, ?ann)) {
            // A weird hack to make self-calls work.
            ret trans_call(cx, e, none[ValueRef], args, ann);
        }

        case (ast.expr_cast(?e, _, ?ann)) {
            ret trans_cast(cx, e, ann);
        }

        case (ast.expr_vec(?args, _, ?ann)) {
            ret trans_vec(cx, args, ann);
        }

        case (ast.expr_tup(?args, ?ann)) {
            ret trans_tup(cx, args, ann);
        }

        case (ast.expr_rec(?args, ?base, ?ann)) {
            ret trans_rec(cx, args, base, ann);
        }

        case (ast.expr_ext(_, _, _, ?expanded, _)) {
            ret trans_expr(cx, expanded);
        }

        case (ast.expr_fail(_)) {
            ret trans_fail(cx, e.span, "explicit failure");
        }

        case (ast.expr_log(?a, _)) {
            ret trans_log(cx, a);
        }

        case (ast.expr_check_expr(?a, _)) {
            ret trans_check_expr(cx, a);
        }

        case (ast.expr_break(?a)) {
            ret trans_break(cx);
        }

        case (ast.expr_cont(?a)) {
            ret trans_cont(cx);
        }

        case (ast.expr_ret(?e, _)) {
            ret trans_ret(cx, e);
        }

        case (ast.expr_put(?e, _)) {
            ret trans_put(cx, e);
        }

        case (ast.expr_be(?e, _)) {
            ret trans_be(cx, e);
        }

        case (ast.expr_port(?ann)) {
            ret trans_port(cx, ann);
        }

        case (ast.expr_chan(?e, ?ann)) {
            ret trans_chan(cx, e, ann);
        }

        case (ast.expr_send(?lhs, ?rhs, ?ann)) {
            ret trans_send(cx, lhs, rhs, ann);
        }

        case (ast.expr_recv(?lhs, ?rhs, ?ann)) {
            ret trans_recv(cx, lhs, rhs, ann);
        }

        // lval cases fall through to trans_lval and then
        // possibly load the result (if it's non-structural).

        case (_) {
            auto t = ty.expr_ty(e);
            auto sub = trans_lval(cx, e);
            ret res(sub.res.bcx,
                    load_scalar_or_boxed(sub.res.bcx, sub.res.val, t));
        }
    }
    cx.fcx.ccx.sess.unimpl("expr variant in trans_expr");
    fail;
}

// We pass structural values around the compiler "by pointer" and
// non-structural values (scalars and boxes) "by value". This function selects
// whether to load a pointer or pass it.

fn load_scalar_or_boxed(@block_ctxt cx,
                        ValueRef v,
                        @ty.t t) -> ValueRef {
    if (ty.type_is_scalar(t) || ty.type_is_boxed(t) || ty.type_is_native(t)) {
        ret cx.build.Load(v);
    } else {
        ret v;
    }
}

fn trans_log(@block_ctxt cx, @ast.expr e) -> result {

    auto sub = trans_expr(cx, e);
    auto e_ty = ty.expr_ty(e);

    if (ty.type_is_fp(e_ty)) {
        let TypeRef tr;
        let bool is32bit = false;
        alt (e_ty.struct) {
            case (ty.ty_machine(util.common.ty_f32)) {
                tr = T_f32();
                is32bit = true;
            }
            case (ty.ty_machine(util.common.ty_f64)) {
                tr = T_f64();
            }
            case (_) {
                tr = T_float();
            }
        }
        if (is32bit) {
            ret trans_upcall(sub.bcx,
                             "upcall_log_float",
                             vec(sub.val));
        } else {
            auto tmp = alloca(sub.bcx, tr);
            sub.bcx.build.Store(sub.val, tmp);
            auto v = vp2i(sub.bcx, tmp);
            ret trans_upcall(sub.bcx,
                             "upcall_log_double",
                             vec(v));
        }
    }

    alt (e_ty.struct) {
        case (ty.ty_str) {
            auto v = vp2i(sub.bcx, sub.val);
            ret trans_upcall(sub.bcx,
                             "upcall_log_str",
                             vec(v));
        }
        case (_) {
            ret trans_upcall(sub.bcx,
                             "upcall_log_int",
                             vec(sub.val));
        }
    }
    fail;
}

fn trans_check_expr(@block_ctxt cx, @ast.expr e) -> result {
    auto cond_res = trans_expr(cx, e);

    auto expr_str = pretty.pprust.expr_to_str(e);
    auto fail_cx = new_sub_block_ctxt(cx, "fail");
    auto fail_res = trans_fail(fail_cx, e.span, expr_str);

    auto next_cx = new_sub_block_ctxt(cx, "next");
    cond_res.bcx.build.CondBr(cond_res.val,
                              next_cx.llbb,
                              fail_cx.llbb);
    ret res(next_cx, C_nil());
}

fn trans_fail(@block_ctxt cx, common.span sp, str fail_str) -> result {
    auto V_fail_str = p2i(C_cstr(cx.fcx.ccx, fail_str));
    auto V_filename = p2i(C_cstr(cx.fcx.ccx, sp.filename));
    auto V_line = sp.lo.line as int;
    auto args = vec(V_fail_str, V_filename, C_int(V_line));

    auto sub = trans_upcall(cx, "upcall_fail", args);
    sub.bcx.build.Unreachable();
    ret res(sub.bcx, C_nil());
}

fn trans_put(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
    auto llcallee = C_nil();
    auto llenv = C_nil();

    alt (cx.fcx.lliterbody) {
        case (some[ValueRef](?lli)) {
            auto slot = alloca(cx, val_ty(lli));
            cx.build.Store(lli, slot);

            llcallee = cx.build.GEP(slot, vec(C_int(0),
                                              C_int(abi.fn_field_code)));
            llcallee = cx.build.Load(llcallee);

            llenv = cx.build.GEP(slot, vec(C_int(0),
                                           C_int(abi.fn_field_box)));
            llenv = cx.build.Load(llenv);
        }
    }
    auto bcx = cx;
    auto dummy_retslot = alloca(bcx, T_nil());
    let vec[ValueRef] llargs = vec(dummy_retslot, cx.fcx.lltaskptr, llenv);
    alt (e) {
        case (none[@ast.expr]) { }
        case (some[@ast.expr](?x)) {
            auto r = trans_expr(bcx, x);

            auto llarg = r.val;
            bcx = r.bcx;
            if (ty.type_is_structural(ty.expr_ty(x))) {
                // Until here we've been treating structures by pointer; we
                // are now passing it as an arg, so need to load it.
                llarg = bcx.build.Load(llarg);
            }

            llargs += vec(llarg);
        }
    }

    ret res(bcx, bcx.build.FastCall(llcallee, llargs));
}

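// Shared body of 'break' and 'cont': walks outward through enclosing
// scopes, emitting their cleanups, until it reaches the nearest loop
// scope, then branches to that loop's break or continue block.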
fn trans_break_cont(@block_ctxt cx, bool to_end) -> result {
    auto bcx = cx;
    // Locate closest loop block, outputting cleanup as we go.
    auto cleanup_cx = cx;
    while (true) {
        bcx = trans_block_cleanups(bcx, cleanup_cx);
        alt (cleanup_cx.kind) {
            case (LOOP_SCOPE_BLOCK(?_cont, ?_break)) {
                if (to_end) {
                    bcx.build.Br(_break.llbb);
                } else {
                    alt (_cont) {
                        case (option.some[@block_ctxt](?_cont)) {
                            bcx.build.Br(_cont.llbb);
                        }
                        case (_) {
                            bcx.build.Br(cleanup_cx.llbb);
                        }
                    }
                }
                ret res(new_sub_block_ctxt(cx, "unreachable"), C_nil());
            }
            case (_) {
                alt (cleanup_cx.parent) {
                    case (parent_some(?cx)) { cleanup_cx = cx; }
                }
            }
        }
    }
    ret res(cx, C_nil()); // Never reached. Won't compile otherwise.
}

fn trans_break(@block_ctxt cx) -> result {
    ret trans_break_cont(cx, true);
}

fn trans_cont(@block_ctxt cx) -> result {
    ret trans_break_cont(cx, false);
}


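// Translates 'ret': copies the returned value (if any) into the caller's
// return slot, runs every enclosing cleanup, and terminates the block with
// RetVoid, since the value itself travels through llretptr.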
fn trans_ret(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
    auto bcx = cx;
    auto val = C_nil();

    alt (e) {
        case (some[@ast.expr](?x)) {
            auto t = ty.expr_ty(x);
            auto r = trans_expr(cx, x);
            bcx = r.bcx;
            val = r.val;
            bcx = copy_ty(bcx, INIT, cx.fcx.llretptr, val, t).bcx;
        }
        case (_) { /* fall through */  }
    }

    // Run all cleanups and back out.
    let bool more_cleanups = true;
    auto cleanup_cx = cx;
    while (more_cleanups) {
        bcx = trans_block_cleanups(bcx, cleanup_cx);
        alt (cleanup_cx.parent) {
            case (parent_some(?b)) {
                cleanup_cx = b;
            }
            case (parent_none) {
                more_cleanups = false;
            }
        }
    }

    bcx.build.RetVoid();
    ret res(bcx, C_nil());
}

fn trans_be(@block_ctxt cx, @ast.expr e) -> result {
    // FIXME: This should be a typestate precondition
    check (ast.is_call_expr(e));
    // FIXME: Turn this into a real tail call once
    // calling convention issues are settled
    ret trans_ret(cx, some(e));
}

fn trans_port(@block_ctxt cx, ast.ann ann) -> result {

    auto t = node_ann_type(cx.fcx.ccx, ann);
    auto unit_ty;
    alt (t.struct) {
        case (ty.ty_port(?t)) {
            unit_ty = t;
        }
        case (_) {
            cx.fcx.ccx.sess.bug("non-port type in trans_port");
            fail;
        }
    }

    auto llunit_ty = type_of(cx.fcx.ccx, unit_ty);

    auto bcx = cx;
    auto unit_sz = size_of(bcx, unit_ty);
    bcx = unit_sz.bcx;
    auto sub = trans_upcall(bcx, "upcall_new_port", vec(unit_sz.val));
    bcx = sub.bcx;
    auto llty = type_of(cx.fcx.ccx, t);
    auto port_val = vi2p(bcx, sub.val, llty);
    auto dropref = clean(bind drop_ty(_, port_val, t));
    find_scope_cx(bcx).cleanups += vec(dropref);

    ret res(bcx, port_val);
}

fn trans_chan(@block_ctxt cx, @ast.expr e, ast.ann ann) -> result {

    auto bcx = cx;
    auto prt = trans_expr(bcx, e);
    bcx = prt.bcx;

    auto prt_val = vp2i(bcx, prt.val);
    auto sub = trans_upcall(bcx, "upcall_new_chan", vec(prt_val));
    bcx = sub.bcx;

    auto chan_ty = node_ann_type(bcx.fcx.ccx, ann);
    auto chan_llty = type_of(bcx.fcx.ccx, chan_ty);
    auto chan_val = vi2p(bcx, sub.val, chan_llty);
    auto dropref = clean(bind drop_ty(_, chan_val, chan_ty));
    find_scope_cx(bcx).cleanups += vec(dropref);

    ret res(bcx, chan_val);
}

fn trans_send(@block_ctxt cx, @ast.expr lhs, @ast.expr rhs,
              ast.ann ann) -> result {

    auto bcx = cx;
    auto chn = trans_expr(bcx, lhs);
    bcx = chn.bcx;
    auto data = trans_expr(bcx, rhs);
    bcx = data.bcx;

    auto chan_ty = node_ann_type(cx.fcx.ccx, ann);
    auto unit_ty;
    alt (chan_ty.struct) {
        case (ty.ty_chan(?t)) {
            unit_ty = t;
        }
        case (_) {
            bcx.fcx.ccx.sess.bug("non-chan type in trans_send");
            fail;
        }
    }

    auto data_alloc = alloc_ty(bcx, unit_ty);
    bcx = data_alloc.bcx;
    auto data_tmp = copy_ty(bcx, INIT, data_alloc.val, data.val, unit_ty);
    bcx = data_tmp.bcx;

    find_scope_cx(bcx).cleanups +=
        vec(clean(bind drop_ty(_, data_alloc.val, unit_ty)));

    auto sub = trans_upcall(bcx, "upcall_send",
                            vec(vp2i(bcx, chn.val),
                                vp2i(bcx, data_alloc.val)));
    bcx = sub.bcx;

    ret res(bcx, chn.val);
}

fn trans_recv(@block_ctxt cx, @ast.expr lhs, @ast.expr rhs,
              ast.ann ann) -> result {

    auto bcx = cx;
    auto data = trans_lval(bcx, lhs);
    check (data.is_mem);
    bcx = data.res.bcx;
    auto unit_ty = node_ann_type(bcx.fcx.ccx, ann);

    // FIXME: calculate copy init-ness in typestate.
    ret recv_val(bcx, data.res.val, rhs, unit_ty, DROP_EXISTING);
}

fn recv_val(@block_ctxt cx, ValueRef lhs, @ast.expr rhs,
            @ty.t unit_ty, copy_action action) -> result {

    auto bcx = cx;
    auto prt = trans_expr(bcx, rhs);
    bcx = prt.bcx;

    auto sub = trans_upcall(bcx, "upcall_recv",
                            vec(vp2i(bcx, lhs),
                                vp2i(bcx, prt.val)));
    bcx = sub.bcx;

    auto data_load = load_scalar_or_boxed(bcx, lhs, unit_ty);
    auto cp = copy_ty(bcx, action, lhs, data_load, unit_ty);
    bcx = cp.bcx;

    // TODO: Does any cleanup need to be done here?

    ret res(bcx, lhs);
}

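// Initializes a local slot: registers a drop cleanup for it, then either
// translates the initializer (plain assignment or recv) or fills the slot
// with zero/null so the drop glue sees a well-defined value.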
fn init_local(@block_ctxt cx, @ast.local local) -> result {

    // Make a note to drop this slot on the way out.
    check (cx.fcx.lllocals.contains_key(local.id));
    auto llptr = cx.fcx.lllocals.get(local.id);
    auto ty = node_ann_type(cx.fcx.ccx, local.ann);
    auto bcx = cx;

    find_scope_cx(cx).cleanups +=
        vec(clean(bind drop_slot(_, llptr, ty)));

    alt (local.init) {
        case (some[ast.initializer](?init)) {
            alt (init.op) {
                case (ast.init_assign) {
                    auto sub = trans_expr(bcx, init.expr);
                    bcx = copy_ty(sub.bcx, INIT, llptr, sub.val, ty).bcx;
                }
                case (ast.init_recv) {
                    bcx = recv_val(bcx, llptr, init.expr, ty, INIT).bcx;
                }
            }
        }
        case (_) {
            if (middle.ty.type_has_dynamic_size(ty)) {
                auto llsz = size_of(bcx, ty);
                bcx = call_bzero(llsz.bcx, llptr, llsz.val).bcx;

            } else {
                auto llty = type_of(bcx.fcx.ccx, ty);
                auto null = lib.llvm.llvm.LLVMConstNull(llty);
                bcx.build.Store(null, llptr);
            }
        }
    }
    ret res(bcx, llptr);
}

fn trans_stmt(@block_ctxt cx, &ast.stmt s) -> result {
    auto bcx = cx;
    alt (s.node) {
        case (ast.stmt_expr(?e,_)) {
            bcx = trans_expr(cx, e).bcx;
        }

        case (ast.stmt_decl(?d,_)) {
            alt (d.node) {
                case (ast.decl_local(?local)) {
                    bcx = init_local(bcx, local).bcx;
                }
                case (ast.decl_item(?i)) {
                    trans_item(cx.fcx.ccx, *i);
                }
            }
        }
        case (_) {
            cx.fcx.ccx.sess.unimpl("stmt variant");
        }
    }
    ret res(bcx, C_nil());
}

fn new_builder(BasicBlockRef llbb) -> builder {
    let BuilderRef llbuild = llvm.LLVMCreateBuilder();
    llvm.LLVMPositionBuilderAtEnd(llbuild, llbb);
    ret builder(llbuild);
}

// You probably don't want to use this one. See the
// next three functions instead.
fn new_block_ctxt(@fn_ctxt cx, block_parent parent,
                  block_kind kind,
                  str name) -> @block_ctxt {
    let vec[cleanup] cleanups = vec();
    let BasicBlockRef llbb =
        llvm.LLVMAppendBasicBlock(cx.llfn,
                                  _str.buf(cx.ccx.names.next(name)));

    ret @rec(llbb=llbb,
             build=new_builder(llbb),
             parent=parent,
             kind=kind,
             mutable cleanups=cleanups,
             fcx=cx);
}

// Use this when you're at the top block of a function or the like.
fn new_top_block_ctxt(@fn_ctxt fcx) -> @block_ctxt {
    ret new_block_ctxt(fcx, parent_none, SCOPE_BLOCK,
                       "function top level");
}

// Use this when you're at a curly-brace or similar lexical scope.
fn new_scope_block_ctxt(@block_ctxt bcx, str n) -> @block_ctxt {
    ret new_block_ctxt(bcx.fcx, parent_some(bcx), SCOPE_BLOCK, n);
}

fn new_loop_scope_block_ctxt(@block_ctxt bcx, option.t[@block_ctxt] _cont,
                             @block_ctxt _break, str n) -> @block_ctxt {
    ret new_block_ctxt(bcx.fcx, parent_some(bcx),
                       LOOP_SCOPE_BLOCK(_cont, _break), n);
}

// Use this when you're making a general CFG BB within a scope.
fn new_sub_block_ctxt(@block_ctxt bcx, str n) -> @block_ctxt {
    ret new_block_ctxt(bcx.fcx, parent_some(bcx), NON_SCOPE_BLOCK, n);
}


fn trans_block_cleanups(@block_ctxt cx,
                        @block_ctxt cleanup_cx) -> @block_ctxt {
    auto bcx = cx;

    if (cleanup_cx.kind == NON_SCOPE_BLOCK) {
        check (_vec.len[cleanup](cleanup_cx.cleanups) == 0u);
    }

    auto i = _vec.len[cleanup](cleanup_cx.cleanups);
    while (i > 0u) {
        i -= 1u;
        auto c = cleanup_cx.cleanups.(i);
        alt (c) {
            case (clean(?cfn)) {
                bcx = cfn(bcx).bcx;
            }
        }
    }
    ret bcx;
}

iter block_locals(&ast.block b) -> @ast.local {
    // FIXME: putting from inside an iter block doesn't work, so we can't
    // use the index here.
    for (@ast.stmt s in b.node.stmts) {
        alt (s.node) {
            case (ast.stmt_decl(?d,_)) {
                alt (d.node) {
                    case (ast.decl_local(?local)) {
                        put local;
                    }
                    case (_) { /* fall through */ }
                }
            }
            case (_) { /* fall through */ }
        }
    }
}

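// Wraps the function's 'allocas' block in a block_ctxt so that size
// computations and allocas can be emitted there, ahead of the body blocks
// that use them.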
fn llallocas_block_ctxt(@fn_ctxt fcx) -> @block_ctxt {
    let vec[cleanup] cleanups = vec();
    ret @rec(llbb=fcx.llallocas,
             build=new_builder(fcx.llallocas),
             parent=parent_none,
             kind=SCOPE_BLOCK,
             mutable cleanups=cleanups,
             fcx=fcx);
}

fn alloc_ty(@block_ctxt cx, @ty.t t) -> result {
    auto val = C_int(0);
    if (ty.type_has_dynamic_size(t)) {

        // NB: we have to run this particular 'size_of' in a
        // block_ctxt built on the llallocas block for the fn,
        // so that the size dominates the array_alloca that
        // comes next.

        auto n = size_of(llallocas_block_ctxt(cx.fcx), t);
        cx.fcx.llallocas = n.bcx.llbb;
        val = array_alloca(cx, T_i8(), n.val);
    } else {
        val = alloca(cx, type_of(cx.fcx.ccx, t));
    }
    // NB: since we've pushed all size calculations in this
    // function up to the alloca block, we actually return the
    // block passed into us unmodified; it doesn't really
    // have to be passed-and-returned here, but it fits
    // past callers' conventions and may well make sense again,
    // so we leave it as-is.
    ret res(cx, val);
}

fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
    auto t = node_ann_type(cx.fcx.ccx, local.ann);
    auto r = alloc_ty(cx, t);
    r.bcx.fcx.lllocals.insert(local.id, r.val);
    ret r;
}

fn trans_block(@block_ctxt cx, &ast.block b) -> result {
    auto bcx = cx;

    for each (@ast.local local in block_locals(b)) {
        bcx = alloc_local(bcx, local).bcx;
    }
    auto r = res(bcx, C_nil());

    for (@ast.stmt s in b.node.stmts) {
        r = trans_stmt(bcx, *s);
        bcx = r.bcx;
        // If we hit a terminator, control won't go any further so
        // we're in dead-code land. Stop here.
        if (is_terminated(bcx)) {
            ret r;
        }
    }

    alt (b.node.expr) {
        case (some[@ast.expr](?e)) {
            r = trans_expr(bcx, e);
            bcx = r.bcx;

            if (is_terminated(bcx)) {
                ret r;
            } else {
                auto r_ty = ty.expr_ty(e);

                if (ty.type_is_boxed(r_ty)) {
                    // The value resulting from the block gets copied into an
                    // alloca created in an outer scope and its refcount
                    // bumped so that it can escape this block. This means
                    // that it will definitely live until the end of the
                    // enclosing scope, even if nobody uses it, which may be
                    // something of a surprise.

                    // It's possible we never hit this block, so the alloca
                    // must be initialized to null, then when the potential
                    // value finally goes out of scope the drop glue will see
                    // that it was never used and ignore it.

                    // NB: Here we're building and initializing the alloca in
                    // the alloca context, not this block's context.
                    auto res_alloca = alloc_ty(bcx, r_ty);
                    auto alloca_ty = type_of(bcx.fcx.ccx, r_ty);
                    auto builder = new_builder(bcx.fcx.llallocas);
                    builder.Store(C_null(alloca_ty), res_alloca.val);

                    // Now we're working in our own block context again
                    auto res_copy = copy_ty(bcx, INIT,
                                            res_alloca.val, r.val, r_ty);
                    bcx = res_copy.bcx;

                    fn drop_hoisted_ty(@block_ctxt cx,
                                       ValueRef alloca_val,
                                       @ty.t t) -> result {
                        auto reg_val = load_scalar_or_boxed(cx,
                                                            alloca_val, t);
                        ret drop_ty(cx, reg_val, t);
                    }

                    auto cleanup = bind drop_hoisted_ty(_, res_alloca.val,
                                                        r_ty);
                    find_outer_scope_cx(bcx).cleanups += vec(clean(cleanup));
                }
            }
        }
        case (none[@ast.expr]) {
            r = res(bcx, C_nil());
        }
    }

    bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
    ret res(bcx, r.val);
}

// NB: must keep 4 fns in sync:
//
//  - type_of_fn_full
//  - create_llargs_for_fn_args.
//  - new_fn_ctxt
//  - trans_args

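// Builds a fresh fn_ctxt for a declared function. The first three LLVM
// parameters are fixed by the calling convention described above:
// 0 = return-value slot, 1 = task pointer, 2 = environment.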
fn new_fn_ctxt(@crate_ctxt cx,
               ValueRef llfndecl) -> @fn_ctxt {

    let ValueRef llretptr = llvm.LLVMGetParam(llfndecl, 0u);
    let ValueRef lltaskptr = llvm.LLVMGetParam(llfndecl, 1u);
    let ValueRef llenv = llvm.LLVMGetParam(llfndecl, 2u);

    let hashmap[ast.def_id, ValueRef] llargs = new_def_hash[ValueRef]();
    let hashmap[ast.def_id, ValueRef] llobjfields = new_def_hash[ValueRef]();
    let hashmap[ast.def_id, ValueRef] lllocals = new_def_hash[ValueRef]();
    let hashmap[ast.def_id, ValueRef] llupvars = new_def_hash[ValueRef]();
    let hashmap[ast.def_id, ValueRef] lltydescs = new_def_hash[ValueRef]();

    let BasicBlockRef llallocas =
        llvm.LLVMAppendBasicBlock(llfndecl, _str.buf("allocas"));

    ret @rec(llfn=llfndecl,
             lltaskptr=lltaskptr,
             llenv=llenv,
             llretptr=llretptr,
             mutable llallocas = llallocas,
             mutable llself=none[self_vt],
             mutable lliterbody=none[ValueRef],
             llargs=llargs,
             llobjfields=llobjfields,
             lllocals=lllocals,
             llupvars=llupvars,
             lltydescs=lltydescs,
             ccx=cx);
}

// NB: must keep 4 fns in sync:
//
//  - type_of_fn_full
//  - create_llargs_for_fn_args.
//  - new_fn_ctxt
//  - trans_args

fn create_llargs_for_fn_args(&@fn_ctxt cx,
                             ast.proto proto,
                             option.t[tup(TypeRef, @ty.t)] ty_self,
                             @ty.t ret_ty,
                             &vec[ast.arg] args,
                             &vec[ast.ty_param] ty_params) {

    auto arg_n = 3u;

    alt (ty_self) {
        case (some[tup(TypeRef, @ty.t)](?tt)) {
            cx.llself = some[self_vt](rec(v = cx.llenv, t = tt._1));
        }
        case (none[tup(TypeRef, @ty.t)]) {
            for (ast.ty_param tp in ty_params) {
                auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
                check (llarg as int != 0);
                cx.lltydescs.insert(tp.id, llarg);
                arg_n += 1u;
            }
        }
    }

    if (proto == ast.proto_iter) {
        auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
        check (llarg as int != 0);
        cx.lliterbody = some[ValueRef](llarg);
        arg_n += 1u;
    }

    for (ast.arg arg in args) {
        auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
        check (llarg as int != 0);
        cx.llargs.insert(arg.id, llarg);
        arg_n += 1u;
    }
}

// Recommended LLVM style, strange though this is, is to copy from args to
// allocas immediately upon entry; this permits us to GEP into structures we
// were passed and whatnot. Apparently mem2reg will mop up.

fn copy_any_self_to_alloca(@fn_ctxt fcx,
                           option.t[tup(TypeRef, @ty.t)] ty_self) {

    auto bcx = llallocas_block_ctxt(fcx);

    alt (fcx.llself) {
        case (some[self_vt](?s_vt)) {
            alt (ty_self) {
                case (some[tup(TypeRef, @ty.t)](?tt)) {
                    auto a = alloca(bcx, tt._0);
                    bcx.build.Store(s_vt.v, a);
                    fcx.llself = some[self_vt](rec(v = a, t = s_vt.t));
                }
            }
        }
        case (_) {
        }
    }
}


fn copy_args_to_allocas(@fn_ctxt fcx,
                        vec[ast.arg] args,
                        vec[ty.arg] arg_tys) {

    auto bcx = llallocas_block_ctxt(fcx);

    let uint arg_n = 0u;

    for (ast.arg aarg in args) {
        if (aarg.mode != ast.alias) {
            auto arg_t = type_of_arg(fcx.ccx, arg_tys.(arg_n));
            auto a = alloca(bcx, arg_t);
            auto argval = fcx.llargs.get(aarg.id);
            bcx.build.Store(argval, a);
            // Overwrite the llargs entry for this arg with its alloca.
            fcx.llargs.insert(aarg.id, a);
        }

        arg_n += 1u;
    }

    fcx.llallocas = bcx.llbb;
}

fn is_terminated(@block_ctxt cx) -> bool {
    auto inst = llvm.LLVMGetLastInstruction(cx.llbb);
    ret llvm.LLVMIsATerminatorInst(inst) as int != 0;
}

fn arg_tys_of_fn(ast.ann ann) -> vec[ty.arg] {
    alt (ty.ann_to_type(ann).struct) {
        case (ty.ty_fn(_, ?arg_tys, _)) {
            ret arg_tys;
        }
    }
    fail;
}

fn ret_ty_of_fn_ty(@ty.t t) -> @ty.t {
    alt (t.struct) {
        case (ty.ty_fn(_, _, ?ret_ty)) {
            ret ret_ty;
        }
    }
    fail;
}


fn ret_ty_of_fn(ast.ann ann) -> @ty.t {
    ret ret_ty_of_fn_ty(ty.ann_to_type(ann));
}

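// For obj methods: unpacks the self value, loads its box, and records the
// captured type parameters and object fields in the fn_ctxt so later code
// can address them directly.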
fn populate_fn_ctxt_from_llself(@fn_ctxt fcx, self_vt llself) {
    auto bcx = llallocas_block_ctxt(fcx);

    let vec[@ty.t] field_tys = vec();

    for (ast.obj_field f in bcx.fcx.ccx.obj_fields) {
        field_tys += vec(node_ann_type(bcx.fcx.ccx, f.ann));
    }

    // Synthesize a tuple type for the fields so that GEP_tup_like() can work
    // its magic.
    auto fields_tup_ty = ty.plain_tup_ty(field_tys);

    auto n_typarams = _vec.len[ast.ty_param](bcx.fcx.ccx.obj_typarams);
    let TypeRef llobj_box_ty = T_obj_ptr(bcx.fcx.ccx.tn, n_typarams);

    auto box_cell =
        bcx.build.GEP(llself.v,
                      vec(C_int(0),
                          C_int(abi.obj_field_box)));

    auto box_ptr = bcx.build.Load(box_cell);

    box_ptr = bcx.build.PointerCast(box_ptr, llobj_box_ty);

    auto obj_typarams = bcx.build.GEP(box_ptr,
                                     vec(C_int(0),
                                         C_int(abi.box_rc_field_body),
                                         C_int(abi.obj_body_elt_typarams)));

    // The object fields immediately follow the type parameters, so we skip
    // over them to get the pointer.
    auto obj_fields = bcx.build.Add(vp2i(bcx, obj_typarams),
        llsize_of(llvm.LLVMGetElementType(val_ty(obj_typarams))));

    // If we can (i.e. the type is statically sized), then cast the resulting
    // fields pointer to the appropriate LLVM type. If not, just leave it as
    // i8 *.
    if (!ty.type_has_dynamic_size(fields_tup_ty)) {
        auto llfields_ty = type_of(fcx.ccx, fields_tup_ty);
        obj_fields = vi2p(bcx, obj_fields, T_ptr(llfields_ty));
    } else {
        obj_fields = vi2p(bcx, obj_fields, T_ptr(T_i8()));
    }


    let int i = 0;

    for (ast.ty_param p in fcx.ccx.obj_typarams) {
        let ValueRef lltyparam = bcx.build.GEP(obj_typarams,
                                               vec(C_int(0),
                                                   C_int(i)));
        lltyparam = bcx.build.Load(lltyparam);
        fcx.lltydescs.insert(p.id, lltyparam);
        i += 1;
    }

    i = 0;
    for (ast.obj_field f in fcx.ccx.obj_fields) {
        auto rslt = GEP_tup_like(bcx, fields_tup_ty, obj_fields, vec(0, i));
        bcx = llallocas_block_ctxt(fcx);
        auto llfield = rslt.val;
        fcx.llobjfields.insert(f.id, llfield);
        i += 1;
    }

    fcx.llallocas = bcx.llbb;
}

fn trans_fn(@crate_ctxt cx, &ast._fn f, ast.def_id fid,
            option.t[tup(TypeRef, @ty.t)] ty_self,
            &vec[ast.ty_param] ty_params, &ast.ann ann) {

    auto llfndecl = cx.item_ids.get(fid);

    auto fcx = new_fn_ctxt(cx, llfndecl);
    create_llargs_for_fn_args(fcx, f.proto,
                              ty_self, ret_ty_of_fn(ann),
                              f.decl.inputs, ty_params);

    copy_any_self_to_alloca(fcx, ty_self);

    alt (fcx.llself) {
        case (some[self_vt](?llself)) {
            populate_fn_ctxt_from_llself(fcx, llself);
        }
        case (_) {
        }
    }

    copy_args_to_allocas(fcx, f.decl.inputs, arg_tys_of_fn(ann));

    auto bcx = new_top_block_ctxt(fcx);
    auto lltop = bcx.llbb;

    auto res = trans_block(bcx, f.body);
    if (!is_terminated(res.bcx)) {
        // FIXME: until LLVM has a unit type, we are moving around
        // C_nil values rather than their void type.
        res.bcx.build.RetVoid();
    }

    // Tie up the llallocas -> lltop edge.
    new_builder(fcx.llallocas).Br(lltop);
}

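// Builds the vtable for an obj: methods are sorted by name for a stable
// layout, each one is translated against the obj's self type, and the
// resulting function pointers are emitted as an internal constant global.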
fn trans_vtbl(@crate_ctxt cx, 
              TypeRef llself_ty,
              @ty.t self_ty,
              &ast._obj ob,
              &vec[ast.ty_param] ty_params) -> ValueRef {
    let vec[ValueRef] methods = vec();

    fn meth_lteq(&@ast.method a, &@ast.method b) -> bool {
        ret _str.lteq(a.node.ident, b.node.ident);
    }

    auto meths = std.sort.merge_sort[@ast.method](bind meth_lteq(_,_),
                                                  ob.methods);

    for (@ast.method m in meths) {

        auto llfnty = T_nil();
        alt (node_ann_type(cx, m.node.ann).struct) {
            case (ty.ty_fn(?proto, ?inputs, ?output)) {
                llfnty = type_of_fn_full(cx, proto,
                                         some[TypeRef](llself_ty),
                                         inputs, output,
                                         _vec.len[ast.ty_param](ty_params));
            }
        }

        let @crate_ctxt mcx = extend_path(cx, m.node.ident);
        let str s = mangle_name_by_seq(mcx, "method");
        let ValueRef llfn = decl_internal_fastcall_fn(cx.llmod, s, llfnty);
        cx.item_ids.insert(m.node.id, llfn);
        cx.item_symbols.insert(m.node.id, s);

        trans_fn(mcx, m.node.meth, m.node.id, 
                 some[tup(TypeRef, @ty.t)](tup(llself_ty, self_ty)),
                 ty_params, m.node.ann);
        methods += vec(llfn);
    }
    auto vtbl = C_struct(methods);
    auto vtbl_name = mangle_name_by_seq(cx, "vtbl");
    auto gvar = llvm.LLVMAddGlobal(cx.llmod, val_ty(vtbl),
                                   _str.buf(vtbl_name));
    llvm.LLVMSetInitializer(gvar, vtbl);
    llvm.LLVMSetGlobalConstant(gvar, True);
    llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMInternalLinkage
                        as llvm.Linkage);
    ret gvar;
}

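// Translates an obj constructor: fills in the returned (vtbl, box) pair
// and, when there are fields or type parameters to capture, mallocs a
// boxed body holding a tydesc, the captured tydescs, and the field values.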
fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
             &vec[ast.ty_param] ty_params, &ast.ann ann) {

    auto llctor_decl = cx.item_ids.get(oid);

    // Translate obj ctor args to function arguments.
    let vec[ast.arg] fn_args = vec();
    for (ast.obj_field f in ob.fields) {
        fn_args += vec(rec(mode=ast.alias,
                           ty=f.ty,
                           ident=f.ident,
                           id=f.id));
    }

    auto fcx = new_fn_ctxt(cx, llctor_decl);
    create_llargs_for_fn_args(fcx, ast.proto_fn,
                              none[tup(TypeRef, @ty.t)], 
                              ret_ty_of_fn(ann),
                              fn_args, ty_params);

    let vec[ty.arg] arg_tys = arg_tys_of_fn(ann);
    copy_args_to_allocas(fcx, fn_args, arg_tys);

    auto bcx = new_top_block_ctxt(fcx);
    auto lltop = bcx.llbb;

    auto self_ty = ret_ty_of_fn(ann);
    auto llself_ty = type_of(cx, self_ty);
    auto pair = bcx.fcx.llretptr;
    auto vtbl = trans_vtbl(cx, llself_ty, self_ty, ob, ty_params);
    auto pair_vtbl = bcx.build.GEP(pair,
                                   vec(C_int(0),
                                       C_int(abi.obj_field_vtbl)));
    auto pair_box = bcx.build.GEP(pair,
                                  vec(C_int(0),
                                      C_int(abi.obj_field_box)));
    bcx.build.Store(vtbl, pair_vtbl);

    let TypeRef llbox_ty = T_opaque_obj_ptr(cx.tn);

    if (_vec.len[ast.ty_param](ty_params) == 0u &&
        _vec.len[ty.arg](arg_tys) == 0u) {
        // Store null into pair, if no args or typarams.
        bcx.build.Store(C_null(llbox_ty), pair_box);
    } else {
        // Malloc a box for the body and copy args in.
        let vec[@ty.t] obj_fields = vec();
        for (ty.arg a in arg_tys) {
            _vec.push[@ty.t](obj_fields, a.ty);
        }

        // Synthesize an obj body type.
        auto tydesc_ty = plain_ty(ty.ty_type);
        let vec[@ty.t] tps = vec();
        for (ast.ty_param tp in ty_params) {
            _vec.push[@ty.t](tps, tydesc_ty);
        }

        let @ty.t typarams_ty = ty.plain_tup_ty(tps);
        let @ty.t fields_ty = ty.plain_tup_ty(obj_fields);
        let @ty.t body_ty = ty.plain_tup_ty(vec(tydesc_ty,
                                                typarams_ty,
                                                fields_ty));
        let @ty.t boxed_body_ty = ty.plain_box_ty(body_ty, ast.imm);

        // Malloc a box for the body.
        auto box = trans_malloc_boxed(bcx, body_ty);
        bcx = box.bcx;
        auto rc = GEP_tup_like(bcx, boxed_body_ty, box.val,
                               vec(0, abi.box_rc_field_refcnt));
        bcx = rc.bcx;
        auto body = GEP_tup_like(bcx, boxed_body_ty, box.val,
                                 vec(0, abi.box_rc_field_body));
        bcx = body.bcx;
        bcx.build.Store(C_int(1), rc.val);

        // Store body tydesc.
        auto body_tydesc =
            GEP_tup_like(bcx, body_ty, body.val,
                         vec(0, abi.obj_body_elt_tydesc));
        bcx = body_tydesc.bcx;

        auto body_td = get_tydesc(bcx, body_ty);
        bcx = body_td.bcx;
        bcx.build.Store(body_td.val, body_tydesc.val);

        // Copy typarams into captured typarams.
        auto body_typarams =
            GEP_tup_like(bcx, body_ty, body.val,
                         vec(0, abi.obj_body_elt_typarams));
        bcx = body_typarams.bcx;
        let int i = 0;
        for (ast.ty_param tp in ty_params) {
            auto typaram = bcx.fcx.lltydescs.get(tp.id);
            auto capture = GEP_tup_like(bcx, typarams_ty, body_typarams.val,
                                        vec(0, i));
            bcx = capture.bcx;
            bcx = copy_ty(bcx, INIT, capture.val, typaram, tydesc_ty).bcx;
            i += 1;
        }

        // Copy args into body fields.
        auto body_fields =
            GEP_tup_like(bcx, body_ty, body.val,
                         vec(0, abi.obj_body_elt_fields));
        bcx = body_fields.bcx;

        i = 0;
        for (ast.obj_field f in ob.fields) {
            auto arg = bcx.fcx.llargs.get(f.id);
            arg = load_scalar_or_boxed(bcx, arg, arg_tys.(i).ty);
            auto field = GEP_tup_like(bcx, fields_ty, body_fields.val,
                                      vec(0, i));
            bcx = field.bcx;
            bcx = copy_ty(bcx, INIT, field.val, arg, arg_tys.(i).ty).bcx;
            i += 1;
        }
        // Store box ptr in outer pair.
        auto p = bcx.build.PointerCast(box.val, llbox_ty);
        bcx.build.Store(p, pair_box);
    }
    bcx.build.RetVoid();

    // Tie up the llallocas -> lltop edge.
    new_builder(fcx.llallocas).Br(lltop);
}

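// Translate one non-nullary tag variant into its constructor function: the
// generated fn stores the variant's discriminant into the tag value at
// llretptr and copies each constructor argument into the variant's blob.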
fn trans_tag_variant(@crate_ctxt cx, ast.def_id tag_id,
                     &ast.variant variant, int index,
                     &vec[ast.ty_param] ty_params) {
    if (_vec.len[ast.variant_arg](variant.node.args) == 0u) {
        ret;    // nullary constructors are just constants
    }

    // Translate variant arguments to function arguments.
    let vec[ast.arg] fn_args = vec();
    auto i = 0u;
    for (ast.variant_arg varg in variant.node.args) {
        fn_args += vec(rec(mode=ast.alias,
                           ty=varg.ty,
                           ident="arg" + _uint.to_str(i, 10u),
                           id=varg.id));
        i += 1u;
    }

    check (cx.item_ids.contains_key(variant.node.id));
    let ValueRef llfndecl = cx.item_ids.get(variant.node.id);

    auto fcx = new_fn_ctxt(cx, llfndecl);

    create_llargs_for_fn_args(fcx, ast.proto_fn,
                              none[tup(TypeRef, @ty.t)], 
                              ret_ty_of_fn(variant.node.ann),
                              fn_args, ty_params);

    let vec[@ty.t] ty_param_substs = vec();
    for (ast.ty_param tp in ty_params) {
        ty_param_substs += vec(plain_ty(ty.ty_param(tp.id)));
    }

    auto arg_tys = arg_tys_of_fn(variant.node.ann);
    copy_args_to_allocas(fcx, fn_args, arg_tys);

    auto bcx = new_top_block_ctxt(fcx);
    auto lltop = bcx.llbb;

    // Cast the tag to a type we can GEP into.
    auto lltagptr = bcx.build.PointerCast(fcx.llretptr,
                                          T_opaque_tag_ptr(fcx.ccx.tn));

    auto lldiscrimptr = bcx.build.GEP(lltagptr,
                                      vec(C_int(0), C_int(0)));
    bcx.build.Store(C_int(index), lldiscrimptr);

    auto llblobptr = bcx.build.GEP(lltagptr,
                                   vec(C_int(0), C_int(1)));

    i = 0u;
    for (ast.variant_arg va in variant.node.args) {
        auto rslt = GEP_tag(bcx, llblobptr, tag_id, variant.node.id,
                            ty_param_substs, i as int);
        bcx = rslt.bcx;
        auto lldestptr = rslt.val;

        // If this argument is a tag, it will have come into this function as
        // an opaque blob due to the way type_of() works, so we have to cast
        // it to the destination's view of the type.
        auto llargptr = bcx.build.PointerCast(fcx.llargs.get(va.id),
            val_ty(lldestptr));

        auto arg_ty = arg_tys.(i).ty;
        auto llargval;
        if (ty.type_is_structural(arg_ty) ||
                ty.type_has_dynamic_size(arg_ty)) {
            llargval = llargptr;
        } else {
            llargval = bcx.build.Load(llargptr);
        }

        rslt = copy_ty(bcx, INIT, lldestptr, llargval, arg_ty);
        bcx = rslt.bcx;

        i += 1u;
    }

    bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
    bcx.build.RetVoid();

    // Tie up the llallocas -> lltop edge.
    new_builder(fcx.llallocas).Br(lltop);
}

// FIXME: this should do some structural hash-consing to avoid
// duplicate constants. I think. Maybe LLVM has a magical mode
// that does so later on?

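// Translate a constant expression. Only literal expressions are handled so
// far; anything else fails to match the alt below.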
fn trans_const_expr(@crate_ctxt cx, @ast.expr e) -> ValueRef {
    alt (e.node) {
        case (ast.expr_lit(?lit, ?ann)) {
            ret trans_lit(cx, *lit, ann);
        }
    }
}

fn trans_const(@crate_ctxt cx, @ast.expr e,
               &ast.def_id cid, &ast.ann ann) {
    auto t = node_ann_type(cx, ann);
    auto v = trans_const_expr(cx, e);

    // The scalars come back as 1st class LLVM vals
    // which we have to stick into global constants.
    auto g = cx.consts.get(cid);
    llvm.LLVMSetInitializer(g, v);
    llvm.LLVMSetGlobalConstant(g, True);
}

fn trans_item(@crate_ctxt cx, &ast.item item) {
    alt (item.node) {
        case (ast.item_fn(?name, ?f, ?tps, ?fid, ?ann)) {
            auto sub_cx = extend_path(cx, name);
            trans_fn(sub_cx, f, fid, none[tup(TypeRef, @ty.t)], tps, ann);
        }
        case (ast.item_obj(?name, ?ob, ?tps, ?oid, ?ann)) {
            auto sub_cx = @rec(obj_typarams=tps,
                               obj_fields=ob.fields with
                               *extend_path(cx, name));
            trans_obj(sub_cx, ob, oid.ctor, tps, ann);
        }
        case (ast.item_mod(?name, ?m, _)) {
            auto sub_cx = extend_path(cx, name);
            trans_mod(sub_cx, m);
        }
        case (ast.item_tag(?name, ?variants, ?tps, ?tag_id, _)) {
            auto sub_cx = extend_path(cx, name);
            auto i = 0;
            for (ast.variant variant in variants) {
                trans_tag_variant(sub_cx, tag_id, variant, i, tps);
                i += 1;
            }
        }
        case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
            auto sub_cx = extend_path(cx, name);
            trans_const(sub_cx, expr, cid, ann);
        }
        case (_) { /* fall through */ }
    }
}

fn trans_mod(@crate_ctxt cx, &ast._mod m) {
    for (@ast.item item in m.items) {
        trans_item(cx, *item);
    }
}

fn get_pair_fn_ty(TypeRef llpairty) -> TypeRef {
    // Bit of a kludge: pick the fn typeref out of the pair.
    let vec[TypeRef] pair_tys = vec(T_nil(), T_nil());
    llvm.LLVMGetStructElementTypes(llpairty,
                                   _vec.buf[TypeRef](pair_tys));
    ret llvm.LLVMGetElementType(pair_tys.(0));
}

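// Declare a fn item: the function itself (internal, fastcall) plus the
// global constant pair (fn ptr, null closure ptr) that lets it be used as a
// first-class value.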
fn decl_fn_and_pair(@crate_ctxt cx,
                    str flav,
                    vec[ast.ty_param] ty_params,
                    &ast.ann ann,
                    ast.def_id id) {

    auto llfty;
    auto llpairty;
    alt (node_ann_type(cx, ann).struct) {
        case (ty.ty_fn(?proto, ?inputs, ?output)) {
            llfty = type_of_fn(cx, proto, inputs, output,
                               _vec.len[ast.ty_param](ty_params));
            llpairty = T_fn_pair(cx.tn, llfty);
        }
        case (_) {
            cx.sess.bug("decl_fn_and_pair(): fn item doesn't have fn type?!");
            fail;
        }
    }

    // Declare the function itself.
    let str s = mangle_name_by_seq(cx, flav);
    let ValueRef llfn = decl_internal_fastcall_fn(cx.llmod, s, llfty);

    // Declare the global constant pair that points to it.
    let str ps = mangle_name_by_type(cx, node_ann_type(cx, ann));

    register_fn_pair(cx, ps, llpairty, llfn, id);
}

fn register_fn_pair(@crate_ctxt cx, str ps, TypeRef llpairty, ValueRef llfn,
                    ast.def_id id) {
    let ValueRef gvar = llvm.LLVMAddGlobal(cx.llmod, llpairty,
                                           _str.buf(ps));
    auto pair = C_struct(vec(llfn,
                             C_null(T_opaque_closure_ptr(cx.tn))));

    llvm.LLVMSetInitializer(gvar, pair);
    llvm.LLVMSetGlobalConstant(gvar, True);
    llvm.LLVMSetVisibility(gvar,
                           lib.llvm.LLVMProtectedVisibility
                           as llvm.Visibility);

    cx.item_ids.insert(id, llfn);
    cx.item_symbols.insert(id, ps);
    cx.fn_pairs.insert(id, gvar);
}

// Returns the number of type parameters that the given native function has.
fn native_fn_ty_param_count(@crate_ctxt cx, &ast.def_id id) -> uint {
    auto count;
    auto native_item = cx.native_items.get(id);
    alt (native_item.node) {
        case (ast.native_item_ty(_, _)) {
            cx.sess.bug("native_fn_ty_param_count(): native fn isn't " +
                        "actually a fn?!");
            fail;
        }
        case (ast.native_item_fn(_, _, _, ?tps, _, _)) {
            count = _vec.len[ast.ty_param](tps);
        }
    }
    ret count;
}

fn native_fn_wrapper_type(@crate_ctxt cx, uint ty_param_count, @ty.t x)
        -> TypeRef {
    alt (x.struct) {
        case (ty.ty_native_fn(?abi, ?args, ?out)) {
            ret type_of_fn(cx, ast.proto_fn, args, out, ty_param_count);
        }
    }
    fail;
}

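// Declare a native fn: a rust-ABI wrapper function (and its fn pair) whose
// body marshals the arguments into word-sized values and calls through to
// the native code, either directly (llvm abi) or via the native glue.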
fn decl_native_fn_and_pair(@crate_ctxt cx,
                           str name,
                           &ast.ann ann,
                           ast.def_id id) {
    auto num_ty_param = native_fn_ty_param_count(cx, id);

    // Declare the wrapper.
    auto t = node_ann_type(cx, ann);
    auto wrapper_type = native_fn_wrapper_type(cx, num_ty_param, t);
    let str s = mangle_name_by_seq(cx, "wrapper");
    let ValueRef wrapper_fn = decl_internal_fastcall_fn(cx.llmod, s,
                                                       wrapper_type);

    // Declare the global constant pair that points to it.
    auto wrapper_pair_type = T_fn_pair(cx.tn, wrapper_type);
    let str ps = mangle_name_by_type(cx, node_ann_type(cx, ann));

    register_fn_pair(cx, ps, wrapper_pair_type, wrapper_fn, id);

    // Build the wrapper.
    auto fcx = new_fn_ctxt(cx, wrapper_fn);
    auto bcx = new_top_block_ctxt(fcx);
    auto lltop = bcx.llbb;

    // Declare the function itself.
    auto item = cx.native_items.get(id);
    auto fn_type = node_ann_type(cx, ann);  // NB: has no type params

    auto abi = ty.ty_fn_abi(fn_type);
    auto llfnty = type_of_native_fn(cx, abi, ty.ty_fn_args(fn_type),
                                    ty.ty_fn_ret(fn_type), num_ty_param);

    let vec[ValueRef] call_args = vec();
    auto arg_n = 3u;
    auto pass_task;

    auto lltaskptr = vp2i(bcx, fcx.lltaskptr);
    alt (abi) {
        case (ast.native_abi_rust) {
            pass_task = true;
            call_args += vec(lltaskptr);
            for each (uint i in _uint.range(0u, num_ty_param)) {
                auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
                check (llarg as int != 0);
                call_args += vec(vp2i(bcx, llarg));
                arg_n += 1u;
            }
        }
        case (ast.native_abi_cdecl) {
            pass_task = false;
        }
        case (ast.native_abi_llvm) {
            pass_task = false;
            // We handle this case below.
        }
    }

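    // Marshal one argument into the word-sized form the native call
    // expects: integral values are widened or truncated to the machine
    // word, floats are converted with FPToSI, and everything else is
    // passed as a pointer-sized integer.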
    fn push_arg(@block_ctxt cx,
                &mutable vec[ValueRef] args,
                ValueRef v,
                @ty.t t) {
        if (ty.type_is_integral(t)) {
            auto lldsttype = T_int();
            auto llsrctype = type_of(cx.fcx.ccx, t);
            if (llvm.LLVMGetIntTypeWidth(lldsttype) >
                llvm.LLVMGetIntTypeWidth(llsrctype)) {
                args += vec(cx.build.ZExtOrBitCast(v, T_int()));
            } else {
                args += vec(cx.build.TruncOrBitCast(v, T_int()));
            }
        } else if (ty.type_is_fp(t)) {
            args += vec(cx.build.FPToSI(v, T_int()));
        } else {
            args += vec(vp2i(cx, v));
        }
    }

    auto r;
    auto rptr;
    auto args = ty.ty_fn_args(fn_type);
    if (abi == ast.native_abi_llvm) {
        let vec[ValueRef] call_args = vec();
        let vec[TypeRef] call_arg_tys = vec();
        auto i = 0u;
        while (i < _vec.len[ty.arg](args)) {
            auto call_arg = llvm.LLVMGetParam(fcx.llfn, i + 3u);
            call_args += vec(call_arg);
            call_arg_tys += vec(val_ty(call_arg));
            i += 1u;
        }
        auto llnativefnty = T_fn(call_arg_tys,
                                 type_of(cx, ty.ty_fn_ret(fn_type)));
        auto llnativefn = get_extern_fn(cx.externs, cx.llmod, name,
                                        lib.llvm.LLVMCCallConv, llnativefnty);
        r = bcx.build.Call(llnativefn, call_args);
        rptr = fcx.llretptr;
    } else {
        for (ty.arg arg in args) {
            auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
            check (llarg as int != 0);
            push_arg(bcx, call_args, llarg, arg.ty);
            arg_n += 1u;
        }

        r = trans_native_call(bcx.build, cx.glues, lltaskptr, cx.externs,
                              cx.tn, cx.llmod, name, pass_task, call_args);
        rptr = bcx.build.BitCast(fcx.llretptr, T_ptr(T_i32()));
    }

    bcx.build.Store(r, rptr);
    bcx.build.RetVoid();

    // Tie up the llallocas -> lltop edge.
    new_builder(fcx.llallocas).Br(lltop);
}

fn collect_native_item(&@crate_ctxt cx, @ast.native_item i) -> @crate_ctxt {
    alt (i.node) {
        case (ast.native_item_fn(?name, _, _, _, ?fid, ?ann)) {
            cx.native_items.insert(fid, i);
            if (! cx.obj_methods.contains_key(fid)) {
                decl_native_fn_and_pair(cx, name, ann, fid);
            }
        }
        case (ast.native_item_ty(_, ?tid)) {
            cx.native_items.insert(tid, i);
        }
    }
    ret cx;
}

fn item_name(@ast.item i) -> str {
    alt (i.node) {
        case (ast.item_mod(?name, _, _)) {
            ret name;
        }
        case (ast.item_tag(?name, _, _, _, _)) {
            ret name;
        }
        case (ast.item_const(?name, _, _, _, _)) {
            ret name;
        }
        case (ast.item_fn(?name, _, _, _, _)) {
            ret name;
        }
        case (ast.item_native_mod(?name, _, _)) {
            ret name;
        }
        case (ast.item_ty(?name, _, _, _, _)) {
            ret name;
        }
        case (ast.item_obj(?name, _, _, _, _)) {
            ret name;
        }
    }
}

fn collect_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
    alt (i.node) {
        case (ast.item_const(?name, _, _, ?cid, ?ann)) {
            auto typ = node_ann_type(cx, ann);
            auto g = llvm.LLVMAddGlobal(cx.llmod, type_of(cx, typ),
                                        _str.buf(cx.names.next(name)));
            llvm.LLVMSetLinkage(g, lib.llvm.LLVMInternalLinkage
                                as llvm.Linkage);
            cx.items.insert(cid, i);
            cx.consts.insert(cid, g);
        }

        case (ast.item_fn(_, _, _, ?did, _)) {
            // handled below
        }

        case (ast.item_mod(?name, ?m, ?mid)) {
            cx.items.insert(mid, i);
        }

        case (ast.item_native_mod(_, _, _)) {
            // empty
        }

        case (ast.item_ty(_, _, _, ?did, _)) {
            cx.items.insert(did, i);
        }

        case (ast.item_tag(?name, ?variants, ?tps, ?tag_id, _)) {
            cx.items.insert(tag_id, i);
        }

        case (ast.item_obj(_, _, _, ?did, _)) {
            // handled below
        }
    }
    ret extend_path(cx, item_name(i));
}

fn collect_item_pass2(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
    alt (i.node) {
        case (ast.item_fn(?name, ?f, ?tps, ?fid, ?ann)) {
            cx.items.insert(fid, i);
            if (! cx.obj_methods.contains_key(fid)) {
                decl_fn_and_pair(extend_path(cx, name), "fn",
                                 tps, ann, fid);
            }
        }

        case (ast.item_obj(?name, ?ob, ?tps, ?oid, ?ann)) {
            cx.items.insert(oid.ctor, i);
            decl_fn_and_pair(extend_path(cx, name), "obj_ctor",
                             tps, ann, oid.ctor);
            for (@ast.method m in ob.methods) {
                cx.obj_methods.insert(m.node.id, ());
            }
        }

        case (_) { /* fall through */ }
    }
    ret extend_path(cx, item_name(i));
}


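// Walk the crate and declare its items ahead of translation. Collection is
// done as two folds: the first pass records consts, mods, type, tag and
// native items; the second declares the fn and obj-ctor pairs.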
fn collect_items(@crate_ctxt cx, @ast.crate crate) {

    let fold.ast_fold[@crate_ctxt] fld =
        fold.new_identity_fold[@crate_ctxt]();

    // FIXME: It might be better to use a worklist for this. An item
    // would be added to it if it depends on a not yet seen tag for example.

    auto fld1 =
        @rec( update_env_for_item = bind collect_item(_,_),
              update_env_for_native_item = bind collect_native_item(_,_)
              with *fld );

    fold.fold_crate[@crate_ctxt](cx, fld1, crate);

    auto fld2 = @rec( update_env_for_item = bind collect_item_pass2(_,_)
                      with *fld );

    fold.fold_crate[@crate_ctxt](cx, fld2, crate);
}

fn collect_tag_ctor(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {

    alt (i.node) {

        case (ast.item_tag(_, ?variants, ?tps, _, _)) {
            for (ast.variant variant in variants) {
                if (_vec.len[ast.variant_arg](variant.node.args) != 0u) {
                    decl_fn_and_pair(extend_path(cx, variant.node.name),
                                     "tag", tps, variant.node.ann,
                                     variant.node.id);
                }
            }
        }

        case (_) { /* fall through */ }
    }
    ret cx;
}

fn collect_tag_ctors(@crate_ctxt cx, @ast.crate crate) {

    let fold.ast_fold[@crate_ctxt] fld =
        fold.new_identity_fold[@crate_ctxt]();

    fld = @rec( update_env_for_item = bind collect_tag_ctor(_,_)
                with *fld );

    fold.fold_crate[@crate_ctxt](cx, fld, crate);
}

// The constant translation pass.

fn trans_constant(&@crate_ctxt cx, @ast.item it) -> @crate_ctxt {
    alt (it.node) {
        case (ast.item_tag(?ident, ?variants, _, ?tag_id, _)) {
            auto i = 0u;
            auto n_variants = _vec.len[ast.variant](variants);
            while (i < n_variants) {
                auto variant = variants.(i);

                auto discrim_val = C_int(i as int);

                auto s = mangle_name_by_seq(cx,
                                            #fmt("_rust_tag_discrim_%s_%u",
                                                 ident, i));
                auto discrim_gvar = llvm.LLVMAddGlobal(cx.llmod, T_int(),
                                                       _str.buf(s));

                llvm.LLVMSetInitializer(discrim_gvar, discrim_val);
                llvm.LLVMSetGlobalConstant(discrim_gvar, True);

                cx.discrims.insert(variant.node.id, discrim_gvar);
                cx.discrim_symbols.insert(variant.node.id, s);

                i += 1u;
            }
        }

        case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
            // FIXME: The whole expr-translation system needs cloning to deal
            // with consts.
            auto v = C_int(1);
            cx.item_ids.insert(cid, v);
            auto s = mangle_name_by_type(extend_path(cx, name),
                                         node_ann_type(cx, ann));
            cx.item_symbols.insert(cid, s);
        }

        case (_) {
            // empty
        }
    }

    ret cx;
}

fn trans_constants(@crate_ctxt cx, @ast.crate crate) {
    let fold.ast_fold[@crate_ctxt] fld =
        fold.new_identity_fold[@crate_ctxt]();

    fld = @rec(update_env_for_item = bind trans_constant(_,_) with *fld);

    fold.fold_crate[@crate_ctxt](cx, fld, crate);
}


fn vp2i(@block_ctxt cx, ValueRef v) -> ValueRef {
    ret cx.build.PtrToInt(v, T_int());
}


fn vi2p(@block_ctxt cx, ValueRef v, TypeRef t) -> ValueRef {
    ret cx.build.IntToPtr(v, t);
}

fn p2i(ValueRef v) -> ValueRef {
    ret llvm.LLVMConstPtrToInt(v, T_int());
}

fn i2p(ValueRef v, TypeRef t) -> ValueRef {
    ret llvm.LLVMConstIntToPtr(v, t);
}

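// Fill in the body of the exit-task glue: a single block that hands the
// task pointer to upcall_exit.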
fn trans_exit_task_glue(@glue_fns glues,
                        &hashmap[str, ValueRef] externs,
                        type_names tn, ModuleRef llmod) {
    let vec[TypeRef] T_args = vec();
    let vec[ValueRef] V_args = vec();

    auto llfn = glues.exit_task_glue;
    let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 4u);

    auto entrybb = llvm.LLVMAppendBasicBlock(llfn, _str.buf("entry"));
    auto build = new_builder(entrybb);
    auto tptr = build.PtrToInt(lltaskptr, T_int());
    auto V_args2 = vec(tptr) + V_args;
    trans_native_call(build, glues, lltaskptr,
                      externs, tn, llmod, "upcall_exit", true, V_args2);
    build.RetVoid();
}

fn create_typedefs(@crate_ctxt cx) {
    llvm.LLVMAddTypeName(cx.llmod, _str.buf("crate"), T_crate(cx.tn));
    llvm.LLVMAddTypeName(cx.llmod, _str.buf("task"), T_task(cx.tn));
    llvm.LLVMAddTypeName(cx.llmod, _str.buf("tydesc"), T_tydesc(cx.tn));
}

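// Fill in the static rust_crate record. Glue entry points are stored as
// offsets from the crate record's own address, matching the ptrdiff_t
// fields the runtime expects.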
fn create_crate_constant(ValueRef crate_ptr, @glue_fns glues) {

    let ValueRef crate_addr = p2i(crate_ptr);

    let ValueRef activate_glue_off =
        llvm.LLVMConstSub(p2i(glues.activate_glue), crate_addr);

    let ValueRef yield_glue_off =
        llvm.LLVMConstSub(p2i(glues.yield_glue), crate_addr);

    let ValueRef exit_task_glue_off =
        llvm.LLVMConstSub(p2i(glues.exit_task_glue), crate_addr);

    let ValueRef crate_val =
        C_struct(vec(C_null(T_int()),     // ptrdiff_t image_base_off
                     p2i(crate_ptr),   // uintptr_t self_addr
                     C_null(T_int()),     // ptrdiff_t debug_abbrev_off
                     C_null(T_int()),     // size_t debug_abbrev_sz
                     C_null(T_int()),     // ptrdiff_t debug_info_off
                     C_null(T_int()),     // size_t debug_info_sz
                     activate_glue_off,   // size_t activate_glue_off
                     yield_glue_off,      // size_t yield_glue_off
                     C_null(T_int()),     // size_t unwind_glue_off
                     C_null(T_int()),     // size_t gc_glue_off
                     exit_task_glue_off,  // size_t main_exit_task_glue_off
                     C_null(T_int()),     // int n_rust_syms
                     C_null(T_int()),     // int n_c_syms
                     C_null(T_int()),     // int n_libs
                     C_int(abi.abi_x86_rustc_fastcall) // uintptr_t abi_tag
                     ));

    llvm.LLVMSetInitializer(crate_ptr, crate_val);
}

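// Find the unique fn whose mangled symbol ends in "main"; it is an error
// to find none or more than one.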
fn find_main_fn(@crate_ctxt cx) -> ValueRef {
    auto e = sep() + "main";
    let ValueRef v = C_nil();
    let uint n = 0u;
    for each (@tup(ast.def_id, str) i in cx.item_symbols.items()) {
        if (_str.ends_with(i._1, e)) {
            n += 1u;
            v = cx.item_ids.get(i._0);
        }
    }
    alt (n) {
        case (0u) {
            cx.sess.err("main fn not found");
        }
        case (1u) {
            ret v;
        }
        case (_) {
            cx.sess.err("multiple main fns found");
        }
    }
    fail;
}

fn trans_main_fn(@crate_ctxt cx, ValueRef llcrate) {
    auto T_main_args = vec(T_int(), T_int());
    auto T_rust_start_args = vec(T_int(), T_int(), T_int(), T_int());

    auto main_name;
    if (_str.eq(std.os.target_os(), "win32")) {
        main_name = "WinMain@16";
    } else {
        main_name = "main";
    }

    auto llmain =
        decl_cdecl_fn(cx.llmod, main_name, T_fn(T_main_args, T_int()));

    auto llrust_start = decl_cdecl_fn(cx.llmod, "rust_start",
                                      T_fn(T_rust_start_args, T_int()));

    auto llargc = llvm.LLVMGetParam(llmain, 0u);
    auto llargv = llvm.LLVMGetParam(llmain, 1u);
    auto llrust_main = find_main_fn(cx);

    //
    // Emit the moral equivalent of:
    //
    // main(int argc, char **argv) {
    //     rust_start(&_rust.main, &crate, argc, argv);
    // }
    //

    let BasicBlockRef llbb =
        llvm.LLVMAppendBasicBlock(llmain, _str.buf(""));
    auto b = new_builder(llbb);

    auto start_args = vec(p2i(llrust_main), p2i(llcrate), llargc, llargv);

    b.Ret(b.Call(llrust_start, start_args));
}

fn declare_intrinsics(ModuleRef llmod) -> hashmap[str,ValueRef] {

    let vec[TypeRef] T_trap_args = vec();
    auto trap = decl_cdecl_fn(llmod, "llvm.trap",
                              T_fn(T_trap_args, T_void()));

    auto intrinsics = new_str_hash[ValueRef]();
    intrinsics.insert("llvm.trap", trap);
    ret intrinsics;
}


fn trace_str(@block_ctxt cx, str s) {
    trans_upcall(cx, "upcall_trace_str", vec(p2i(C_cstr(cx.fcx.ccx, s))));
}

fn trace_word(@block_ctxt cx, ValueRef v) {
    trans_upcall(cx, "upcall_trace_word", vec(v));
}

fn trace_ptr(@block_ctxt cx, ValueRef v) {
    trace_word(cx, cx.build.PtrToInt(v, T_int()));
}

fn trap(@block_ctxt bcx) {
    let vec[ValueRef] v = vec();
    bcx.build.Call(bcx.fcx.ccx.intrinsics.get("llvm.trap"), v);
}

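// Run the LLVM verifier and, when optimizing, a hand-assembled
// approximation of the standard -O2 pipeline built from the passes exposed
// through the C API.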
fn run_passes(ModuleRef llmod, bool opt) {
    auto pm = mk_pass_manager();

    // TODO: run the linter here also, once there are llvm-c bindings for it.

    // FIXME: This is mostly a copy of the bits of opt's -O2 that are
    // available in the C api.
    // FIXME2: We might want to add optmization levels like -O1, -O2, -Os, etc
    // FIXME3: Should we expose and use the pass lists used by the opt tool?
    if (opt) {
        auto fpm = mk_pass_manager();

        // createStandardFunctionPasses
        llvm.LLVMAddCFGSimplificationPass(fpm.llpm);
        llvm.LLVMAddScalarReplAggregatesPass(fpm.llpm);
        //llvm.LLVMAddEarlyCSEPass(fpm.llpm);

        llvm.LLVMRunPassManager(fpm.llpm, llmod);

        // createStandardModulePasses
        llvm.LLVMAddGlobalOptimizerPass(pm.llpm);
        llvm.LLVMAddIPSCCPPass(pm.llpm);
        llvm.LLVMAddDeadArgEliminationPass(pm.llpm);
        llvm.LLVMAddInstructionCombiningPass(pm.llpm);
        llvm.LLVMAddCFGSimplificationPass(pm.llpm);
        llvm.LLVMAddPruneEHPass(pm.llpm);
        llvm.LLVMAddFunctionInliningPass(pm.llpm);

        // FIXME: crashes!
        // llvm.LLVMAddFunctionAttrsPass(pm.llpm);

        // llvm.LLVMAddScalarReplAggregatesPassSSA(pm.llpm);
        // llvm.LLVMAddEarlyCSEPass(pm.llpm);
        llvm.LLVMAddSimplifyLibCallsPass(pm.llpm);
        llvm.LLVMAddJumpThreadingPass(pm.llpm);
        // llvm.LLVMAddCorrelatedValuePropagationPass(pm.llpm);
        llvm.LLVMAddCFGSimplificationPass(pm.llpm);
        llvm.LLVMAddInstructionCombiningPass(pm.llpm);
        llvm.LLVMAddTailCallEliminationPass(pm.llpm);
        llvm.LLVMAddCFGSimplificationPass(pm.llpm);
        llvm.LLVMAddReassociatePass(pm.llpm);
        llvm.LLVMAddLoopRotatePass(pm.llpm);
        llvm.LLVMAddLICMPass(pm.llpm);
        llvm.LLVMAddLoopUnswitchPass(pm.llpm);
        llvm.LLVMAddInstructionCombiningPass(pm.llpm);
        llvm.LLVMAddIndVarSimplifyPass(pm.llpm);
        // llvm.LLVMAddLoopIdiomPass(pm.llpm);
        llvm.LLVMAddLoopDeletionPass(pm.llpm);
        llvm.LLVMAddLoopUnrollPass(pm.llpm);
        llvm.LLVMAddInstructionCombiningPass(pm.llpm);
        llvm.LLVMAddGVNPass(pm.llpm);
        llvm.LLVMAddMemCpyOptPass(pm.llpm);
        llvm.LLVMAddSCCPPass(pm.llpm);
        llvm.LLVMAddInstructionCombiningPass(pm.llpm);
        llvm.LLVMAddJumpThreadingPass(pm.llpm);
        // llvm.LLVMAddCorrelatedValuePropagationPass(pm.llpm);
        llvm.LLVMAddDeadStoreEliminationPass(pm.llpm);
        llvm.LLVMAddAggressiveDCEPass(pm.llpm);
        llvm.LLVMAddCFGSimplificationPass(pm.llpm);
        llvm.LLVMAddStripDeadPrototypesPass(pm.llpm);
        llvm.LLVMAddDeadTypeEliminationPass(pm.llpm);
        llvm.LLVMAddConstantMergePass(pm.llpm);
    }
    llvm.LLVMAddVerifierPass(pm.llpm);
    llvm.LLVMRunPassManager(pm.llpm, llmod);
}

fn decl_no_op_type_glue(ModuleRef llmod, type_names tn) -> ValueRef {
    auto ty = T_fn(vec(T_taskptr(tn), T_ptr(T_i8())), T_void());
    ret decl_fastcall_fn(llmod, abi.no_op_type_glue_name(), ty);
}

fn make_no_op_type_glue(ValueRef fun) {
    auto bb_name = _str.buf("_rust_no_op_type_glue_bb");
    auto llbb = llvm.LLVMAppendBasicBlock(fun, bb_name);
    new_builder(llbb).RetVoid();
}

fn decl_memcpy_glue(ModuleRef llmod) -> ValueRef {
    auto p8 = T_ptr(T_i8());

    auto ty = T_fn(vec(p8, p8, T_int()), T_void());
    ret decl_fastcall_fn(llmod, abi.memcpy_glue_name(), ty);
}

fn make_memcpy_glue(ValueRef fun) {
    // We're not using the LLVM memcpy intrinsic. It appears to call through
    // to the platform memcpy in some cases, which is not terribly safe to run
    // on a rust stack.
    auto initbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("init"));
    auto hdrbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("hdr"));
    auto loopbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("loop"));
    auto endbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("end"));

    auto dst = llvm.LLVMGetParam(fun, 0u);
    auto src = llvm.LLVMGetParam(fun, 1u);
    auto count = llvm.LLVMGetParam(fun, 2u);

    // Init block.
    auto ib = new_builder(initbb);
    auto ip = ib.Alloca(T_int());
    ib.Store(C_int(0), ip);
    ib.Br(hdrbb);

    // Loop-header block
    auto hb = new_builder(hdrbb);
    auto i = hb.Load(ip);
    hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);

    // Loop-body block
    auto lb = new_builder(loopbb);
    i = lb.Load(ip);
    lb.Store(lb.Load(lb.GEP(src, vec(i))),
             lb.GEP(dst, vec(i)));
    lb.Store(lb.Add(i, C_int(1)), ip);
    lb.Br(hdrbb);

    // End block
    auto eb = new_builder(endbb);
    eb.RetVoid();
}

fn decl_bzero_glue(ModuleRef llmod) -> ValueRef {
    auto p8 = T_ptr(T_i8());

    auto ty = T_fn(vec(p8, T_int()), T_void());
    ret decl_fastcall_fn(llmod, abi.bzero_glue_name(), ty);
}

fn make_bzero_glue(ValueRef fun) -> ValueRef {
    // We're not using the LLVM memset intrinsic. Same as with memcpy.
    auto initbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("init"));
    auto hdrbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("hdr"));
    auto loopbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("loop"));
    auto endbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("end"));

    auto dst = llvm.LLVMGetParam(fun, 0u);
    auto count = llvm.LLVMGetParam(fun, 1u);

    // Init block.
    auto ib = new_builder(initbb);
    auto ip = ib.Alloca(T_int());
    ib.Store(C_int(0), ip);
    ib.Br(hdrbb);

    // Loop-header block
    auto hb = new_builder(hdrbb);
    auto i = hb.Load(ip);
    hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);

    // Loop-body block
    auto lb = new_builder(loopbb);
    i = lb.Load(ip);
    lb.Store(C_integral(0, T_i8()), lb.GEP(dst, vec(i)));
    lb.Store(lb.Add(i, C_int(1)), ip);
    lb.Br(hdrbb);

    // End block
    auto eb = new_builder(endbb);
    eb.RetVoid();
    ret fun;
}

fn make_vec_append_glue(ModuleRef llmod, type_names tn) -> ValueRef {
    /*
     * Args to vec_append_glue:
     *
     *   0. (Implicit) task ptr
     *
     *   1. Pointer to the tydesc of the vec, so that we can tell if it's gc
     *      mem, and have a tydesc to pass to malloc if we're allocating anew.
     *
     *   2. Pointer to the tydesc of the vec's stored element type, so that
     *      elements can be copied to a newly alloc'ed vec if one must be
     *      created.
     *
     *   3. Dst vec ptr (i.e. ptr to ptr to rust_vec).
     *
     *   4. Src vec (i.e. ptr to rust_vec).
     *
     *   5. Flag indicating whether to skip trailing null on dst.
     *
     */

    auto ty = T_fn(vec(T_taskptr(tn),
                       T_ptr(T_tydesc(tn)),
                       T_ptr(T_tydesc(tn)),
                       T_ptr(T_opaque_vec_ptr()),
                       T_opaque_vec_ptr(), T_bool()),
                   T_void());

    auto llfn = decl_fastcall_fn(llmod, abi.vec_append_glue_name(), ty);
    ret llfn;
}


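// Helpers for the rust_vec representation: "fill" is the number of bytes in
// use, p0 points at the first data byte and p1 just past the last one. The
// *_adjusted variants drop the trailing null when the vec is really a str.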
fn vec_fill(@block_ctxt bcx, ValueRef v) -> ValueRef {
    ret bcx.build.Load(bcx.build.GEP(v, vec(C_int(0),
                                            C_int(abi.vec_elt_fill))));
}

fn put_vec_fill(@block_ctxt bcx, ValueRef v, ValueRef fill) -> ValueRef {
    ret bcx.build.Store(fill,
                        bcx.build.GEP(v,
                                      vec(C_int(0),
                                          C_int(abi.vec_elt_fill))));
}

fn vec_fill_adjusted(@block_ctxt bcx, ValueRef v,
                     ValueRef skipnull) -> ValueRef {
    auto f = bcx.build.Load(bcx.build.GEP(v,
                                          vec(C_int(0),
                                              C_int(abi.vec_elt_fill))));
    ret bcx.build.Select(skipnull, bcx.build.Sub(f, C_int(1)), f);
}

fn vec_p0(@block_ctxt bcx, ValueRef v) -> ValueRef {
    auto p = bcx.build.GEP(v, vec(C_int(0),
                                  C_int(abi.vec_elt_data)));
    ret bcx.build.PointerCast(p, T_ptr(T_i8()));
}


fn vec_p1(@block_ctxt bcx, ValueRef v) -> ValueRef {
    auto len = vec_fill(bcx, v);
    ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
}

fn vec_p1_adjusted(@block_ctxt bcx, ValueRef v,
                   ValueRef skipnull) -> ValueRef {
    auto len = vec_fill_adjusted(bcx, v, skipnull);
    ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
}

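// Fill in the body of the vec-append glue declared in make_vec_append_glue:
// grow the destination vec via upcall_vec_grow, copy the existing
// destination elements over if the upcall asks for it, then append the
// source elements, running the element tydesc's take glue on each copy.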
fn trans_vec_append_glue(@crate_ctxt cx) {

    auto llfn = cx.glues.vec_append_glue;

    let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 0u);
    let ValueRef llvec_tydesc = llvm.LLVMGetParam(llfn, 1u);
    let ValueRef llelt_tydesc = llvm.LLVMGetParam(llfn, 2u);
    let ValueRef lldst_vec_ptr = llvm.LLVMGetParam(llfn, 3u);
    let ValueRef llsrc_vec = llvm.LLVMGetParam(llfn, 4u);
    let ValueRef llskipnull = llvm.LLVMGetParam(llfn, 5u);

    let BasicBlockRef llallocas =
        llvm.LLVMAppendBasicBlock(llfn, _str.buf("allocas"));

    auto fcx = @rec(llfn=llfn,
                    lltaskptr=lltaskptr,
                    llenv=C_null(T_ptr(T_nil())),
                    llretptr=C_null(T_ptr(T_nil())),
                    mutable llallocas = llallocas,
                    mutable llself=none[self_vt],
                    mutable lliterbody=none[ValueRef],
                    llargs=new_def_hash[ValueRef](),
                    llobjfields=new_def_hash[ValueRef](),
                    lllocals=new_def_hash[ValueRef](),
                    llupvars=new_def_hash[ValueRef](),
                    lltydescs=new_def_hash[ValueRef](),
                    ccx=cx);

    auto bcx = new_top_block_ctxt(fcx);
    auto lltop = bcx.llbb;

    auto lldst_vec = bcx.build.Load(lldst_vec_ptr);

    // First the dst vec needs to grow to accommodate the src vec.
    // To do this we have to figure out how many bytes to add.

    auto llcopy_dst_ptr = alloca(bcx, T_int());
    auto llnew_vec_res =
        trans_upcall(bcx, "upcall_vec_grow",
                     vec(vp2i(bcx, lldst_vec),
                         vec_fill_adjusted(bcx, llsrc_vec, llskipnull),
                         vp2i(bcx, llcopy_dst_ptr),
                         vp2i(bcx, llvec_tydesc)));

    bcx = llnew_vec_res.bcx;
    auto llnew_vec = vi2p(bcx, llnew_vec_res.val,
                          T_opaque_vec_ptr());

    put_vec_fill(bcx, llnew_vec, C_int(0));

    auto copy_dst_cx = new_sub_block_ctxt(bcx, "copy new <- dst");
    auto copy_src_cx = new_sub_block_ctxt(bcx, "copy new <- src");

    auto pp0 = alloca(bcx, T_ptr(T_i8()));
    bcx.build.Store(vec_p0(bcx, llnew_vec), pp0);

    bcx.build.CondBr(bcx.build.TruncOrBitCast
                     (bcx.build.Load(llcopy_dst_ptr),
                      T_i1()),
                     copy_dst_cx.llbb,
                     copy_src_cx.llbb);


    fn copy_elts(@block_ctxt cx,
                 ValueRef elt_tydesc,
                 ValueRef dst,
                 ValueRef src,
                 ValueRef n_bytes) -> result {

        auto src_lim = cx.build.GEP(src, vec(n_bytes));

        auto elt_llsz =
            cx.build.Load(cx.build.GEP(elt_tydesc,
                                       vec(C_int(0),
                                           C_int(abi.tydesc_field_size))));

        fn take_one(ValueRef elt_tydesc,
                    @block_ctxt cx,
                    ValueRef dst, ValueRef src) -> result {
            call_tydesc_glue_full(cx, src,
                                  elt_tydesc,
                                  abi.tydesc_field_take_glue_off);
            ret res(cx, src);
        }

        auto bcx = iter_sequence_raw(cx, dst, src, src_lim,
                                     elt_llsz, bind take_one(elt_tydesc,
                                                             _, _, _)).bcx;

        ret call_memcpy(bcx, dst, src, n_bytes);
    }

    // Copy any dst elements in, omitting null if doing str.
    auto n_bytes = vec_fill_adjusted(copy_dst_cx, lldst_vec, llskipnull);
    copy_dst_cx = copy_elts(copy_dst_cx,
                            llelt_tydesc,
                            copy_dst_cx.build.Load(pp0),
                            vec_p0(copy_dst_cx, lldst_vec),
                            n_bytes).bcx;

    put_vec_fill(copy_dst_cx, llnew_vec, n_bytes);
    copy_dst_cx.build.Store(vec_p1(copy_dst_cx, llnew_vec), pp0);
    copy_dst_cx.build.Br(copy_src_cx.llbb);


    // Copy any src elements in, carrying along null if doing str.
    n_bytes = vec_fill(copy_src_cx, llsrc_vec);
    copy_src_cx = copy_elts(copy_src_cx,
                            llelt_tydesc,
                            copy_src_cx.build.Load(pp0),
                            vec_p0(copy_src_cx, llsrc_vec),
                            n_bytes).bcx;

    put_vec_fill(copy_src_cx, llnew_vec,
                 copy_src_cx.build.Add(vec_fill(copy_src_cx,
                                                llnew_vec),
                                        n_bytes));

    // Write new_vec back through the alias we were given.
    copy_src_cx.build.Store(llnew_vec, lldst_vec_ptr);
    copy_src_cx.build.RetVoid();

    // Tie up the llallocas -> lltop edge.
    new_builder(fcx.llallocas).Br(lltop);
}


fn make_glues(ModuleRef llmod, type_names tn) -> @glue_fns {
    ret @rec(activate_glue = decl_glue(llmod, tn, abi.activate_glue_name()),
             yield_glue = decl_glue(llmod, tn, abi.yield_glue_name()),
             /*
              * Note: the signature passed to decl_cdecl_fn here looks unusual
              * because it is. It corresponds neither to a native signature
              * nor a normal rust-ABI signature. In fact it is a fake
              * signature, that exists solely to acquire the task pointer as
              * an argument to the upcall. It so happens that the runtime sets
              * up the task pointer as the sole incoming argument to the frame
              * that we return into when returning to the exit task glue. So
              * this is the signature required to retrieve it.
              */
             exit_task_glue = decl_cdecl_fn(llmod, abi.exit_task_glue_name(),
                                            T_fn(vec(T_int(),
                                                     T_int(),
                                                     T_int(),
                                                     T_int(),
                                                     T_taskptr(tn)),
                                                 T_void())),

             native_glues_rust =
             _vec.init_fn[ValueRef](bind decl_native_glue(llmod, tn, true,
                                                          _),
                                    abi.n_native_glues + 1 as uint),
             native_glues_cdecl =
             _vec.init_fn[ValueRef](bind decl_native_glue(llmod, tn, false,
                                                          _),
                                    abi.n_native_glues + 1 as uint),
             no_op_type_glue = decl_no_op_type_glue(llmod, tn),
             memcpy_glue = decl_memcpy_glue(llmod),
             bzero_glue = decl_bzero_glue(llmod),
             vec_append_glue = make_vec_append_glue(llmod, tn));
}

fn make_common_glue(str output) {
    // FIXME: parts of this are repetitive and would probably be better
    // autogenerated, but things like the memcpy implementation are not,
    // and it might be better to just check in a .ll file.
    auto llmod =
        llvm.LLVMModuleCreateWithNameInContext(_str.buf("rust_out"),
                                               llvm.LLVMGetGlobalContext());

    llvm.LLVMSetDataLayout(llmod, _str.buf(x86.get_data_layout()));
    llvm.LLVMSetTarget(llmod, _str.buf(x86.get_target_triple()));
    auto td = mk_target_data(x86.get_data_layout());
    auto tn = mk_type_names();
    let ValueRef crate_ptr =
        llvm.LLVMAddGlobal(llmod, T_crate(tn), _str.buf("rust_crate"));

    auto intrinsics = declare_intrinsics(llmod);

    llvm.LLVMSetModuleInlineAsm(llmod, _str.buf(x86.get_module_asm()));

    auto glues = make_glues(llmod, tn);
    create_crate_constant(crate_ptr, glues);
    make_memcpy_glue(glues.memcpy_glue);
    make_bzero_glue(glues.bzero_glue);

    trans_exit_task_glue(glues, new_str_hash[ValueRef](), tn, llmod);

    run_passes(llmod, true);

    llvm.LLVMWriteBitcodeToFile(llmod, _str.buf(output));
    llvm.LLVMDisposeModule(llmod);
}

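// Top-level entry point for translation: set up the module and crate
// context, run the collection and constant passes, translate the crate's
// items and the vec-append glue, emit main (unless building a shared lib),
// write the crate metadata, run the LLVM passes and emit bitcode.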
fn trans_crate(session.session sess, @ast.crate crate,
               &ty.type_cache type_cache, str output, bool shared) {
    auto llmod =
        llvm.LLVMModuleCreateWithNameInContext(_str.buf("rust_out"),
                                               llvm.LLVMGetGlobalContext());

    llvm.LLVMSetDataLayout(llmod, _str.buf(x86.get_data_layout()));
    llvm.LLVMSetTarget(llmod, _str.buf(x86.get_target_triple()));
    auto td = mk_target_data(x86.get_data_layout());
    auto tn = mk_type_names();
    let ValueRef crate_ptr =
        llvm.LLVMAddGlobal(llmod, T_crate(tn), _str.buf("rust_crate"));

    auto intrinsics = declare_intrinsics(llmod);

    auto glues = make_glues(llmod, tn);
    auto hasher = ty.hash_ty;
    auto eqer = ty.eq_ty;
    auto tag_sizes = map.mk_hashmap[@ty.t,uint](hasher, eqer);
    auto tydescs = map.mk_hashmap[@ty.t,@tydesc_info](hasher, eqer);
    let vec[ast.ty_param] obj_typarams = vec();
    let vec[ast.obj_field] obj_fields = vec();

    let vec[str] pth = vec();
    auto cx = @rec(sess = sess,
                   llmod = llmod,
                   td = td,
                   tn = tn,
                   crate_ptr = crate_ptr,
                   externs = new_str_hash[ValueRef](),
                   intrinsics = intrinsics,
                   item_ids = new_def_hash[ValueRef](),
                   items = new_def_hash[@ast.item](),
                   native_items = new_def_hash[@ast.native_item](),
                   type_cache = type_cache,
                   item_symbols = new_def_hash[str](),
                   tag_sizes = tag_sizes,
                   discrims = new_def_hash[ValueRef](),
                   discrim_symbols = new_def_hash[str](),
                   fn_pairs = new_def_hash[ValueRef](),
                   consts = new_def_hash[ValueRef](),
                   obj_methods = new_def_hash[()](),
                   tydescs = tydescs,
                   obj_typarams = obj_typarams,
                   obj_fields = obj_fields,
                   glues = glues,
                   names = namegen(0),
                   path = pth,
                   sha = std.sha1.mk_sha1());

    create_typedefs(cx);

    collect_items(cx, crate);
    collect_tag_ctors(cx, crate);
    trans_constants(cx, crate);
    trans_mod(cx, crate.node.module);
    trans_vec_append_glue(cx);
    if (!shared) {
        trans_main_fn(cx, cx.crate_ptr);
    }

    // Translate the metadata.
    middle.metadata.write_metadata(cx, crate);

    // FIXME: Add an -O option
    run_passes(llmod, true);

    llvm.LLVMWriteBitcodeToFile(llmod, _str.buf(output));
    llvm.LLVMDisposeModule(llmod);
}

//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//