提交 ff429645 编写于 作者: M Marijn Haverbeke

Clean up some of trans using block combinators

`with_scope` and `with_cond` can be used to wrap a piece of code in a
scope block, or conditionalize it on a value, without doing all the
context-creation and jumping by hand.

Also renames @block_ctxt to block to reduce noise.
上级 1c1261bc
......@@ -224,7 +224,7 @@ fn line_from_span(cm: codemap::codemap, sp: span) -> uint {
codemap::lookup_char_pos(cm, sp.lo).line
}
fn create_block(cx: @block_ctxt) -> @metadata<block_md> {
fn create_block(cx: block) -> @metadata<block_md> {
let cache = get_cache(bcx_ccx(cx));
let cx = cx;
while option::is_none(cx.block_span) {
......@@ -677,7 +677,7 @@ fn create_var(type_tag: int, context: ValueRef, name: str, file: ValueRef,
ret llmdnode(lldata);
}
fn create_local_var(bcx: @block_ctxt, local: @ast::local)
fn create_local_var(bcx: block, local: @ast::local)
-> @metadata<local_var_md> unsafe {
let cx = bcx_ccx(bcx);
let cache = get_cache(cx);
......@@ -728,7 +728,7 @@ fn create_local_var(bcx: @block_ctxt, local: @ast::local)
ret mdval;
}
fn create_arg(bcx: @block_ctxt, arg: ast::arg, sp: span)
fn create_arg(bcx: block, arg: ast::arg, sp: span)
-> @metadata<argument_md> unsafe {
let fcx = bcx_fcx(bcx);
let cx = fcx_ccx(fcx);
......@@ -763,7 +763,7 @@ fn create_arg(bcx: @block_ctxt, arg: ast::arg, sp: span)
ret mdval;
}
fn update_source_pos(cx: @block_ctxt, s: span) {
fn update_source_pos(cx: block, s: span) {
if !bcx_ccx(cx).sess.opts.debuginfo {
ret;
}
......
......@@ -3,8 +3,7 @@
import lib::llvm::{ValueRef, BasicBlockRef};
import pat_util::*;
import build::*;
import base::{new_sub_block_ctxt, new_scope_block_ctxt,
new_real_block_ctxt, load_if_immediate};
import base::*;
import syntax::ast;
import syntax::ast_util;
import syntax::ast_util::{dummy_sp};
......@@ -38,28 +37,28 @@ enum opt_result {
single_result(result),
range_result(result, result),
}
fn trans_opt(bcx: @block_ctxt, o: opt) -> opt_result {
fn trans_opt(bcx: block, o: opt) -> opt_result {
let ccx = bcx_ccx(bcx), bcx = bcx;
alt o {
lit(l) {
alt l.node {
ast::expr_lit(@{node: ast::lit_str(s), _}) {
let strty = ty::mk_str(bcx_tcx(bcx));
let cell = base::empty_dest_cell();
bcx = tvec::trans_str(bcx, s, base::by_val(cell));
let cell = empty_dest_cell();
bcx = tvec::trans_str(bcx, s, by_val(cell));
add_clean_temp(bcx, *cell, strty);
ret single_result(rslt(bcx, *cell));
}
_ {
ret single_result(
rslt(bcx, base::trans_const_expr(ccx, l)));
rslt(bcx, trans_const_expr(ccx, l)));
}
}
}
var(disr_val, _) { ret single_result(rslt(bcx, C_int(ccx, disr_val))); }
range(l1, l2) {
ret range_result(rslt(bcx, base::trans_const_expr(ccx, l1)),
rslt(bcx, base::trans_const_expr(ccx, l2)));
ret range_result(rslt(bcx, trans_const_expr(ccx, l1)),
rslt(bcx, trans_const_expr(ccx, l2)));
}
}
}
......@@ -259,9 +258,9 @@ fn add_to_set(&set: [opt], val: opt) {
ret found;
}
fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id,
fn extract_variant_args(bcx: block, pat_id: ast::node_id,
vdefs: {enm: def_id, var: def_id}, val: ValueRef) ->
{vals: [ValueRef], bcx: @block_ctxt} {
{vals: [ValueRef], bcx: block} {
let ccx = bcx.fcx.ccx, bcx = bcx;
// invariant:
// pat_id must have the same length ty_param_substs as vdefs?
......@@ -285,7 +284,7 @@ fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id,
// invariant needed:
// how do we know it even makes sense to pass in ty_param_substs
// here? What if it's [] and the enum type has variables in it?
base::GEP_enum(bcx, blobptr, vdefs_tg, vdefs_var,
GEP_enum(bcx, blobptr, vdefs_tg, vdefs_var,
ty_param_substs, i);
bcx = r.bcx;
args += [r.val];
......@@ -363,7 +362,7 @@ fn score(p: @ast::pat) -> uint {
ret best_col;
}
fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
fn compile_submatch(bcx: block, m: match, vals: [ValueRef], f: mk_fail,
&exits: [exit_node]) {
let bcx = bcx;
if m.len() == 0u { Br(bcx, f()); ret; }
......@@ -371,23 +370,19 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
let data = m[0].data;
alt data.guard {
some(e) {
let guard_cx = new_scope_block_ctxt(bcx, "submatch_guard");
Br(bcx, guard_cx.llbb);
// Temporarily set bindings. They'll be rewritten to PHI nodes for
// the actual arm block.
// Temporarily set bindings. They'll be rewritten to PHI nodes
// for the actual arm block.
data.id_map.items {|key, val|
let local = local_mem(option::get(assoc(key, m[0].bound)));
bcx.fcx.lllocals.insert(val, local);
let loc = local_mem(option::get(assoc(key, m[0].bound)));
bcx.fcx.lllocals.insert(val, loc);
};
let {bcx: guard_cx, val} = with_scope_result(bcx, "guard") {|bcx|
trans_temp_expr(bcx, e)
};
bcx = with_cond(guard_cx, Not(guard_cx, val)) {|bcx|
compile_submatch(bcx, vec::tail(m), vals, f, exits);
bcx
};
let {bcx: guard_bcx, val: guard_val} =
base::trans_temp_expr(guard_cx, e);
guard_bcx = base::trans_block_cleanups(guard_bcx, guard_cx);
let next_cx = new_sub_block_ctxt(guard_cx, "submatch_next");
let else_cx = new_sub_block_ctxt(guard_cx, "submatch_else");
CondBr(guard_bcx, guard_val, next_cx.llbb, else_cx.llbb);
compile_submatch(else_cx, vec::slice(m, 1u, m.len()), vals, f,
exits);
bcx = next_cx;
}
_ { }
}
......@@ -425,7 +420,7 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
let rec_vals = [];
for field_name: ast::ident in rec_fields {
let ix = option::get(ty::field_idx(field_name, fields));
let r = base::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
let r = GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
rec_vals += [r.val];
bcx = r.bcx;
}
......@@ -442,7 +437,7 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
};
let tup_vals = [], i = 0u;
while i < n_tup_elts {
let r = base::GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
let r = GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
tup_vals += [r.val];
bcx = r.bcx;
i += 1u;
......@@ -507,7 +502,7 @@ enum branch_kind { no_branch, single, switch, compare, }
let else_cx =
alt kind {
no_branch | single { bcx }
_ { new_sub_block_ctxt(bcx, "match_else") }
_ { sub_block(bcx, "match_else") }
};
let sw;
if kind == switch {
......@@ -521,7 +516,7 @@ enum branch_kind { no_branch, single, switch, compare, }
// Compile subtrees for each option
for opt: opt in opts {
let opt_cx = new_sub_block_ctxt(bcx, "match_case");
let opt_cx = sub_block(bcx, "match_case");
alt kind {
single { Br(bcx, opt_cx.llbb); }
switch {
......@@ -536,35 +531,24 @@ enum branch_kind { no_branch, single, switch, compare, }
}
}
compare {
let compare_cx = new_scope_block_ctxt(bcx, "compare_scope");
Br(bcx, compare_cx.llbb);
bcx = compare_cx;
let t = node_id_type(bcx, pat_id);
let res = trans_opt(bcx, opt);
alt res {
single_result(r) {
bcx = r.bcx;
let eq =
base::trans_compare(bcx, ast::eq, test_val, t, r.val, t);
let cleanup_cx = base::trans_block_cleanups(
eq.bcx, compare_cx);
bcx = new_sub_block_ctxt(bcx, "compare_next");
CondBr(cleanup_cx, eq.val, opt_cx.llbb, bcx.llbb);
}
range_result(rbegin, rend) {
bcx = rend.bcx;
let ge = base::trans_compare(bcx, ast::ge, test_val, t,
rbegin.val, t);
let le = base::trans_compare(ge.bcx, ast::le, test_val, t,
rend.val, t);
let in_range = rslt(le.bcx, And(le.bcx, ge.val, le.val));
bcx = in_range.bcx;
let cleanup_cx =
base::trans_block_cleanups(bcx, compare_cx);
bcx = new_sub_block_ctxt(bcx, "compare_next");
CondBr(cleanup_cx, in_range.val, opt_cx.llbb, bcx.llbb);
}
}
let {bcx: after_cx, val: matches} =
with_scope_result(bcx, "compare_scope") {|bcx|
alt trans_opt(bcx, opt) {
single_result({bcx, val}) {
trans_compare(bcx, ast::eq, test_val, t, val, t)
}
range_result({val: vbegin, _}, {bcx, val: vend}) {
let {bcx, val: ge} = trans_compare(bcx, ast::ge, test_val,
t, vbegin, t);
let {bcx, val: le} = trans_compare(bcx, ast::le, test_val,
t, vend, t);
{bcx: bcx, val: And(bcx, ge, le)}
}
}
};
bcx = sub_block(after_cx, "compare_next");
CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb);
}
_ { }
}
......@@ -592,7 +576,7 @@ enum branch_kind { no_branch, single, switch, compare, }
}
// Returns false for unreachable blocks
fn make_phi_bindings(bcx: @block_ctxt, map: [exit_node],
fn make_phi_bindings(bcx: block, map: [exit_node],
ids: pat_util::pat_id_map) -> bool {
let our_block = bcx.llbb as uint;
let success = true, bcx = bcx;
......@@ -623,8 +607,8 @@ fn make_phi_bindings(bcx: @block_ctxt, map: [exit_node],
make_phi_bindings"); }
};
let e_ty = node_id_type(bcx, node_id);
let {bcx: abcx, val: alloc} = base::alloc_ty(bcx, e_ty);
bcx = base::copy_val(abcx, base::INIT, alloc,
let {bcx: abcx, val: alloc} = alloc_ty(bcx, e_ty);
bcx = copy_val(abcx, INIT, alloc,
load_if_immediate(abcx, local, e_ty),
e_ty);
add_clean(bcx, alloc, e_ty);
......@@ -637,76 +621,72 @@ fn make_phi_bindings(bcx: @block_ctxt, map: [exit_node],
ret success;
}
fn trans_alt(cx: @block_ctxt, expr: @ast::expr, arms_: [ast::arm],
dest: base::dest) -> @block_ctxt {
let bodies = [];
let match: match = [];
let alt_cx = new_scope_block_ctxt(cx, "alt");
Br(cx, alt_cx.llbb);
let er = base::trans_temp_expr(alt_cx, expr);
if er.bcx.unreachable { ret er.bcx; }
/*
n.b. nothing else in this module should need to normalize,
b/c of this call
*/
let arms = normalize_arms(bcx_tcx(cx), arms_);
for a: ast::arm in arms {
let body = new_real_block_ctxt(er.bcx, "case_body",
a.body.span);
let id_map = pat_util::pat_id_map(bcx_tcx(cx), a.pats[0]);
fn trans_alt(bcx: block, expr: @ast::expr, arms: [ast::arm],
dest: dest) -> block {
with_scope(bcx, "alt") {|bcx| trans_alt_inner(bcx, expr, arms, dest)}
}
fn trans_alt_inner(scope_cx: block, expr: @ast::expr, arms: [ast::arm],
dest: dest) -> block {
let bcx = scope_cx, tcx = bcx_tcx(bcx);
let bodies = [], match = [];
let {bcx, val, _} = trans_temp_expr(bcx, expr);
if bcx.unreachable { ret bcx; }
// n.b. nothing else in this module should need to normalize,
// b/c of this call
let arms = normalize_arms(tcx, arms);
for a in arms {
let body = scope_block(bcx, "case_body");
body.block_span = some(a.body.span);
let id_map = pat_util::pat_id_map(tcx, a.pats[0]);
bodies += [body];
for p: @ast::pat in a.pats {
match +=
[@{pats: [p],
bound: [],
data: @{body: body.llbb, guard: a.guard, id_map: id_map}}];
for p in a.pats {
match += [@{pats: [p],
bound: [],
data: @{body: body.llbb, guard: a.guard,
id_map: id_map}}];
}
}
// Cached fail-on-fallthrough block
let fail_cx = @mutable none;
fn mk_fail(cx: @block_ctxt, sp: span,
fn mk_fail(bcx: block, sp: span,
done: @mutable option<BasicBlockRef>) -> BasicBlockRef {
alt *done { some(bb) { ret bb; } _ { } }
let fail_cx = new_sub_block_ctxt(cx, "case_fallthrough");
base::trans_fail(fail_cx, some(sp), "non-exhaustive match failure");;
let fail_cx = sub_block(bcx, "case_fallthrough");
trans_fail(fail_cx, some(sp), "non-exhaustive match failure");;
*done = some(fail_cx.llbb);
ret fail_cx.llbb;
}
let exit_map = [];
let t = node_id_type(cx, expr.id);
let vr = base::spill_if_immediate(er.bcx, er.val, t);
compile_submatch(vr.bcx, match, [vr.val],
bind mk_fail(alt_cx, expr.span, fail_cx), exit_map);
let t = node_id_type(bcx, expr.id);
let {bcx, val: spilled} = spill_if_immediate(bcx, val, t);
compile_submatch(bcx, match, [spilled],
bind mk_fail(scope_cx, expr.span, fail_cx), exit_map);
let arm_cxs = [], arm_dests = [], i = 0u;
for a: ast::arm in arms {
for a in arms {
let body_cx = bodies[i];
if make_phi_bindings(body_cx, exit_map,
pat_util::pat_id_map(bcx_tcx(cx),
a.pats[0])) {
let arm_dest = base::dup_for_join(dest);
pat_util::pat_id_map(tcx, a.pats[0])) {
let arm_dest = dup_for_join(dest);
arm_dests += [arm_dest];
let arm_cx = base::trans_block(body_cx, a.body, arm_dest);
arm_cx = base::trans_block_cleanups(arm_cx, body_cx);
let arm_cx = trans_block(body_cx, a.body, arm_dest);
arm_cx = trans_block_cleanups(arm_cx, body_cx);
arm_cxs += [arm_cx];
}
i += 1u;
}
let after_cx = base::join_returns(alt_cx, arm_cxs, arm_dests, dest);
let next_cx = new_sub_block_ctxt(cx, "next");
if after_cx.unreachable { Unreachable(next_cx); }
base::cleanup_and_Br(after_cx, alt_cx, next_cx.llbb);
ret next_cx;
join_returns(scope_cx, arm_cxs, arm_dests, dest)
}
// Not alt-related, but similar to the pattern-munging code above
fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
make_copy: bool) -> @block_ctxt {
fn bind_irrefutable_pat(bcx: block, pat: @ast::pat, val: ValueRef,
make_copy: bool) -> block {
let ccx = bcx.fcx.ccx, bcx = bcx;
// Necessary since bind_irrefutable_pat is called outside trans_alt
......@@ -717,10 +697,10 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
// FIXME: Could constrain pat_bind to make this
// check unnecessary.
check (type_has_static_size(ccx, ty));
let llty = base::type_of(ccx, ty);
let alloc = base::alloca(bcx, llty);
bcx = base::copy_val(bcx, base::INIT, alloc,
base::load_if_immediate(bcx, val, ty), ty);
let llty = type_of(ccx, ty);
let alloc = alloca(bcx, llty);
bcx = copy_val(bcx, INIT, alloc,
load_if_immediate(bcx, val, ty), ty);
bcx.fcx.lllocals.insert(pat.id, local_mem(alloc));
add_clean(bcx, alloc, ty);
} else { bcx.fcx.lllocals.insert(pat.id, local_mem(val)); }
......@@ -745,7 +725,7 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
for f: ast::field_pat in fields {
let ix = option::get(ty::field_idx(f.ident, rec_fields));
// how to get rid of this check?
let r = base::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
let r = GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
bcx = bind_irrefutable_pat(r.bcx, f.pat, r.val, make_copy);
}
}
......@@ -753,7 +733,7 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
let tup_ty = node_id_type(bcx, pat.id);
let i = 0u;
for elem in elems {
let r = base::GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
let r = GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
bcx = bind_irrefutable_pat(r.bcx, elem, r.val, make_copy);
i += 1u;
}
......
此差异已折叠。
此差异已折叠。
......@@ -162,15 +162,15 @@ fn mk_closure_tys(tcx: ty::ctxt,
ret (cdata_ty, bound_tys);
}
fn allocate_cbox(bcx: @block_ctxt,
fn allocate_cbox(bcx: block,
ck: ty::closure_kind,
cdata_ty: ty::t)
-> (@block_ctxt, ValueRef, [ValueRef]) {
-> (block, ValueRef, [ValueRef]) {
// let ccx = bcx_ccx(bcx);
let ccx = bcx_ccx(bcx), tcx = ccx.tcx;
fn nuke_ref_count(bcx: @block_ctxt, box: ValueRef) {
fn nuke_ref_count(bcx: block, box: ValueRef) {
// Initialize ref count to arbitrary value for debugging:
let ccx = bcx_ccx(bcx);
let box = PointerCast(bcx, box, T_opaque_box_ptr(ccx));
......@@ -179,10 +179,10 @@ fn nuke_ref_count(bcx: @block_ctxt, box: ValueRef) {
Store(bcx, rc, ref_cnt);
}
fn store_uniq_tydesc(bcx: @block_ctxt,
fn store_uniq_tydesc(bcx: block,
cdata_ty: ty::t,
box: ValueRef,
&ti: option::t<@tydesc_info>) -> @block_ctxt {
&ti: option::t<@tydesc_info>) -> block {
let ccx = bcx_ccx(bcx);
let bound_tydesc = GEPi(bcx, box, [0, abi::box_field_tydesc]);
let {bcx, val: td} = base::get_tydesc(bcx, cdata_ty, true, ti);
......@@ -224,10 +224,10 @@ fn store_uniq_tydesc(bcx: @block_ctxt,
type closure_result = {
llbox: ValueRef, // llvalue of ptr to closure
cdata_ty: ty::t, // type of the closure data
bcx: @block_ctxt // final bcx
bcx: block // final bcx
};
fn cast_if_we_can(bcx: @block_ctxt, llbox: ValueRef, t: ty::t) -> ValueRef {
fn cast_if_we_can(bcx: block, llbox: ValueRef, t: ty::t) -> ValueRef {
let ccx = bcx_ccx(bcx);
if check type_has_static_size(ccx, t) {
let llty = type_of(ccx, t);
......@@ -242,12 +242,12 @@ fn cast_if_we_can(bcx: @block_ctxt, llbox: ValueRef, t: ty::t) -> ValueRef {
// heap allocated closure that copies the upvars into environment.
// Otherwise, it is stack allocated and copies pointers to the upvars.
fn store_environment(
bcx: @block_ctxt, lltyparams: [fn_ty_param],
bcx: block, lltyparams: [fn_ty_param],
bound_values: [environment_value],
ck: ty::closure_kind)
-> closure_result {
fn maybe_clone_tydesc(bcx: @block_ctxt,
fn maybe_clone_tydesc(bcx: block,
ck: ty::closure_kind,
td: ValueRef) -> ValueRef {
ret alt ck {
......@@ -349,7 +349,7 @@ fn maybe_clone_tydesc(bcx: @block_ctxt,
// Given a context and a list of upvars, build a closure. This just
// collects the upvars and packages them up for store_environment.
fn build_closure(bcx0: @block_ctxt,
fn build_closure(bcx0: block,
cap_vars: [capture::capture_var],
ck: ty::closure_kind)
-> closure_result {
......@@ -386,12 +386,12 @@ fn build_closure(bcx0: @block_ctxt,
// Given an enclosing block context, a new function context, a closure type,
// and a list of upvars, generate code to load and populate the environment
// with the upvars and type descriptors.
fn load_environment(enclosing_cx: @block_ctxt,
fn load_environment(enclosing_cx: block,
fcx: @fn_ctxt,
cdata_ty: ty::t,
cap_vars: [capture::capture_var],
ck: ty::closure_kind) {
let bcx = new_raw_block_ctxt(fcx, fcx.llloadenv);
let bcx = raw_block(fcx, fcx.llloadenv);
// Load a pointer to the closure data, skipping over the box header:
let llcdata = base::opaque_box_body(bcx, cdata_ty, fcx.llenv);
......@@ -440,14 +440,14 @@ fn load_environment(enclosing_cx: @block_ctxt,
}
}
fn trans_expr_fn(bcx: @block_ctxt,
fn trans_expr_fn(bcx: block,
proto: ast::proto,
decl: ast::fn_decl,
body: ast::blk,
sp: span,
id: ast::node_id,
cap_clause: ast::capture_clause,
dest: dest) -> @block_ctxt {
dest: dest) -> block {
if dest == ignore { ret bcx; }
let ccx = bcx_ccx(bcx), bcx = bcx;
let fty = node_id_type(bcx, id);
......@@ -482,17 +482,17 @@ fn trans_expr_fn(bcx: @block_ctxt,
ret bcx;
}
fn trans_bind(cx: @block_ctxt, f: @ast::expr, args: [option<@ast::expr>],
id: ast::node_id, dest: dest) -> @block_ctxt {
fn trans_bind(cx: block, f: @ast::expr, args: [option<@ast::expr>],
id: ast::node_id, dest: dest) -> block {
let f_res = trans_callee(cx, f);
ret trans_bind_1(cx, expr_ty(cx, f), f_res, args,
node_id_type(cx, id), dest);
}
fn trans_bind_1(cx: @block_ctxt, outgoing_fty: ty::t,
fn trans_bind_1(cx: block, outgoing_fty: ty::t,
f_res: lval_maybe_callee,
args: [option<@ast::expr>], pair_ty: ty::t,
dest: dest) -> @block_ctxt {
dest: dest) -> block {
let ccx = bcx_ccx(cx);
let bound: [@ast::expr] = [];
for argopt: option<@ast::expr> in args {
......@@ -572,33 +572,19 @@ fn trans_bind_1(cx: @block_ctxt, outgoing_fty: ty::t,
ret bcx;
}
fn make_null_test(
in_bcx: @block_ctxt,
ptr: ValueRef,
blk: fn(@block_ctxt) -> @block_ctxt)
-> @block_ctxt {
let not_null_bcx = new_sub_block_ctxt(in_bcx, "not null");
let next_bcx = new_sub_block_ctxt(in_bcx, "next");
let null_test = IsNull(in_bcx, ptr);
CondBr(in_bcx, null_test, next_bcx.llbb, not_null_bcx.llbb);
let not_null_bcx = blk(not_null_bcx);
Br(not_null_bcx, next_bcx.llbb);
ret next_bcx;
}
fn make_fn_glue(
cx: @block_ctxt,
cx: block,
v: ValueRef,
t: ty::t,
glue_fn: fn@(@block_ctxt, v: ValueRef, t: ty::t) -> @block_ctxt)
-> @block_ctxt {
glue_fn: fn@(block, v: ValueRef, t: ty::t) -> block)
-> block {
let bcx = cx;
let tcx = bcx_tcx(cx);
let fn_env = fn@(ck: ty::closure_kind) -> @block_ctxt {
let fn_env = fn@(ck: ty::closure_kind) -> block {
let box_cell_v = GEPi(cx, v, [0, abi::fn_field_box]);
let box_ptr_v = Load(cx, box_cell_v);
make_null_test(cx, box_ptr_v) {|bcx|
with_cond(cx, IsNotNull(cx, box_ptr_v)) {|bcx|
let closure_ty = ty::mk_opaque_closure_ptr(tcx, ck);
glue_fn(bcx, box_cell_v, closure_ty)
}
......@@ -615,10 +601,10 @@ fn make_fn_glue(
}
fn make_opaque_cbox_take_glue(
bcx: @block_ctxt,
bcx: block,
ck: ty::closure_kind,
cboxptr: ValueRef) // ptr to ptr to the opaque closure
-> @block_ctxt {
-> block {
// Easy cases:
alt ck {
ty::ck_block { ret bcx; }
......@@ -631,7 +617,7 @@ fn make_opaque_cbox_take_glue(
let tcx = bcx_tcx(bcx);
let llopaquecboxty = T_opaque_box_ptr(ccx);
let cbox_in = Load(bcx, cboxptr);
make_null_test(bcx, cbox_in) {|bcx|
with_cond(bcx, IsNotNull(bcx, cbox_in)) {|bcx|
// Load the size from the type descr found in the cbox
let cbox_in = PointerCast(bcx, cbox_in, llopaquecboxty);
let tydescptr = GEPi(bcx, cbox_in, [0, abi::box_field_tydesc]);
......@@ -663,10 +649,10 @@ fn make_opaque_cbox_take_glue(
}
fn make_opaque_cbox_drop_glue(
bcx: @block_ctxt,
bcx: block,
ck: ty::closure_kind,
cboxptr: ValueRef) // ptr to the opaque closure
-> @block_ctxt {
-> block {
alt ck {
ty::ck_block { bcx }
ty::ck_box {
......@@ -681,10 +667,10 @@ fn make_opaque_cbox_drop_glue(
}
fn make_opaque_cbox_free_glue(
bcx: @block_ctxt,
bcx: block,
ck: ty::closure_kind,
cbox: ValueRef) // ptr to the opaque closure
-> @block_ctxt {
-> block {
alt ck {
ty::ck_block { ret bcx; }
ty::ck_box | ty::ck_uniq { /* hard cases: */ }
......@@ -692,7 +678,7 @@ fn make_opaque_cbox_free_glue(
let ccx = bcx_ccx(bcx);
let tcx = bcx_tcx(bcx);
make_null_test(bcx, cbox) {|bcx|
with_cond(bcx, IsNotNull(bcx, cbox)) {|bcx|
// Load the type descr found in the cbox
let lltydescty = T_ptr(ccx.tydesc_type);
let cbox = PointerCast(bcx, cbox, T_opaque_cbox_ptr(ccx));
......@@ -783,13 +769,13 @@ fn trans_bind_thunk(ccx: @crate_ctxt,
// Create a new function context and block context for the thunk, and hold
// onto a pointer to the first block in the function for later use.
let fcx = new_fn_ctxt(ccx, path, llthunk, none);
let bcx = new_top_block_ctxt(fcx, none);
let bcx = top_scope_block(fcx, none);
let lltop = bcx.llbb;
// Since we might need to construct derived tydescs that depend on
// our bound tydescs, we need to load tydescs out of the environment
// before derived tydescs are constructed. To do this, we load them
// in the load_env block.
let l_bcx = new_raw_block_ctxt(fcx, fcx.llloadenv);
let l_bcx = raw_block(fcx, fcx.llloadenv);
// The 'llenv' that will arrive in the thunk we're creating is an
// environment that will contain the values of its arguments and a
......
......@@ -218,8 +218,8 @@ fn warn_not_to_commit(ccx: @crate_ctxt, msg: str) {
}
enum cleanup {
clean(fn@(@block_ctxt) -> @block_ctxt),
clean_temp(ValueRef, fn@(@block_ctxt) -> @block_ctxt),
clean(fn@(block) -> block),
clean_temp(ValueRef, fn@(block) -> block),
}
// Used to remember and reuse existing cleanup paths
......@@ -232,17 +232,17 @@ fn scope_clean_changed(info: scope_info) {
info.landing_pad = none;
}
fn add_clean(cx: @block_ctxt, val: ValueRef, ty: ty::t) {
// Register a cleanup that drops `val` (of type `ty`) when control leaves the
// innermost enclosing scope block. Does nothing for types that need no drop.
fn add_clean(cx: block, val: ValueRef, ty: ty::t) {
if !ty::type_needs_drop(bcx_tcx(cx), ty) { ret; }
in_scope_cx(cx) {|info|
info.cleanups += [clean(bind drop_ty(_, val, ty))];
// Invalidate any cached cleanup/landing-pad path for this scope.
scope_clean_changed(info);
}
}
fn add_clean_temp(cx: @block_ctxt, val: ValueRef, ty: ty::t) {
fn add_clean_temp(cx: block, val: ValueRef, ty: ty::t) {
if !ty::type_needs_drop(bcx_tcx(cx), ty) { ret; }
fn do_drop(bcx: @block_ctxt, val: ValueRef, ty: ty::t) ->
@block_ctxt {
fn do_drop(bcx: block, val: ValueRef, ty: ty::t) ->
block {
if ty::type_is_immediate(ty) {
ret base::drop_ty_immediate(bcx, val, ty);
} else {
......@@ -254,14 +254,14 @@ fn do_drop(bcx: @block_ctxt, val: ValueRef, ty: ty::t) ->
scope_clean_changed(info);
}
}
fn add_clean_temp_mem(cx: @block_ctxt, val: ValueRef, ty: ty::t) {
// Register a temporary-value cleanup for the in-memory value `val` of type
// `ty`. Unlike `add_clean`, the cleanup is tagged `clean_temp` with the value
// itself, so it can later be revoked (see `revoke_clean`) if ownership of the
// temporary is transferred elsewhere. No-op for types that need no drop.
fn add_clean_temp_mem(cx: block, val: ValueRef, ty: ty::t) {
if !ty::type_needs_drop(bcx_tcx(cx), ty) { ret; }
in_scope_cx(cx) {|info|
info.cleanups += [clean_temp(val, bind drop_ty(_, val, ty))];
// Invalidate any cached cleanup path for this scope.
scope_clean_changed(info);
}
}
fn add_clean_free(cx: @block_ctxt, ptr: ValueRef, shared: bool) {
fn add_clean_free(cx: block, ptr: ValueRef, shared: bool) {
let free_fn = if shared { bind base::trans_shared_free(_, ptr) }
else { bind base::trans_free(_, ptr) };
in_scope_cx(cx) {|info|
......@@ -274,7 +274,7 @@ fn add_clean_free(cx: @block_ctxt, ptr: ValueRef, shared: bool) {
// to a system where we can also cancel the cleanup on local variables, but
// this will be more involved. For now, we simply zero out the local, and the
// drop glue checks whether it is zero.
fn revoke_clean(cx: @block_ctxt, val: ValueRef) {
fn revoke_clean(cx: block, val: ValueRef) {
in_scope_cx(cx) {|info|
let i = 0u;
for cu in info.cleanups {
......@@ -317,18 +317,18 @@ enum block_kind {
// cleaned up. May correspond to an actual block in the language, but also
// to an implicit scope, for example, calls introduce an implicit scope in
// which the arguments are evaluated and cleaned up.
scope_block(scope_info),
block_scope(scope_info),
// A non-scope block is a basic block created as a translation artifact
// from translating code that expresses conditional logic rather than by
// explicit { ... } block structure in the source language. It's called a
// non-scope block because it doesn't introduce a new variable scope.
non_scope_block,
block_non_scope,
}
enum loop_cont { cont_self, cont_other(@block_ctxt), }
enum loop_cont { cont_self, cont_other(block), }
type scope_info = {
is_loop: option<{cnt: loop_cont, brk: @block_ctxt}>,
is_loop: option<{cnt: loop_cont, brk: block}>,
// A list of functions that must be run at when leaving this
// block, cleaning up any variables that were introduced in the
// block.
......@@ -345,7 +345,7 @@ enum loop_cont { cont_self, cont_other(@block_ctxt), }
// code. Each basic block we generate is attached to a function, typically
// with many basic blocks per function. All the basic blocks attached to a
// function are organized as a directed graph.
type block_ctxt = {
type block = @{
// The BasicBlockRef returned from a call to
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
// block to the function pointed to by llfn. We insert
......@@ -359,7 +359,7 @@ enum loop_cont { cont_self, cont_other(@block_ctxt), }
kind: block_kind,
// The source span where the block came from, if it is a block that
// actually appears in the source code.
block_span: option<span>,
mutable block_span: option<span>,
// The function context for the function to which this block is
// attached.
fcx: @fn_ctxt
......@@ -367,12 +367,12 @@ enum loop_cont { cont_self, cont_other(@block_ctxt), }
// FIXME: we should be able to use option<@block_parent> here but
// the infinite-enum check in rustboot gets upset.
enum block_parent { parent_none, parent_some(@block_ctxt), }
enum block_parent { parent_none, parent_some(block), }
type result = {bcx: @block_ctxt, val: ValueRef};
type result_t = {bcx: @block_ctxt, val: ValueRef, ty: ty::t};
type result = {bcx: block, val: ValueRef};
type result_t = {bcx: block, val: ValueRef, ty: ty::t};
fn rslt(bcx: @block_ctxt, val: ValueRef) -> result {
// Convenience constructor for a `result` record pairing a block context with
// the value computed in it.
fn rslt(bcx: block, val: ValueRef) -> result {
{bcx: bcx, val: val}
}
......@@ -393,23 +393,27 @@ fn struct_elt(llstructty: TypeRef, n: uint) -> TypeRef unsafe {
ret llvm::LLVMGetElementType(elt_tys[n]);
}
fn in_scope_cx(cx: @block_ctxt, f: fn(scope_info)) {
fn in_scope_cx(cx: block, f: fn(scope_info)) {
let cur = cx;
while true {
alt cur.kind {
scope_block(info) { f(info); ret; }
block_scope(info) { f(info); ret; }
_ {}
}
cur = alt check cur.parent { parent_some(b) { b } };
cur = block_parent(cur);
}
}
// Return the parent of block `cx`. The `alt check` asserts the parent is
// `parent_some`; calling this on a block with no parent (e.g. a function's
// top block) is a checked failure.
fn block_parent(cx: block) -> block {
alt check cx.parent { parent_some(b) { b } }
}
// Accessors
// TODO: When we have overloading, simplify these names!
pure fn bcx_tcx(bcx: @block_ctxt) -> ty::ctxt { ret bcx.fcx.ccx.tcx; }
pure fn bcx_ccx(bcx: @block_ctxt) -> @crate_ctxt { ret bcx.fcx.ccx; }
pure fn bcx_fcx(bcx: @block_ctxt) -> @fn_ctxt { ret bcx.fcx; }
// Type context reached through the block's function and crate contexts.
pure fn bcx_tcx(bcx: block) -> ty::ctxt { ret bcx.fcx.ccx.tcx; }
// Crate context of the block's enclosing function.
pure fn bcx_ccx(bcx: block) -> @crate_ctxt { ret bcx.fcx.ccx; }
// Function context this block is attached to.
pure fn bcx_fcx(bcx: block) -> @fn_ctxt { ret bcx.fcx; }
// Crate context of a function context.
pure fn fcx_ccx(fcx: @fn_ctxt) -> @crate_ctxt { ret fcx.ccx; }
// Type context of a function context.
pure fn fcx_tcx(fcx: @fn_ctxt) -> ty::ctxt { ret fcx.ccx.tcx; }
// Type context of a crate context.
pure fn ccx_tcx(ccx: @crate_ctxt) -> ty::ctxt { ret ccx.tcx; }
......@@ -838,7 +842,7 @@ fn C_shape(ccx: @crate_ctxt, bytes: [u8]) -> ValueRef {
}
pure fn valid_variant_index(ix: uint, cx: @block_ctxt, enum_id: ast::def_id,
pure fn valid_variant_index(ix: uint, cx: block, enum_id: ast::def_id,
variant_id: ast::def_id) -> bool {
// Handwaving: it's ok to pretend this code is referentially
......@@ -882,17 +886,17 @@ fn hash_mono_id(&&mi: mono_id) -> uint {
h
}
fn umax(cx: @block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
// Emit LLVM code selecting the unsigned maximum of `a` and `b`:
// an unsigned-less-than compare followed by a select.
fn umax(cx: block, a: ValueRef, b: ValueRef) -> ValueRef {
let cond = build::ICmp(cx, lib::llvm::IntULT, a, b);
ret build::Select(cx, cond, b, a);
}
fn umin(cx: @block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
// Emit LLVM code selecting the unsigned minimum of `a` and `b`:
// an unsigned-less-than compare followed by a select.
fn umin(cx: block, a: ValueRef, b: ValueRef) -> ValueRef {
let cond = build::ICmp(cx, lib::llvm::IntULT, a, b);
ret build::Select(cx, cond, a, b);
}
fn align_to(cx: @block_ctxt, off: ValueRef, align: ValueRef) -> ValueRef {
fn align_to(cx: block, off: ValueRef, align: ValueRef) -> ValueRef {
let mask = build::Sub(cx, align, C_int(bcx_ccx(cx), 1));
let bumped = build::Add(cx, off, mask);
ret build::And(cx, bumped, build::Not(cx, mask));
......@@ -910,7 +914,7 @@ fn path_str(p: path) -> str {
r
}
fn node_id_type(bcx: @block_ctxt, id: ast::node_id) -> ty::t {
fn node_id_type(bcx: block, id: ast::node_id) -> ty::t {
let tcx = bcx_tcx(bcx);
let t = ty::node_id_to_type(tcx, id);
alt bcx.fcx.param_substs {
......@@ -918,10 +922,10 @@ fn node_id_type(bcx: @block_ctxt, id: ast::node_id) -> ty::t {
_ { t }
}
}
fn expr_ty(bcx: @block_ctxt, ex: @ast::expr) -> ty::t {
// Look up the (parameter-substituted) type of expression `ex` through its
// AST node id.
fn expr_ty(bcx: block, ex: @ast::expr) -> ty::t {
node_id_type(bcx, ex.id)
}
fn node_id_type_params(bcx: @block_ctxt, id: ast::node_id) -> [ty::t] {
fn node_id_type_params(bcx: block, id: ast::node_id) -> [ty::t] {
let tcx = bcx_tcx(bcx);
let params = ty::node_id_to_type_params(tcx, id);
alt bcx.fcx.param_substs {
......
......@@ -59,14 +59,14 @@ fn trans_impl(ccx: @crate_ctxt, path: path, name: ast::ident,
}
}
fn trans_self_arg(bcx: @block_ctxt, base: @ast::expr) -> result {
// Translate the receiver (`self`) argument of a method call. The receiver is
// passed by reference, as a pointer to its LLVM type (or to i8 when the type
// has no static size — see `type_of_or_i8`).
fn trans_self_arg(bcx: block, base: @ast::expr) -> result {
let basety = expr_ty(bcx, base);
// Explicit by-ref passing mode for the receiver.
let m_by_ref = ast::expl(ast::by_ref);
trans_arg_expr(bcx, {mode: m_by_ref, ty: basety},
T_ptr(type_of_or_i8(bcx_ccx(bcx), basety)), base)
}
fn trans_method_callee(bcx: @block_ctxt, callee_id: ast::node_id,
fn trans_method_callee(bcx: block, callee_id: ast::node_id,
self: @ast::expr, origin: typeck::method_origin)
-> lval_maybe_callee {
alt origin {
......@@ -91,7 +91,7 @@ fn trans_method_callee(bcx: @block_ctxt, callee_id: ast::node_id,
}
// Method callee where the method is statically known
fn trans_static_callee(bcx: @block_ctxt, callee_id: ast::node_id,
fn trans_static_callee(bcx: block, callee_id: ast::node_id,
base: @ast::expr, did: ast::def_id,
substs: option<([ty::t], typeck::dict_res)>)
-> lval_maybe_callee {
......@@ -107,7 +107,7 @@ fn wrapper_fn_ty(ccx: @crate_ctxt, dict_ty: TypeRef, fty: ty::t,
{ty: fty, llty: T_fn([dict_ty] + inputs, output)}
}
fn trans_vtable_callee(bcx: @block_ctxt, env: callee_env, dict: ValueRef,
fn trans_vtable_callee(bcx: block, env: callee_env, dict: ValueRef,
callee_id: ast::node_id, iface_id: ast::def_id,
n_method: uint) -> lval_maybe_callee {
let bcx = bcx, ccx = bcx_ccx(bcx), tcx = ccx.tcx;
......@@ -140,7 +140,7 @@ fn trans_vtable_callee(bcx: @block_ctxt, env: callee_env, dict: ValueRef,
generic: generic}
}
fn trans_monomorphized_callee(bcx: @block_ctxt, callee_id: ast::node_id,
fn trans_monomorphized_callee(bcx: block, callee_id: ast::node_id,
base: @ast::expr, iface_id: ast::def_id,
n_method: uint, n_param: uint, n_bound: uint,
substs: param_substs) -> lval_maybe_callee {
......@@ -172,7 +172,7 @@ fn trans_monomorphized_callee(bcx: @block_ctxt, callee_id: ast::node_id,
// Method callee where the dict comes from a type param
fn trans_param_callee(bcx: @block_ctxt, callee_id: ast::node_id,
fn trans_param_callee(bcx: block, callee_id: ast::node_id,
base: @ast::expr, iface_id: ast::def_id, n_method: uint,
n_param: uint, n_bound: uint) -> lval_maybe_callee {
let {bcx, val} = trans_self_arg(bcx, base);
......@@ -182,7 +182,7 @@ fn trans_param_callee(bcx: @block_ctxt, callee_id: ast::node_id,
}
// Method callee where the dict comes from a boxed iface
fn trans_iface_callee(bcx: @block_ctxt, callee_id: ast::node_id,
fn trans_iface_callee(bcx: block, callee_id: ast::node_id,
base: @ast::expr, iface_id: ast::def_id, n_method: uint)
-> lval_maybe_callee {
let {bcx, val} = trans_temp_expr(bcx, base);
......@@ -266,12 +266,12 @@ fn resolve_dicts_in_fn_ctxt(fcx: @fn_ctxt, dicts: typeck::dict_res)
}
fn trans_wrapper(ccx: @crate_ctxt, pt: path, llfty: TypeRef,
fill: fn(ValueRef, @block_ctxt) -> @block_ctxt)
fill: fn(ValueRef, block) -> block)
-> ValueRef {
let name = link::mangle_internal_name_by_path(ccx, pt);
let llfn = decl_internal_cdecl_fn(ccx.llmod, name, llfty);
let fcx = new_fn_ctxt(ccx, [], llfn, none);
let bcx = new_top_block_ctxt(fcx, none), lltop = bcx.llbb;
let bcx = top_scope_block(fcx, none), lltop = bcx.llbb;
let bcx = fill(llfn, bcx);
build_return(bcx);
finish_fn(fcx, lltop);
......@@ -396,7 +396,7 @@ fn dict_is_static(tcx: ty::ctxt, origin: typeck::dict_origin) -> bool {
}
}
fn get_dict(bcx: @block_ctxt, origin: typeck::dict_origin) -> result {
fn get_dict(bcx: block, origin: typeck::dict_origin) -> result {
let ccx = bcx_ccx(bcx);
alt origin {
typeck::dict_static(impl_did, tys, sub_origins) {
......@@ -453,7 +453,7 @@ fn dict_id(tcx: ty::ctxt, origin: typeck::dict_origin) -> dict_id {
}
}
fn get_static_dict(bcx: @block_ctxt, origin: typeck::dict_origin)
fn get_static_dict(bcx: block, origin: typeck::dict_origin)
-> ValueRef {
let ccx = bcx_ccx(bcx);
let id = dict_id(ccx.tcx, origin);
......@@ -474,8 +474,8 @@ fn get_static_dict(bcx: @block_ctxt, origin: typeck::dict_origin)
cast
}
fn get_dict_ptrs(bcx: @block_ctxt, origin: typeck::dict_origin)
-> {bcx: @block_ctxt, ptrs: [ValueRef]} {
fn get_dict_ptrs(bcx: block, origin: typeck::dict_origin)
-> {bcx: block, ptrs: [ValueRef]} {
let ccx = bcx_ccx(bcx);
fn get_vtable(ccx: @crate_ctxt, did: ast::def_id) -> ValueRef {
if did.crate == ast::local_crate {
......@@ -517,8 +517,8 @@ fn get_vtable(ccx: @crate_ctxt, did: ast::def_id) -> ValueRef {
}
}
fn trans_cast(bcx: @block_ctxt, val: @ast::expr, id: ast::node_id, dest: dest)
-> @block_ctxt {
fn trans_cast(bcx: block, val: @ast::expr, id: ast::node_id, dest: dest)
-> block {
if dest == ignore { ret trans_expr(bcx, val, ignore); }
let ccx = bcx_ccx(bcx);
let v_ty = expr_ty(bcx, val);
......
......@@ -54,10 +54,10 @@ fn c_stack_tys(ccx: @crate_ctxt,
};
}
type shim_arg_builder = fn(bcx: @block_ctxt, tys: @c_stack_tys,
type shim_arg_builder = fn(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef) -> [ValueRef];
type shim_ret_builder = fn(bcx: @block_ctxt, tys: @c_stack_tys,
type shim_ret_builder = fn(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef, llretval: ValueRef);
fn build_shim_fn_(ccx: @crate_ctxt,
......@@ -73,7 +73,7 @@ fn build_shim_fn_(ccx: @crate_ctxt,
// Declare the body of the shim function:
let fcx = new_fn_ctxt(ccx, [], llshimfn, none);
let bcx = new_top_block_ctxt(fcx, none);
let bcx = top_scope_block(fcx, none);
let lltop = bcx.llbb;
let llargbundle = llvm::LLVMGetParam(llshimfn, 0 as c_uint);
let llargvals = arg_builder(bcx, tys, llargbundle);
......@@ -90,11 +90,11 @@ fn build_shim_fn_(ccx: @crate_ctxt,
ret llshimfn;
}
type wrap_arg_builder = fn(bcx: @block_ctxt, tys: @c_stack_tys,
type wrap_arg_builder = fn(bcx: block, tys: @c_stack_tys,
llwrapfn: ValueRef,
llargbundle: ValueRef);
type wrap_ret_builder = fn(bcx: @block_ctxt, tys: @c_stack_tys,
type wrap_ret_builder = fn(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef);
fn build_wrap_fn_(ccx: @crate_ctxt,
......@@ -106,7 +106,7 @@ fn build_wrap_fn_(ccx: @crate_ctxt,
ret_builder: wrap_ret_builder) {
let fcx = new_fn_ctxt(ccx, [], llwrapfn, none);
let bcx = new_top_block_ctxt(fcx, none);
let bcx = top_scope_block(fcx, none);
let lltop = bcx.llbb;
// Allocate the struct and write the arguments into it.
......@@ -122,7 +122,7 @@ fn build_wrap_fn_(ccx: @crate_ctxt,
tie_up_header_blocks(fcx, lltop);
// Make sure our standard return block (that we didn't use) is terminated
let ret_cx = new_raw_block_ctxt(fcx, fcx.llreturn);
let ret_cx = raw_block(fcx, fcx.llreturn);
Unreachable(ret_cx);
}
......@@ -168,7 +168,7 @@ fn build_shim_fn(ccx: @crate_ctxt,
tys: @c_stack_tys,
cc: lib::llvm::CallConv) -> ValueRef {
fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
fn build_args(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef) -> [ValueRef] {
let llargvals = [];
let i = 0u;
......@@ -181,7 +181,7 @@ fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
ret llargvals;
}
fn build_ret(bcx: @block_ctxt, tys: @c_stack_tys,
fn build_ret(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef, llretval: ValueRef) {
if tys.ret_def {
let n = vec::len(tys.arg_tys);
......@@ -210,7 +210,7 @@ fn build_wrap_fn(ccx: @crate_ctxt,
llshimfn: ValueRef,
llwrapfn: ValueRef) {
fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
fn build_args(bcx: block, tys: @c_stack_tys,
llwrapfn: ValueRef, llargbundle: ValueRef,
num_tps: uint) {
let i = 0u, n = vec::len(tys.arg_tys);
......@@ -226,7 +226,7 @@ fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
store_inbounds(bcx, llretptr, llargbundle, [0, n as int]);
}
fn build_ret(bcx: @block_ctxt, _tys: @c_stack_tys,
fn build_ret(bcx: block, _tys: @c_stack_tys,
_llargbundle: ValueRef) {
RetVoid(bcx);
}
......@@ -283,7 +283,7 @@ fn build_rust_fn(ccx: @crate_ctxt, path: ast_map::path,
fn build_shim_fn(ccx: @crate_ctxt, path: ast_map::path,
llrustfn: ValueRef, tys: @c_stack_tys) -> ValueRef {
fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
fn build_args(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef) -> [ValueRef] {
let llargvals = [];
let i = 0u;
......@@ -300,7 +300,7 @@ fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
ret llargvals;
}
fn build_ret(_bcx: @block_ctxt, _tys: @c_stack_tys,
fn build_ret(_bcx: block, _tys: @c_stack_tys,
_llargbundle: ValueRef, _llretval: ValueRef) {
// Nop. The return pointer in the Rust ABI function
// is wired directly into the return slot in the shim struct
......@@ -316,7 +316,7 @@ fn build_ret(_bcx: @block_ctxt, _tys: @c_stack_tys,
fn build_wrap_fn(ccx: @crate_ctxt, llshimfn: ValueRef,
llwrapfn: ValueRef, tys: @c_stack_tys) {
fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
fn build_args(bcx: block, tys: @c_stack_tys,
llwrapfn: ValueRef, llargbundle: ValueRef) {
let llretptr = alloca(bcx, tys.ret_ty);
let i = 0u, n = vec::len(tys.arg_tys);
......@@ -329,7 +329,7 @@ fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
store_inbounds(bcx, llretptr, llargbundle, [0, n as int]);
}
fn build_ret(bcx: @block_ctxt, tys: @c_stack_tys,
fn build_ret(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef) {
let n = vec::len(tys.arg_tys);
let llretval = load_inbounds(bcx, llargbundle, [0, n as int]);
......
......@@ -8,7 +8,7 @@
import trans::base;
import middle::trans::common::{crate_ctxt, val_ty, C_bytes, C_int,
C_named_struct, C_struct, T_enum_variant,
block_ctxt, result, rslt, bcx_ccx, bcx_tcx,
block, result, rslt, bcx_ccx, bcx_tcx,
type_has_static_size, umax, umin, align_to,
tydesc_info};
import back::abi;
......@@ -593,19 +593,19 @@ fn gen_shape_tables(ccx: @crate_ctxt) {
// compute sizeof / alignof
type metrics = {
bcx: @block_ctxt,
bcx: block,
sz: ValueRef,
align: ValueRef
};
type tag_metrics = {
bcx: @block_ctxt,
bcx: block,
sz: ValueRef,
align: ValueRef,
payload_align: ValueRef
};
fn size_of(bcx: @block_ctxt, t: ty::t) -> result {
fn size_of(bcx: block, t: ty::t) -> result {
let ccx = bcx_ccx(bcx);
if check type_has_static_size(ccx, t) {
rslt(bcx, llsize_of(ccx, base::type_of(ccx, t)))
......@@ -615,7 +615,7 @@ fn size_of(bcx: @block_ctxt, t: ty::t) -> result {
}
}
fn align_of(bcx: @block_ctxt, t: ty::t) -> result {
fn align_of(bcx: block, t: ty::t) -> result {
let ccx = bcx_ccx(bcx);
if check type_has_static_size(ccx, t) {
rslt(bcx, llalign_of(ccx, base::type_of(ccx, t)))
......@@ -625,7 +625,7 @@ fn align_of(bcx: @block_ctxt, t: ty::t) -> result {
}
}
fn metrics(bcx: @block_ctxt, t: ty::t) -> metrics {
fn metrics(bcx: block, t: ty::t) -> metrics {
let ccx = bcx_ccx(bcx);
if check type_has_static_size(ccx, t) {
let llty = base::type_of(ccx, t);
......@@ -688,8 +688,8 @@ fn static_size_of_enum(cx: @crate_ctxt, t: ty::t)
}
}
fn dynamic_metrics(cx: @block_ctxt, t: ty::t) -> metrics {
fn align_elements(cx: @block_ctxt, elts: [ty::t]) -> metrics {
fn dynamic_metrics(cx: block, t: ty::t) -> metrics {
fn align_elements(cx: block, elts: [ty::t]) -> metrics {
//
// C padding rules:
//
......@@ -736,7 +736,7 @@ fn align_elements(cx: @block_ctxt, elts: [ty::t]) -> metrics {
let bcx = cx;
let ccx = bcx_ccx(bcx);
let compute_max_variant_size = fn@(bcx: @block_ctxt) -> result {
let compute_max_variant_size = fn@(bcx: block) -> result {
// Compute max(variant sizes).
let bcx = bcx;
let max_size: ValueRef = C_int(ccx, 0);
......@@ -799,7 +799,7 @@ fn simplifier(tcx: ty::ctxt, typ: ty::t) -> ty::t {
}
// Given a tag type `ty`, returns the offset of the payload.
//fn tag_payload_offs(bcx: @block_ctxt, tag_id: ast::def_id, tps: [ty::t])
//fn tag_payload_offs(bcx: block, tag_id: ast::def_id, tps: [ty::t])
// -> ValueRef {
// alt tag_kind(tag_id) {
// tk_unit | tk_enum | tk_newtype { C_int(bcx_ccx(bcx), 0) }
......
......@@ -4,28 +4,28 @@
import back::abi;
import base::{call_memmove, trans_shared_malloc, type_of_or_i8,
INIT, copy_val, load_if_immediate, get_tydesc,
new_sub_block_ctxt, do_spill_noroot,
sub_block, do_spill_noroot,
dest};
import shape::{llsize_of, size_of};
import build::*;
import common::*;
fn get_fill(bcx: @block_ctxt, vptr: ValueRef) -> ValueRef {
fn get_fill(bcx: block, vptr: ValueRef) -> ValueRef {
Load(bcx, GEPi(bcx, vptr, [0, abi::vec_elt_fill]))
}
fn get_dataptr(bcx: @block_ctxt, vptr: ValueRef, unit_ty: TypeRef)
fn get_dataptr(bcx: block, vptr: ValueRef, unit_ty: TypeRef)
-> ValueRef {
let ptr = GEPi(bcx, vptr, [0, abi::vec_elt_elems]);
PointerCast(bcx, ptr, T_ptr(unit_ty))
}
fn pointer_add(bcx: @block_ctxt, ptr: ValueRef, bytes: ValueRef) -> ValueRef {
fn pointer_add(bcx: block, ptr: ValueRef, bytes: ValueRef) -> ValueRef {
let old_ty = val_ty(ptr);
let bptr = PointerCast(bcx, ptr, T_ptr(T_i8()));
ret PointerCast(bcx, InBoundsGEP(bcx, bptr, [bytes]), old_ty);
}
fn alloc_raw(bcx: @block_ctxt, fill: ValueRef, alloc: ValueRef) -> result {
fn alloc_raw(bcx: block, fill: ValueRef, alloc: ValueRef) -> result {
let ccx = bcx_ccx(bcx);
let llvecty = ccx.opaque_vec_type;
let vecsize = Add(bcx, alloc, llsize_of(ccx, llvecty));
......@@ -37,13 +37,13 @@ fn alloc_raw(bcx: @block_ctxt, fill: ValueRef, alloc: ValueRef) -> result {
}
type alloc_result =
{bcx: @block_ctxt,
{bcx: block,
val: ValueRef,
unit_ty: ty::t,
llunitsz: ValueRef,
llunitty: TypeRef};
fn alloc(bcx: @block_ctxt, vec_ty: ty::t, elts: uint) -> alloc_result {
fn alloc(bcx: block, vec_ty: ty::t, elts: uint) -> alloc_result {
let ccx = bcx_ccx(bcx);
let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
let llunitty = type_of_or_i8(ccx, unit_ty);
......@@ -66,7 +66,7 @@ fn alloc(bcx: @block_ctxt, vec_ty: ty::t, elts: uint) -> alloc_result {
llunitty: llunitty};
}
fn duplicate(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t) -> result {
fn duplicate(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> result {
let ccx = bcx_ccx(bcx);
let fill = get_fill(bcx, vptr);
let size = Add(bcx, fill, llsize_of(ccx, ccx.opaque_vec_type));
......@@ -80,23 +80,19 @@ fn duplicate(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t) -> result {
}
ret rslt(bcx, newptr);
}
fn make_free_glue(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t) ->
@block_ctxt {
let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
let drop_cx = new_sub_block_ctxt(bcx, "drop");
let next_cx = new_sub_block_ctxt(bcx, "next");
let null_test = IsNull(bcx, vptr);
CondBr(bcx, null_test, next_cx.llbb, drop_cx.llbb);
if ty::type_needs_drop(bcx_tcx(bcx), unit_ty) {
drop_cx = iter_vec(drop_cx, vptr, vec_ty, base::drop_ty);
fn make_free_glue(bcx: block, vptr: ValueRef, vec_ty: ty::t) ->
block {
let tcx = bcx_tcx(bcx), unit_ty = ty::sequence_element_type(tcx, vec_ty);
base::with_cond(bcx, IsNotNull(bcx, vptr)) {|bcx|
let bcx = if ty::type_needs_drop(tcx, unit_ty) {
iter_vec(bcx, vptr, vec_ty, base::drop_ty)
} else { bcx };
base::trans_shared_free(bcx, vptr)
}
drop_cx = base::trans_shared_free(drop_cx, vptr);
Br(drop_cx, next_cx.llbb);
ret next_cx;
}
fn trans_vec(bcx: @block_ctxt, args: [@ast::expr], id: ast::node_id,
dest: dest) -> @block_ctxt {
fn trans_vec(bcx: block, args: [@ast::expr], id: ast::node_id,
dest: dest) -> block {
let ccx = bcx_ccx(bcx), bcx = bcx;
if dest == base::ignore {
for arg in args {
......@@ -129,7 +125,7 @@ fn trans_vec(bcx: @block_ctxt, args: [@ast::expr], id: ast::node_id,
ret base::store_in_dest(bcx, vptr, dest);
}
fn trans_str(bcx: @block_ctxt, s: str, dest: dest) -> @block_ctxt {
fn trans_str(bcx: block, s: str, dest: dest) -> block {
let veclen = str::len_bytes(s) + 1u; // +1 for \0
let {bcx: bcx, val: sptr, _} =
alloc(bcx, ty::mk_str(bcx_tcx(bcx)), veclen);
......@@ -142,8 +138,8 @@ fn trans_str(bcx: @block_ctxt, s: str, dest: dest) -> @block_ctxt {
ret base::store_in_dest(bcx, sptr, dest);
}
fn trans_append(cx: @block_ctxt, vec_ty: ty::t, lhsptr: ValueRef,
rhs: ValueRef) -> @block_ctxt {
fn trans_append(cx: block, vec_ty: ty::t, lhsptr: ValueRef,
rhs: ValueRef) -> block {
// Cast to opaque interior vector types if necessary.
let ccx = bcx_ccx(cx);
let unit_ty = ty::sequence_element_type(bcx_tcx(cx), vec_ty);
......@@ -206,8 +202,8 @@ fn trans_append(cx: @block_ctxt, vec_ty: ty::t, lhsptr: ValueRef,
ret bcx;
}
fn trans_append_literal(bcx: @block_ctxt, vptrptr: ValueRef, vec_ty: ty::t,
vals: [@ast::expr]) -> @block_ctxt {
fn trans_append_literal(bcx: block, vptrptr: ValueRef, vec_ty: ty::t,
vals: [@ast::expr]) -> block {
let ccx = bcx_ccx(bcx);
let elt_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
let ti = none;
......@@ -227,8 +223,8 @@ fn trans_append_literal(bcx: @block_ctxt, vptrptr: ValueRef, vec_ty: ty::t,
ret bcx;
}
fn trans_add(bcx: @block_ctxt, vec_ty: ty::t, lhs: ValueRef,
rhs: ValueRef, dest: dest) -> @block_ctxt {
fn trans_add(bcx: block, vec_ty: ty::t, lhs: ValueRef,
rhs: ValueRef, dest: dest) -> block {
let ccx = bcx_ccx(bcx);
let strings = alt ty::get(vec_ty).struct {
ty::ty_str { true }
......@@ -247,8 +243,8 @@ fn trans_add(bcx: @block_ctxt, vec_ty: ty::t, lhs: ValueRef,
let write_ptr_ptr = do_spill_noroot
(bcx, get_dataptr(bcx, new_vec_ptr, llunitty));
let copy_fn = fn@(bcx: @block_ctxt, addr: ValueRef,
_ty: ty::t) -> @block_ctxt {
let copy_fn = fn@(bcx: block, addr: ValueRef,
_ty: ty::t) -> block {
let ccx = bcx_ccx(bcx);
let write_ptr = Load(bcx, write_ptr_ptr);
let bcx = copy_val(bcx, INIT, write_ptr,
......@@ -269,12 +265,12 @@ fn trans_add(bcx: @block_ctxt, vec_ty: ty::t, lhs: ValueRef,
ret base::store_in_dest(bcx, new_vec_ptr, dest);
}
type val_and_ty_fn = fn@(@block_ctxt, ValueRef, ty::t) -> result;
type val_and_ty_fn = fn@(block, ValueRef, ty::t) -> result;
type iter_vec_block = fn(@block_ctxt, ValueRef, ty::t) -> @block_ctxt;
type iter_vec_block = fn(block, ValueRef, ty::t) -> block;
fn iter_vec_raw(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t,
fill: ValueRef, f: iter_vec_block) -> @block_ctxt {
fn iter_vec_raw(bcx: block, vptr: ValueRef, vec_ty: ty::t,
fill: ValueRef, f: iter_vec_block) -> block {
let ccx = bcx_ccx(bcx);
let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
let llunitty = type_of_or_i8(ccx, unit_ty);
......@@ -288,13 +284,13 @@ fn iter_vec_raw(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t,
let data_end_ptr = pointer_add(bcx, data_ptr, fill);
// Now perform the iteration.
let header_cx = new_sub_block_ctxt(bcx, "iter_vec_loop_header");
let header_cx = sub_block(bcx, "iter_vec_loop_header");
Br(bcx, header_cx.llbb);
let data_ptr = Phi(header_cx, val_ty(data_ptr), [data_ptr], [bcx.llbb]);
let not_yet_at_end =
ICmp(header_cx, lib::llvm::IntULT, data_ptr, data_end_ptr);
let body_cx = new_sub_block_ctxt(header_cx, "iter_vec_loop_body");
let next_cx = new_sub_block_ctxt(header_cx, "iter_vec_next");
let body_cx = sub_block(header_cx, "iter_vec_loop_body");
let next_cx = sub_block(header_cx, "iter_vec_next");
CondBr(header_cx, not_yet_at_end, body_cx.llbb, next_cx.llbb);
body_cx = f(body_cx, data_ptr, unit_ty);
let increment =
......@@ -307,8 +303,8 @@ fn iter_vec_raw(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t,
ret next_cx;
}
fn iter_vec(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t,
f: iter_vec_block) -> @block_ctxt {
fn iter_vec(bcx: block, vptr: ValueRef, vec_ty: ty::t,
f: iter_vec_block) -> block {
let ccx = bcx_ccx(bcx);
let vptr = PointerCast(bcx, vptr, T_ptr(ccx.opaque_vec_type));
ret iter_vec_raw(bcx, vptr, vec_ty, get_fill(bcx, vptr), f);
......
......@@ -2,31 +2,22 @@
import lib::llvm::ValueRef;
import common::*;
import build::*;
import base::{
trans_shared_malloc,
type_of,
INIT,
trans_shared_free,
drop_ty,
new_sub_block_ctxt,
load_if_immediate,
dest
};
import shape::{size_of};
import base::*;
import shape::size_of;
export trans_uniq, make_free_glue, autoderef, duplicate, alloc_uniq;
fn trans_uniq(bcx: @block_ctxt, contents: @ast::expr,
node_id: ast::node_id, dest: dest) -> @block_ctxt {
fn trans_uniq(bcx: block, contents: @ast::expr,
node_id: ast::node_id, dest: dest) -> block {
let uniq_ty = node_id_type(bcx, node_id);
let {bcx, val: llptr} = alloc_uniq(bcx, uniq_ty);
add_clean_free(bcx, llptr, true);
bcx = base::trans_expr_save_in(bcx, contents, llptr);
bcx = trans_expr_save_in(bcx, contents, llptr);
revoke_clean(bcx, llptr);
ret base::store_in_dest(bcx, llptr, dest);
ret store_in_dest(bcx, llptr, dest);
}
fn alloc_uniq(cx: @block_ctxt, uniq_ty: ty::t) -> result {
fn alloc_uniq(cx: block, uniq_ty: ty::t) -> result {
let bcx = cx;
let contents_ty = content_ty(uniq_ty);
let r = size_of(bcx, contents_ty);
......@@ -42,19 +33,12 @@ fn alloc_uniq(cx: @block_ctxt, uniq_ty: ty::t) -> result {
ret rslt(bcx, llptr);
}
fn make_free_glue(cx: @block_ctxt, vptr: ValueRef, t: ty::t)
-> @block_ctxt {
let bcx = cx;
let free_cx = new_sub_block_ctxt(bcx, "uniq_free");
let next_cx = new_sub_block_ctxt(bcx, "uniq_free_next");
let null_test = IsNull(bcx, vptr);
CondBr(bcx, null_test, next_cx.llbb, free_cx.llbb);
let bcx = free_cx;
let bcx = drop_ty(bcx, vptr, content_ty(t));
let bcx = trans_shared_free(bcx, vptr);
Br(bcx, next_cx.llbb);
next_cx
fn make_free_glue(bcx: block, vptr: ValueRef, t: ty::t)
-> block {
with_cond(bcx, IsNotNull(bcx, vptr)) {|bcx|
let bcx = drop_ty(bcx, vptr, content_ty(t));
trans_shared_free(bcx, vptr)
}
}
fn content_ty(t: ty::t) -> ty::t {
......@@ -69,12 +53,12 @@ fn autoderef(v: ValueRef, t: ty::t) -> {v: ValueRef, t: ty::t} {
ret {v: v, t: content_ty};
}
fn duplicate(bcx: @block_ctxt, v: ValueRef, t: ty::t) -> result {
fn duplicate(bcx: block, v: ValueRef, t: ty::t) -> result {
let content_ty = content_ty(t);
let {bcx, val: llptr} = alloc_uniq(bcx, t);
let src = load_if_immediate(bcx, v, content_ty);
let dst = llptr;
let bcx = base::copy_val(bcx, INIT, dst, src, content_ty);
let bcx = copy_val(bcx, INIT, dst, src, content_ty);
ret rslt(bcx, dst);
}
\ No newline at end of file
......@@ -186,11 +186,8 @@ fn from_mut<T>(+v: [mutable T]) -> [T] unsafe {
Function: tail
Returns all but the first element of a vector
Predicates:
<is_not_empty> (v)
*/
fn tail<T: copy>(v: [const T]) : is_not_empty(v) -> [T] {
fn tail<T: copy>(v: [const T]) -> [T] {
ret slice(v, 1u, len(v));
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册