Commit 8673c4f1 authored by Marijn Haverbeke

Make ty::t type self-sufficient

It is now no longer needed to have a ty::ctxt to get at the contents
of a ty::t. The straightforward approach of doing this, simply making
ty::t a box type, unfortunately killed our compiler performance (~15%
slower) through refcounting cost. Thus, this patch now represents
ty::t as an unsafe pointer, assuming that the ty::ctxt, which keeps
these boxes alive, outlives any uses of the ty::t values. In the
current compiler this trivially holds, but it does of course add a
new potential pitfall.

ty::get takes a ty::t and returns a boxed representation of the type.
I've changed calls to ty::struct(tcx, X) into ty::get(X).struct. Type
structs are full of vectors, and copying them every time we wanted to
access them would have been a noticeable cost.
Parent 6ed8d037
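A minimal sketch, in present-day Rust rather than the 2012 dialect used in this commit, of the representation the message describes. The names here (TypeData, Ctxt, T, get) are illustrative stand-ins, not the actual rustc API: the context owns the boxed type data, and the ty::t-like handle is just a copyable unsafe pointer that is only valid while the context is alive.

```rust
struct TypeData {
    // stand-in for the "struct" contents (ty::sty in the real compiler)
    name: String,
}

struct Ctxt {
    // owning storage; the heap allocations behind these boxes never move
    interned: Vec<Box<TypeData>>,
}

#[derive(Clone, Copy)]
struct T {
    // unsafe pointer into Ctxt-owned data, valid only while the Ctxt lives
    ptr: *const TypeData,
}

impl Ctxt {
    fn mk(&mut self, data: TypeData) -> T {
        let boxed = Box::new(data);
        let ptr: *const TypeData = &*boxed; // stable even after the Box moves
        self.interned.push(boxed);
        T { ptr }
    }
}

// Analogue of ty::get(t): no ctxt argument is needed any more, but the
// caller must uphold the "ctxt outlives every T" assumption -- this is
// exactly the new potential pitfall the commit message mentions.
fn get<'a>(t: T) -> &'a TypeData {
    unsafe { &*t.ptr }
}

fn main() {
    let mut cx = Ctxt { interned: Vec::new() };
    let t = cx.mk(TypeData { name: "int".to_string() });
    // Inspect the type without threading the context through every call.
    println!("{}", get(t).name);
}
```

This also mirrors why the boxed representation is returned by reference rather than by value: the type structs hold vectors, so copying them on every access would be wasteful.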
......@@ -113,7 +113,7 @@ fn doc_type(doc: ebml::doc, tcx: ty::ctxt, cdata: cmd) -> ty::t {
fn item_type(item: ebml::doc, tcx: ty::ctxt, cdata: cmd) -> ty::t {
let t = doc_type(item, tcx, cdata);
if family_names_type(item_family(item)) {
ty::mk_named(tcx, t, @item_name(item))
ty::mk_named(tcx, t, item_name(item))
} else { t }
}
......@@ -247,7 +247,7 @@ fn get_enum_variants(cdata: cmd, id: ast::node_id, tcx: ty::ctxt)
let ctor_ty = item_type(item, tcx, cdata);
let name = item_name(item);
let arg_tys: [ty::t] = [];
alt ty::struct(tcx, ctor_ty) {
alt ty::get(ctor_ty).struct {
ty::ty_fn(f) {
for a: ty::arg in f.inputs { arg_tys += [a.ty]; }
}
......@@ -302,7 +302,7 @@ fn get_iface_methods(cdata: cmd, id: ast::node_id, tcx: ty::ctxt)
let bounds = item_ty_param_bounds(mth, tcx, cdata);
let name = item_name(mth);
let ty = doc_type(mth, tcx, cdata);
let fty = alt ty::struct(tcx, ty) { ty::ty_fn(f) { f }
let fty = alt ty::get(ty).struct { ty::ty_fn(f) { f }
_ { tcx.sess.bug("get_iface_methods: id has non-function type");
} };
result += [{ident: name, tps: bounds, fty: fty}];
......
......@@ -353,7 +353,7 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item,
encode_def_id(ebml_w, local_def(ctor_id));
encode_family(ebml_w, 'y' as u8);
encode_type_param_bounds(ebml_w, ecx, tps);
encode_type(ecx, ebml_w, ty::ty_fn_ret(tcx, fn_ty));
encode_type(ecx, ebml_w, ty::ty_fn_ret(fn_ty));
encode_name(ebml_w, item.ident);
encode_symbol(ecx, ebml_w, item.id);
ebml::end_tag(ebml_w);
......
......@@ -299,7 +299,7 @@ fn parse_ty(st: @pstate, conv: conv_did) -> ty::t {
while peek(st) as char != '"' { str::push_byte(name, next(st)); }
st.pos = st.pos + 1u;
let inner = parse_ty(st, conv);
ty::mk_named(st.tcx, inner, @name)
ty::mk_named(st.tcx, inner, name)
}
c { #error("unexpected char in type string: %c", c); fail;}
}
......
......@@ -42,8 +42,7 @@ fn enc_ty(w: io::writer, cx: @ctxt, t: ty::t) {
some(s) { *s }
none {
let buf = io::mk_mem_buffer();
enc_sty(io::mem_buffer_writer(buf), cx,
ty::struct_raw(cx.tcx, t));
enc_sty(io::mem_buffer_writer(buf), cx, ty::get(t).struct);
cx.tcx.short_names_cache.insert(t, @io::mem_buffer_str(buf));
io::mem_buffer_str(buf)
}
......@@ -55,7 +54,15 @@ fn enc_ty(w: io::writer, cx: @ctxt, t: ty::t) {
some(a) { w.write_str(*a.s); ret; }
none {
let pos = w.tell();
enc_sty(w, cx, ty::struct_raw(cx.tcx, t));
alt ty::type_name(t) {
some(n) {
w.write_char('"');
w.write_str(n);
w.write_char('"');
}
_ {}
}
enc_sty(w, cx, ty::get(t).struct);
let end = w.tell();
let len = end - pos;
fn estimate_sz(u: uint) -> uint {
......@@ -185,14 +192,6 @@ fn enc_sty(w: io::writer, cx: @ctxt, st: ty::sty) {
for tc: @ty::type_constr in cs { enc_ty_constr(w, cx, tc); }
w.write_char(']');
}
ty::ty_named(t, name) {
if cx.abbrevs != ac_no_abbrevs {
w.write_char('"');
w.write_str(*name);
w.write_char('"');
}
enc_ty(w, cx, t);
}
}
}
fn enc_proto(w: io::writer, proto: proto) {
......
......@@ -80,7 +80,7 @@ fn visit_fn(cx: @ctx, _fk: visit::fn_kind, decl: ast::fn_decl,
id: ast::node_id, sc: scope, v: vt<scope>) {
visit::visit_fn_decl(decl, sc, v);
let fty = ty::node_id_to_type(cx.tcx, id);
let args = ty::ty_fn_args(cx.tcx, fty);
let args = ty::ty_fn_args(fty);
for arg in args {
alt ty::resolved_mode(cx.tcx, arg.mode) {
ast::by_val if ty::type_has_dynamic_size(cx.tcx, arg.ty) {
......@@ -92,7 +92,7 @@ fn visit_fn(cx: @ctx, _fk: visit::fn_kind, decl: ast::fn_decl,
// Blocks need to obey any restrictions from the enclosing scope, and may
// be called multiple times.
let proto = ty::ty_fn_proto(cx.tcx, fty);
let proto = ty::ty_fn_proto(fty);
alt proto {
ast::proto_block | ast::proto_any {
check_loop(*cx, sc) {|| v.visit_block(body, sc, v);}
......@@ -221,7 +221,7 @@ fn cant_copy(cx: ctx, b: binding) -> bool {
fn check_call(cx: ctx, sc: scope, f: @ast::expr, args: [@ast::expr])
-> [binding] {
let fty = ty::expr_ty(cx.tcx, f);
let arg_ts = ty::ty_fn_args(cx.tcx, fty);
let arg_ts = ty::ty_fn_args(fty);
let mut_roots: [{arg: uint, node: node_id}] = [];
let bindings = [];
let i = 0u;
......@@ -371,7 +371,7 @@ fn check_for(cx: ctx, local: @ast::local, seq: @ast::expr, blk: ast::blk,
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
let cur_mut = root.mut;
alt ty::struct(cx.tcx, seq_t) {
alt ty::get(seq_t).struct {
ty::ty_vec(mt) {
if mt.mut != ast::imm {
cur_mut = some(contains(seq_t));
......@@ -510,7 +510,7 @@ fn helper(tcx: ty::ctxt, needle: unsafe_ty, haystack: ty::t, mut: bool)
contains(ty) { ty == haystack }
mut_contains(ty) { mut && ty == haystack }
} { ret true; }
alt ty::struct(tcx, haystack) {
alt ty::get(haystack).struct {
ty::ty_enum(_, ts) {
for t: ty::t in ts {
if helper(tcx, needle, t, mut) { ret true; }
......@@ -565,7 +565,7 @@ fn local_id_of_node(cx: ctx, id: node_id) -> uint {
// implicit copy.
fn copy_is_expensive(tcx: ty::ctxt, ty: ty::t) -> bool {
fn score_ty(tcx: ty::ctxt, ty: ty::t) -> uint {
ret alt ty::struct(tcx, ty) {
ret alt ty::get(ty).struct {
ty::ty_nil | ty::ty_bot | ty::ty_bool | ty::ty_int(_) |
ty::ty_uint(_) | ty::ty_float(_) | ty::ty_type |
ty::ty_ptr(_) { 1u }
......@@ -623,7 +623,7 @@ fn walk(tcx: ty::ctxt, mut: option<unsafe_ty>, pat: @ast::pat,
}
ast::pat_box(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
let m = alt ty::get(ty).struct {
ty::ty_box(mt) { mt.mut != ast::imm }
_ { tcx.sess.span_bug(pat.span, "box pat has non-box type"); }
},
......@@ -632,7 +632,7 @@ fn walk(tcx: ty::ctxt, mut: option<unsafe_ty>, pat: @ast::pat,
}
ast::pat_uniq(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
let m = alt ty::get(ty).struct {
ty::ty_uniq(mt) { mt.mut != ast::imm }
_ { tcx.sess.span_bug(pat.span, "uniq pat has non-uniq type"); }
},
......
......@@ -13,7 +13,7 @@ fn check_crate(tcx: ty::ctxt, crate: @crate) {
fn visit_expr(ex: @expr, cx: ctx, v: visit::vt<ctx>) {
if !cx.allow_block {
alt ty::struct(cx.tcx, ty::expr_ty(cx.tcx, ex)) {
alt ty::get(ty::expr_ty(cx.tcx, ex)).struct {
ty::ty_fn({proto: p, _}) if is_blockish(p) {
cx.tcx.sess.span_err(ex.span, "expressions with block type \
can only appear in callee or (by-ref) argument position");
......@@ -27,7 +27,7 @@ fn visit_expr(ex: @expr, cx: ctx, v: visit::vt<ctx>) {
cx.allow_block = true;
v.visit_expr(f, cx, v);
let i = 0u;
for arg_t in ty::ty_fn_args(cx.tcx, ty::expr_ty(cx.tcx, f)) {
for arg_t in ty::ty_fn_args(ty::expr_ty(cx.tcx, f)) {
cx.allow_block = (ty::arg_mode(cx.tcx, arg_t) == by_ref);
v.visit_expr(args[i], cx, v);
i += 1u;
......
......@@ -73,7 +73,7 @@ fn check_exhaustive(tcx: ty::ctxt, sp:span, scrut_ty:ty::t, pats:[@pat]) {
/* Otherwise, get the list of variants and make sure each one is
represented. Then recurse on the columns. */
let ty_def_id = alt ty::struct(tcx, scrut_ty) {
let ty_def_id = alt ty::get(scrut_ty).struct {
ty_enum(id, _) { id }
_ { ret; } };
......
......@@ -280,7 +280,7 @@ fn create_basic_type(cx: @crate_ctxt, t: ty::t, ty: @ast::ty)
let cache = get_cache(cx);
let tg = BasicTypeDescriptorTag;
alt cached_metadata::<@metadata<tydesc_md>>(
cache, tg, {|md| t == md.data.hash}) {
cache, tg, {|md| ty::type_id(t) == md.data.hash}) {
option::some(md) { ret md; }
option::none {}
}
......@@ -325,7 +325,7 @@ fn create_basic_type(cx: @crate_ctxt, t: ty::t, ty: @ast::ty)
lli32(0), //XXX flags?
lli32(encoding)];
let llnode = llmdnode(lldata);
let mdval = @{node: llnode, data: {hash: t}};
let mdval = @{node: llnode, data: {hash: ty::type_id(t)}};
update_cache(cache, tg, tydesc_metadata(mdval));
add_named_metadata(cx, "llvm.dbg.ty", llnode);
ret mdval;
......@@ -347,7 +347,7 @@ fn create_pointer_type(cx: @crate_ctxt, t: ty::t, span: span,
//let cu_node = create_compile_unit(cx, fname);
let llnode = create_derived_type(tg, file_node.node, "", 0, size * 8,
align * 8, 0, pointee.node);
let mdval = @{node: llnode, data: {hash: t}};
let mdval = @{node: llnode, data: {hash: ty::type_id(t)}};
//update_cache(cache, tg, tydesc_metadata(mdval));
add_named_metadata(cx, "llvm.dbg.ty", llnode);
ret mdval;
......@@ -420,7 +420,7 @@ fn create_record(cx: @crate_ctxt, t: ty::t, fields: [ast::ty_field],
line_from_span(cx.sess.codemap, field.span) as int,
size as int, align as int, ty_md.node);
}
let mdval = @{node: finish_structure(scx), data:{hash: t}};
let mdval = @{node: finish_structure(scx), data:{hash: ty::type_id(t)}};
ret mdval;
}
......@@ -448,7 +448,7 @@ fn create_boxed_type(cx: @crate_ctxt, outer: ty::t, _inner: ty::t,
8, //XXX just a guess
boxed.node);
let llnode = finish_structure(scx);
let mdval = @{node: llnode, data: {hash: outer}};
let mdval = @{node: llnode, data: {hash: ty::type_id(outer)}};
//update_cache(cache, tg, tydesc_metadata(mdval));
add_named_metadata(cx, "llvm.dbg.ty", llnode);
ret mdval;
......@@ -507,7 +507,7 @@ fn create_vec(cx: @crate_ctxt, vec_t: ty::t, elem_t: ty::t,
add_member(scx, "data", 0, 0, // clang says the size should be 0
sys::align_of::<u8>() as int, data_ptr);
let llnode = finish_structure(scx);
ret @{node: llnode, data: {hash: vec_t}};
ret @{node: llnode, data: {hash: ty::type_id(vec_t)}};
}
fn member_size_and_align(ty: @ast::ty) -> (int, int) {
......@@ -561,7 +561,7 @@ fn create_ty(cx: @crate_ctxt, t: ty::t, ty: @ast::ty)
}*/
fn t_to_ty(cx: @crate_ctxt, t: ty::t, span: span) -> @ast::ty {
let ty = alt ty::struct(ccx_tcx(cx), t) {
let ty = alt ty::get(t).struct {
ty::ty_nil { ast::ty_nil }
ty::ty_bot { ast::ty_bot }
ty::ty_bool { ast::ty_bool }
......@@ -593,7 +593,7 @@ fn t_to_ty(cx: @crate_ctxt, t: ty::t, span: span) -> @ast::ty {
alt ty.node {
ast::ty_box(mt) {
let inner_t = alt ty::struct(ccx_tcx(cx), t) {
let inner_t = alt ty::get(t).struct {
ty::ty_box(boxed) { boxed.ty }
_ { cx.tcx.sess.span_bug(ty.span, "t_to_ty was incoherent"); }
};
......@@ -603,7 +603,7 @@ fn t_to_ty(cx: @crate_ctxt, t: ty::t, span: span) -> @ast::ty {
}
ast::ty_uniq(mt) {
let inner_t = alt ty::struct(ccx_tcx(cx), t) {
let inner_t = alt ty::get(t).struct {
ty::ty_uniq(boxed) { boxed.ty }
// Hoping we'll have a way to eliminate this check soon.
_ { cx.tcx.sess.span_bug(ty.span, "t_to_ty was incoherent"); }
......
......@@ -31,7 +31,7 @@ fn fn_usage_expr(expr: @ast::expr,
}
if !ctx.generic_bare_fn_legal
&& ty::expr_has_ty_params(ctx.tcx, expr) {
alt ty::struct(ctx.tcx, ty::expr_ty(ctx.tcx, expr)) {
alt ty::get(ty::expr_ty(ctx.tcx, expr)).struct {
ty::ty_fn({proto: ast::proto_bare, _}) {
ctx.tcx.sess.span_fatal(
expr.span,
......
......@@ -60,7 +60,7 @@ fn check_crate(tcx: ty::ctxt, method_map: typeck::method_map,
fn with_appropriate_checker(cx: ctx, id: node_id,
b: fn(fn@(ctx, ty::t, sp: span))) {
let fty = ty::node_id_to_type(cx.tcx, id);
alt ty::ty_fn_proto(cx.tcx, fty) {
alt ty::ty_fn_proto(fty) {
proto_uniq { b(check_send); }
proto_box { b(check_copy); }
proto_bare { b(check_none); }
......@@ -142,9 +142,10 @@ fn check_expr(e: @expr, cx: ctx, v: visit::vt<ctx>) {
some(ex) {
// All noncopyable fields must be overridden
let t = ty::expr_ty(cx.tcx, ex);
let ty_fields = alt ty::struct(cx.tcx, t) { ty::ty_rec(f) { f }
_ { cx.tcx.sess.span_bug(ex.span,
"Bad expr type in record"); } };
let ty_fields = alt ty::get(t).struct {
ty::ty_rec(f) { f }
_ { cx.tcx.sess.span_bug(ex.span, "Bad expr type in record"); }
};
for tf in ty_fields {
if !vec::any(fields, {|f| f.node.ident == tf.ident}) &&
!ty::kind_can_be_copied(ty::type_kind(cx.tcx, tf.mt.ty)) {
......@@ -164,7 +165,7 @@ fn check_expr(e: @expr, cx: ctx, v: visit::vt<ctx>) {
}
expr_call(f, args, _) {
let i = 0u;
for arg_t in ty::ty_fn_args(cx.tcx, ty::expr_ty(cx.tcx, f)) {
for arg_t in ty::ty_fn_args(ty::expr_ty(cx.tcx, f)) {
alt ty::arg_mode(cx.tcx, arg_t) {
by_copy { maybe_copy(cx, args[i]); }
by_ref | by_val | by_mut_ref | by_move { }
......@@ -242,7 +243,7 @@ fn check_copy_ex(cx: ctx, ex: @expr, _warn: bool) {
check_copy(cx, ty, ex.span);
// FIXME turn this on again once vector types are no longer unique.
// Right now, it is too annoying to be useful.
/* if warn && ty::type_is_unique(cx.tcx, ty) {
/* if warn && ty::type_is_unique(ty) {
cx.tcx.sess.span_warn(ex.span, "copying a unique value");
}*/
}
......
......@@ -65,7 +65,7 @@ fn find_last_uses(c: @crate, def_map: resolve::def_map,
}
fn ex_is_blockish(cx: ctx, id: node_id) -> bool {
alt ty::struct(cx.tcx, ty::node_id_to_type(cx.tcx, id)) {
alt ty::get(ty::node_id_to_type(cx.tcx, id)).struct {
ty::ty_fn({proto: p, _}) if is_blockish(p) { true }
_ { false }
}
......@@ -147,7 +147,7 @@ fn visit_expr(ex: @expr, cx: ctx, v: visit::vt<ctx>) {
expr_call(f, args, _) {
v.visit_expr(f, cx, v);
let i = 0u, fns = [];
let arg_ts = ty::ty_fn_args(cx.tcx, ty::expr_ty(cx.tcx, f));
let arg_ts = ty::ty_fn_args(ty::expr_ty(cx.tcx, f));
for arg in args {
alt arg.node {
expr_fn(p, _, _, _) if is_blockish(p) {
......@@ -175,7 +175,7 @@ fn visit_fn(fk: visit::fn_kind, decl: fn_decl, body: blk,
sp: span, id: node_id,
cx: ctx, v: visit::vt<ctx>) {
let fty = ty::node_id_to_type(cx.tcx, id);
let proto = ty::ty_fn_proto(cx.tcx, fty);
let proto = ty::ty_fn_proto(fty);
alt proto {
proto_any | proto_block {
visit_block(func, cx, {||
......
......@@ -18,7 +18,7 @@ fn expr_root(tcx: ty::ctxt, ex: @expr, autoderef: bool) ->
fn maybe_auto_unbox(tcx: ty::ctxt, t: ty::t) -> {t: ty::t, ds: [deref]} {
let ds = [], t = t;
while true {
alt ty::struct(tcx, t) {
alt ty::get(t).struct {
ty::ty_box(mt) {
ds += [@{mut: mt.mut == mut, kind: unbox(false), outer_t: t}];
t = mt.ty;
......@@ -51,7 +51,7 @@ fn maybe_auto_unbox(tcx: ty::ctxt, t: ty::t) -> {t: ty::t, ds: [deref]} {
expr_field(base, ident, _) {
let auto_unbox = maybe_auto_unbox(tcx, ty::expr_ty(tcx, base));
let is_mut = false;
alt ty::struct(tcx, auto_unbox.t) {
alt ty::get(auto_unbox.t).struct {
ty::ty_rec(fields) {
for fld: ty::field in fields {
if str::eq(ident, fld.ident) {
......@@ -68,7 +68,7 @@ fn maybe_auto_unbox(tcx: ty::ctxt, t: ty::t) -> {t: ty::t, ds: [deref]} {
}
expr_index(base, _) {
let auto_unbox = maybe_auto_unbox(tcx, ty::expr_ty(tcx, base));
alt ty::struct(tcx, auto_unbox.t) {
alt ty::get(auto_unbox.t).struct {
ty::ty_vec(mt) {
ds +=
[@{mut: mt.mut == mut,
......@@ -87,7 +87,7 @@ fn maybe_auto_unbox(tcx: ty::ctxt, t: ty::t) -> {t: ty::t, ds: [deref]} {
if op == deref {
let base_t = ty::expr_ty(tcx, base);
let is_mut = false, ptr = false;
alt ty::struct(tcx, base_t) {
alt ty::get(base_t).struct {
ty::ty_box(mt) { is_mut = mt.mut == mut; }
ty::ty_uniq(mt) { is_mut = mt.mut == mut; }
ty::ty_res(_, _, _) { }
......@@ -225,7 +225,7 @@ fn check_move_rhs(cx: @ctx, src: @expr) {
}
fn check_call(cx: @ctx, f: @expr, args: [@expr]) {
let arg_ts = ty::ty_fn_args(cx.tcx, ty::expr_ty(cx.tcx, f));
let arg_ts = ty::ty_fn_args(ty::expr_ty(cx.tcx, f));
let i = 0u;
for arg_t: ty::arg in arg_ts {
alt ty::resolved_mode(cx.tcx, arg_t.mode) {
......@@ -238,7 +238,7 @@ fn check_call(cx: @ctx, f: @expr, args: [@expr]) {
}
fn check_bind(cx: @ctx, f: @expr, args: [option<@expr>]) {
let arg_ts = ty::ty_fn_args(cx.tcx, ty::expr_ty(cx.tcx, f));
let arg_ts = ty::ty_fn_args(ty::expr_ty(cx.tcx, f));
let i = 0u;
for arg in args {
alt arg {
......@@ -277,7 +277,7 @@ fn is_immutable_def(cx: @ctx, def: def) -> option<str> {
def_self(_) { some("self argument") }
def_upvar(_, inner, node_id) {
let ty = ty::node_id_to_type(cx.tcx, node_id);
let proto = ty::ty_fn_proto(cx.tcx, ty);
let proto = ty::ty_fn_proto(ty);
ret alt proto {
proto_any | proto_block { is_immutable_def(cx, *inner) }
_ { some("upvar") }
......
......@@ -76,7 +76,7 @@ fn hash_res_info(ri: res_info) -> uint {
h *= 33u;
h += ri.did.node as uint;
h *= 33u;
h += ri.t as uint;
h += ty::type_id(ri.t);
ret h;
}
......@@ -121,7 +121,7 @@ fn largest_variants(ccx: @crate_ctxt, tag_id: ast::def_id) -> [uint] {
let bounded = true;
let {a: min_size, b: min_align} = {a: 0u, b: 0u};
for elem_t: ty::t in variant.args {
if ty::type_contains_params(ccx.tcx, elem_t) {
if ty::type_has_params(elem_t) {
// TODO: We could do better here; this causes us to
// conservatively assume that (int, T) has minimum size 0,
// when in fact it has minimum size sizeof(int).
......@@ -319,7 +319,7 @@ fn add_substr(&dest: [u8], src: [u8]) {
fn shape_of(ccx: @crate_ctxt, t: ty::t, ty_param_map: [uint]) -> [u8] {
let s = [];
alt ty::struct(ccx.tcx, t) {
alt ty::get(t).struct {
ty::ty_nil | ty::ty_bool | ty::ty_uint(ast::ty_u8) |
ty::ty_bot { s += [shape_u8]; }
ty::ty_int(ast::ty_i) { s += [s_int(ccx.tcx)]; }
......@@ -447,7 +447,7 @@ fn shape_of(ccx: @crate_ctxt, t: ty::t, ty_param_map: [uint]) -> [u8] {
ty::ty_constr(inner_t, _) {
s += shape_of(ccx, inner_t, ty_param_map);
}
ty::ty_var(_) | ty::ty_named(_, _) | ty::ty_self(_) {
ty::ty_var(_) | ty::ty_self(_) {
ccx.tcx.sess.bug("shape_of: unexpected type struct found");
}
}
......@@ -664,7 +664,7 @@ fn llalign_of(cx: @crate_ctxt, t: TypeRef) -> ValueRef {
fn static_size_of_enum(cx: @crate_ctxt, t: ty::t)
: type_has_static_size(cx, t) -> uint {
if cx.enum_sizes.contains_key(t) { ret cx.enum_sizes.get(t); }
alt ty::struct(cx.tcx, t) {
alt ty::get(t).struct {
ty::ty_enum(tid, subtys) {
// Compute max(variant sizes).
......@@ -719,7 +719,7 @@ fn align_elements(cx: @block_ctxt, elts: [ty::t]) -> metrics {
ret { bcx: bcx, sz: off, align: max_align };
}
alt ty::struct(bcx_tcx(cx), t) {
alt ty::get(t).struct {
ty::ty_param(p, _) {
let ti = none::<@tydesc_info>;
let {bcx, val: tydesc} = base::get_tydesc(cx, t, false, ti).result;
......@@ -783,7 +783,7 @@ fn align_elements(cx: @block_ctxt, elts: [ty::t]) -> metrics {
// types.
fn simplify_type(ccx: @crate_ctxt, typ: ty::t) -> ty::t {
fn simplifier(ccx: @crate_ctxt, typ: ty::t) -> ty::t {
alt ty::struct(ccx.tcx, typ) {
alt ty::get(typ).struct {
ty::ty_box(_) | ty::ty_iface(_, _) {
ret ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx));
}
......
......@@ -428,8 +428,6 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
let rec_vals = [];
for field_name: ast::ident in rec_fields {
let ix = option::get(ty::field_idx(field_name, fields));
// not sure how to get rid of this check
check type_is_tup_like(bcx, rec_ty);
let r = base::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
rec_vals += [r.val];
bcx = r.bcx;
......@@ -441,18 +439,12 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
if any_tup_pat(m, col) {
let tup_ty = ty::node_id_to_type(ccx.tcx, pat_id);
let n_tup_elts =
alt ty::struct(ccx.tcx, tup_ty) {
ty::ty_tup(elts) { vec::len(elts) }
_ {
ccx.sess.bug("Non-tuple type in tuple\
pattern");
}
};
let n_tup_elts = alt ty::get(tup_ty).struct {
ty::ty_tup(elts) { vec::len(elts) }
_ { ccx.sess.bug("Non-tuple type in tuple pattern"); }
};
let tup_vals = [], i = 0u;
while i < n_tup_elts {
// how to get rid of this check?
check type_is_tup_like(bcx, tup_ty);
let r = base::GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
tup_vals += [r.val];
bcx = r.bcx;
......@@ -500,11 +492,8 @@ enum branch_kind { no_branch, single, switch, compare, }
lit(l) {
test_val = Load(bcx, val);
let pty = ty::node_id_to_type(ccx.tcx, pat_id);
kind = if ty::type_is_integral(ccx.tcx, pty) {
switch
} else {
compare
};
kind = if ty::type_is_integral(pty) { switch }
else { compare };
}
range(_, _) {
test_val = Load(bcx, val);
......@@ -729,7 +718,6 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
// FIXME: Could constrain pat_bind to make this
// check unnecessary.
check (type_has_static_size(ccx, ty));
check non_ty_var(ccx, ty);
let llty = base::type_of(ccx, ty);
let alloc = base::alloca(bcx, llty);
bcx = base::copy_val(bcx, base::INIT, alloc,
......@@ -758,7 +746,6 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
for f: ast::field_pat in fields {
let ix = option::get(ty::field_idx(f.ident, rec_fields));
// how to get rid of this check?
check type_is_tup_like(bcx, rec_ty);
let r = base::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
bcx = bind_irrefutable_pat(r.bcx, f.pat, r.val, make_copy);
}
......@@ -767,8 +754,6 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
let tup_ty = node_id_type(bcx, pat.id);
let i = 0u;
for elem in elems {
// how to get rid of this check?
check type_is_tup_like(bcx, tup_ty);
let r = base::GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
bcx = bind_irrefutable_pat(r.bcx, elem, r.val, make_copy);
i += 1u;
......
This diff is collapsed.
......@@ -202,7 +202,6 @@ fn store_uniq_tydesc(bcx: @block_ctxt,
}
ty::ck_uniq {
let uniq_cbox_ty = mk_tuplified_uniq_cbox_ty(tcx, cdata_ty);
check uniq::type_is_unique_box(bcx, uniq_cbox_ty);
let {bcx, val: box} = uniq::alloc_uniq(bcx, uniq_cbox_ty);
nuke_ref_count(bcx, box);
let bcx = store_uniq_tydesc(bcx, cdata_ty, box, ti);
......@@ -279,7 +278,6 @@ fn maybe_clone_tydesc(bcx: @block_ctxt,
let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
let cboxptr_ty = ty::mk_ptr(tcx, {ty:cbox_ty, mut:ast::imm});
let llbox = cast_if_we_can(bcx, llbox, cboxptr_ty);
check type_is_tup_like(bcx, cbox_ty);
// If necessary, copy tydescs describing type parameters into the
// appropriate slot in the closure.
......@@ -308,11 +306,9 @@ fn maybe_clone_tydesc(bcx: @block_ctxt,
ev_to_str(ccx, bv)));
}
let bound_data = GEP_tup_like_1(bcx, cbox_ty, llbox,
[0,
abi::box_field_body,
abi::closure_body_bindings,
i as int]);
let bound_data = GEP_tup_like(bcx, cbox_ty, llbox,
[0, abi::box_field_body,
abi::closure_body_bindings, i as int]);
bcx = bound_data.bcx;
let bound_data = bound_data.val;
alt bv {
......@@ -404,7 +400,6 @@ fn load_environment(enclosing_cx: @block_ctxt,
// Populate the type parameters from the environment. We need to
// do this first because the tydescs are needed to index into
// the bindings if they are dynamically sized.
check type_is_tup_like(bcx, cdata_ty);
let {bcx, val: lltydescs} = GEP_tup_like(bcx, cdata_ty, llcdata,
[0, abi::closure_body_ty_params]);
let off = 0;
......@@ -429,7 +424,6 @@ fn load_environment(enclosing_cx: @block_ctxt,
alt cap_var.mode {
capture::cap_drop { /* ignore */ }
_ {
check type_is_tup_like(bcx, cdata_ty);
let upvarptr =
GEP_tup_like(bcx, cdata_ty, llcdata,
[0, abi::closure_body_bindings, i as int]);
......@@ -612,7 +606,7 @@ fn make_fn_glue(
}
};
ret alt ty::struct(tcx, t) {
ret alt ty::get(t).struct {
ty::ty_fn({proto: ast::proto_bare, _}) |
ty::ty_fn({proto: ast::proto_block, _}) |
ty::ty_fn({proto: ast::proto_any, _}) { bcx }
......@@ -742,7 +736,7 @@ fn trans_bind_thunk(ccx: @crate_ctxt,
// If we supported constraints on record fields, we could make the
// constraints for this function:
/*
: returns_non_ty_var(ccx, outgoing_fty),
: returns_non_ty_var(outgoing_fty),
type_has_static_size(ccx, incoming_fty) ->
*/
// but since we don't, we have to do the checks at the beginning.
......@@ -811,8 +805,6 @@ fn trans_bind_thunk(ccx: @crate_ctxt,
(fptr, llvm::LLVMGetUndef(T_opaque_cbox_ptr(ccx)), 0)
}
none {
// Silly check
check type_is_tup_like(bcx, cdata_ty);
let {bcx: cx, val: pair} =
GEP_tup_like(bcx, cdata_ty, llcdata,
[0, abi::closure_body_bindings, 0]);
......@@ -830,16 +822,15 @@ fn trans_bind_thunk(ccx: @crate_ctxt,
// Get f's return type, which will also be the return type of the entire
// bind expression.
let outgoing_ret_ty = ty::ty_fn_ret(ccx.tcx, outgoing_fty);
let outgoing_ret_ty = ty::ty_fn_ret(outgoing_fty);
// Get the types of the arguments to f.
let outgoing_args = ty::ty_fn_args(ccx.tcx, outgoing_fty);
let outgoing_args = ty::ty_fn_args(outgoing_fty);
// The 'llretptr' that will arrive in the thunk we're creating also needs
// to be the correct type. Cast it to f's return type, if necessary.
let llretptr = fcx.llretptr;
if ty::type_contains_params(ccx.tcx, outgoing_ret_ty) {
check non_ty_var(ccx, outgoing_ret_ty);
if ty::type_has_params(outgoing_ret_ty) {
let llretty = type_of_inner(ccx, outgoing_ret_ty);
llretptr = PointerCast(bcx, llretptr, T_ptr(llretty));
}
......@@ -848,7 +839,6 @@ fn trans_bind_thunk(ccx: @crate_ctxt,
let llargs: [ValueRef] = [llretptr, lltargetenv];
// Copy in the type parameters.
check type_is_tup_like(l_bcx, cdata_ty);
let {bcx: l_bcx, val: param_record} =
GEP_tup_like(l_bcx, cdata_ty, llcdata,
[0, abi::closure_body_ty_params]);
......@@ -888,8 +878,6 @@ fn trans_bind_thunk(ccx: @crate_ctxt,
// Arg provided at binding time; thunk copies it from
// closure.
some(e) {
// Silly check
check type_is_tup_like(bcx, cdata_ty);
let bound_arg =
GEP_tup_like(bcx, cdata_ty, llcdata,
[0, abi::closure_body_bindings, b]);
......@@ -911,7 +899,7 @@ fn trans_bind_thunk(ccx: @crate_ctxt,
// If the type is parameterized, then we need to cast the
// type we actually have to the parameterized out type.
if ty::type_contains_params(ccx.tcx, out_arg.ty) {
if ty::type_has_params(out_arg.ty) {
val = PointerCast(bcx, val, llout_arg_ty);
}
llargs += [val];
......@@ -921,7 +909,7 @@ fn trans_bind_thunk(ccx: @crate_ctxt,
// Arg will be provided when the thunk is invoked.
none {
let arg: ValueRef = llvm::LLVMGetParam(llthunk, a as c_uint);
if ty::type_contains_params(ccx.tcx, out_arg.ty) {
if ty::type_has_params(out_arg.ty) {
arg = PointerCast(bcx, arg, llout_arg_ty);
}
llargs += [arg];
......
......@@ -234,7 +234,7 @@ fn add_clean_temp(cx: @block_ctxt, val: ValueRef, ty: ty::t) {
if !ty::type_needs_drop(bcx_tcx(cx), ty) { ret; }
fn do_drop(bcx: @block_ctxt, val: ValueRef, ty: ty::t) ->
@block_ctxt {
if ty::type_is_immediate(bcx_tcx(bcx), ty) {
if ty::type_is_immediate(ty) {
ret base::drop_ty_immediate(bcx, val, ty);
} else {
ret drop_ty(bcx, val, ty);
......@@ -298,8 +298,6 @@ fn get_res_dtor(ccx: @crate_ctxt, did: ast::def_id, inner_t: ty::t)
let param_bounds = ty::lookup_item_type(ccx.tcx, did).bounds;
let nil_res = ty::mk_nil(ccx.tcx);
// FIXME: Silly check -- mk_nil should have a postcondition
check non_ty_var(ccx, nil_res);
let fn_mode = ast::expl(ast::by_ref);
let f_t = type_of_fn(ccx, [{mode: fn_mode, ty: inner_t}],
nil_res, *param_bounds);
......@@ -856,23 +854,6 @@ fn C_shape(ccx: @crate_ctxt, bytes: [u8]) -> ValueRef {
!ty::type_has_dynamic_size(cx.tcx, t)
}
pure fn non_ty_var(cx: @crate_ctxt, t: ty::t) -> bool {
let st = ty::struct(cx.tcx, t);
alt st {
ty::ty_var(_) { false }
_ { true }
}
}
pure fn returns_non_ty_var(cx: @crate_ctxt, t: ty::t) -> bool {
non_ty_var(cx, ty::ty_fn_ret(cx.tcx, t))
}
pure fn type_is_tup_like(cx: @block_ctxt, t: ty::t) -> bool {
let tcx = bcx_tcx(cx);
ty::type_is_tup_like(tcx, t)
}
// Used to identify cached dictionaries
enum dict_param {
dict_param_dict(dict_id),
......@@ -885,7 +866,7 @@ fn hash_dict_id(&&dp: dict_id) -> uint {
h = h << 2u;
alt param {
dict_param_dict(d) { h += hash_dict_id(d); }
dict_param_ty(t) { h += t; }
dict_param_ty(t) { h += ty::type_id(t); }
}
}
h
......@@ -896,7 +877,7 @@ fn hash_dict_id(&&dp: dict_id) -> uint {
type mono_id = @{def: ast::def_id, substs: [ty::t], dicts: [dict_id]};
fn hash_mono_id(&&mi: mono_id) -> uint {
let h = syntax::ast_util::hash_def_id(mi.def);
for ty in mi.substs { h = (h << 2u) + ty; }
for ty in mi.substs { h = (h << 2u) + ty::type_id(ty); }
for dict in mi.dicts { h = (h << 2u) + hash_dict_id(dict); }
h
}
......
......@@ -113,7 +113,7 @@ fn trans_vtable_callee(bcx: @block_ctxt, self: ValueRef, dict: ValueRef,
T_ptr(T_array(T_ptr(llfty), n_method + 1u)));
let mptr = Load(bcx, GEPi(bcx, vtable, [0, n_method as int]));
let generic = none;
if vec::len(*method.tps) > 0u || ty::type_contains_params(tcx, fty) {
if vec::len(*method.tps) > 0u || ty::type_has_params(fty) {
let tydescs = [], tis = [];
let tptys = ty::node_id_to_type_params(tcx, callee_id);
for t in vec::tail_n(tptys, vec::len(tptys) - vec::len(*method.tps)) {
......@@ -147,7 +147,6 @@ fn trans_param_callee(bcx: @block_ctxt, callee_id: ast::node_id,
fn trans_iface_callee(bcx: @block_ctxt, callee_id: ast::node_id,
base: @ast::expr, n_method: uint)
-> lval_maybe_callee {
let tcx = bcx_tcx(bcx);
let {bcx, val} = trans_temp_expr(bcx, base);
let box_body = GEPi(bcx, val, [0, abi::box_field_body]);
let dict = Load(bcx, PointerCast(bcx, GEPi(bcx, box_body, [0, 1]),
......@@ -155,7 +154,7 @@ fn trans_iface_callee(bcx: @block_ctxt, callee_id: ast::node_id,
// FIXME[impl] I doubt this is alignment-safe
let self = PointerCast(bcx, GEPi(bcx, box_body, [0, 2]),
T_opaque_cbox_ptr(bcx_ccx(bcx)));
let iface_id = alt ty::struct(tcx, expr_ty(bcx, base)) {
let iface_id = alt ty::get(expr_ty(bcx, base)).struct {
ty::ty_iface(did, _) { did }
// precondition
_ { bcx_tcx(bcx).sess.span_bug(base.span, "base has non-iface type \
......@@ -310,7 +309,7 @@ fn trans_iface_vtable(ccx: @crate_ctxt, pt: path, it: @ast::item) {
fn dict_is_static(tcx: ty::ctxt, origin: typeck::dict_origin) -> bool {
alt origin {
typeck::dict_static(_, ts, origs) {
vec::all(ts, {|t| !ty::type_contains_params(tcx, t)}) &&
vec::all(ts, {|t| !ty::type_has_params(t)}) &&
vec::all(*origs, {|o| dict_is_static(tcx, o)})
}
typeck::dict_iface(_) { true }
......
......@@ -157,7 +157,7 @@ fn trans_append(cx: @block_ctxt, vec_ty: ty::t, lhsptr: ValueRef,
(PointerCast(cx, lhsptr, T_ptr(T_ptr(ccx.opaque_vec_type))),
PointerCast(cx, rhs, T_ptr(ccx.opaque_vec_type)))
};
let strings = alt ty::struct(bcx_tcx(cx), vec_ty) {
let strings = alt ty::get(vec_ty).struct {
ty::ty_str { true }
ty::ty_vec(_) { false }
_ {
......@@ -233,7 +233,7 @@ fn trans_append_literal(bcx: @block_ctxt, vptrptr: ValueRef, vec_ty: ty::t,
fn trans_add(bcx: @block_ctxt, vec_ty: ty::t, lhs: ValueRef,
rhs: ValueRef, dest: dest) -> @block_ctxt {
let ccx = bcx_ccx(bcx);
let strings = alt ty::struct(bcx_tcx(bcx), vec_ty) {
let strings = alt ty::get(vec_ty).struct {
ty::ty_str { true }
_ { false }
};
......
......@@ -14,17 +14,11 @@
};
import shape::{size_of};
export trans_uniq, make_free_glue, type_is_unique_box, autoderef, duplicate,
alloc_uniq;
pure fn type_is_unique_box(bcx: @block_ctxt, ty: ty::t) -> bool {
ty::type_is_unique_box(bcx_tcx(bcx), ty)
}
export trans_uniq, make_free_glue, autoderef, duplicate, alloc_uniq;
fn trans_uniq(bcx: @block_ctxt, contents: @ast::expr,
node_id: ast::node_id, dest: dest) -> @block_ctxt {
let uniq_ty = node_id_type(bcx, node_id);
check type_is_unique_box(bcx, uniq_ty);
let {bcx, val: llptr} = alloc_uniq(bcx, uniq_ty);
add_clean_free(bcx, llptr, true);
bcx = base::trans_expr_save_in(bcx, contents, llptr);
......@@ -32,18 +26,14 @@ fn trans_uniq(bcx: @block_ctxt, contents: @ast::expr,
ret base::store_in_dest(bcx, llptr, dest);
}
fn alloc_uniq(cx: @block_ctxt, uniq_ty: ty::t)
: type_is_unique_box(cx, uniq_ty) -> result {
fn alloc_uniq(cx: @block_ctxt, uniq_ty: ty::t) -> result {
let bcx = cx;
let contents_ty = content_ty(bcx, uniq_ty);
let contents_ty = content_ty(uniq_ty);
let r = size_of(bcx, contents_ty);
bcx = r.bcx;
let llsz = r.val;
let ccx = bcx_ccx(bcx);
check non_ty_var(ccx, contents_ty);
let llptrty = T_ptr(type_of_inner(ccx, contents_ty));
let llptrty = T_ptr(type_of_inner(bcx_ccx(bcx), contents_ty));
r = trans_shared_malloc(bcx, llptrty, llsz);
bcx = r.bcx;
......@@ -53,8 +43,7 @@ fn alloc_uniq(cx: @block_ctxt, uniq_ty: ty::t)
}
fn make_free_glue(cx: @block_ctxt, vptr: ValueRef, t: ty::t)
: type_is_unique_box(cx, t) -> @block_ctxt {
-> @block_ctxt {
let bcx = cx;
let free_cx = new_sub_block_ctxt(bcx, "uniq_free");
let next_cx = new_sub_block_ctxt(bcx, "uniq_free_next");
......@@ -62,32 +51,26 @@ fn make_free_glue(cx: @block_ctxt, vptr: ValueRef, t: ty::t)
CondBr(bcx, null_test, next_cx.llbb, free_cx.llbb);
let bcx = free_cx;
let bcx = drop_ty(bcx, vptr, content_ty(cx, t));
let bcx = drop_ty(bcx, vptr, content_ty(t));
let bcx = trans_shared_free(bcx, vptr);
Br(bcx, next_cx.llbb);
next_cx
}
fn content_ty(bcx: @block_ctxt, t: ty::t)
: type_is_unique_box(bcx, t) -> ty::t {
alt ty::struct(bcx_tcx(bcx), t) {
fn content_ty(t: ty::t) -> ty::t {
alt ty::get(t).struct {
ty::ty_uniq({ty: ct, _}) { ct }
_ { std::util::unreachable(); }
}
}
fn autoderef(bcx: @block_ctxt, v: ValueRef, t: ty::t)
: type_is_unique_box(bcx, t) -> {v: ValueRef, t: ty::t} {
let content_ty = content_ty(bcx, t);
fn autoderef(v: ValueRef, t: ty::t) -> {v: ValueRef, t: ty::t} {
let content_ty = content_ty(t);
ret {v: v, t: content_ty};
}
fn duplicate(bcx: @block_ctxt, v: ValueRef, t: ty::t)
: type_is_unique_box(bcx, t) -> result {
let content_ty = content_ty(bcx, t);
fn duplicate(bcx: @block_ctxt, v: ValueRef, t: ty::t) -> result {
let content_ty = content_ty(t);
let {bcx, val: llptr} = alloc_uniq(bcx, t);
let src = load_if_immediate(bcx, v, content_ty);
......
......@@ -492,14 +492,14 @@ fn new_crate_ctxt(cx: ty::ctxt) -> crate_ctxt {
If it has a function type with a ! annotation,
the answer is noreturn. */
fn controlflow_expr(ccx: crate_ctxt, e: @expr) -> ret_style {
alt ty::struct(ccx.tcx, ty::node_id_to_type(ccx.tcx, e.id)) {
alt ty::get(ty::node_id_to_type(ccx.tcx, e.id)).struct {
ty::ty_fn(f) { ret f.ret_style; }
_ { ret return_val; }
}
}
fn constraints_expr(cx: ty::ctxt, e: @expr) -> [@ty::constr] {
alt ty::struct(cx, ty::node_id_to_type(cx, e.id)) {
alt ty::get(ty::node_id_to_type(cx, e.id)).struct {
ty::ty_fn(f) { ret f.constraints; }
_ { ret []; }
}
......@@ -1071,10 +1071,9 @@ fn locals_to_bindings(tcx: ty::ctxt,
}
fn callee_modes(fcx: fn_ctxt, callee: node_id) -> [mode] {
let ty =
ty::type_autoderef(fcx.ccx.tcx,
ty::node_id_to_type(fcx.ccx.tcx, callee));
alt ty::struct(fcx.ccx.tcx, ty) {
let ty = ty::type_autoderef(fcx.ccx.tcx,
ty::node_id_to_type(fcx.ccx.tcx, callee));
alt ty::get(ty).struct {
ty::ty_fn({inputs: args, _}) {
let modes = [];
for arg: ty::arg in args { modes += [arg.mode]; }
......
......@@ -113,7 +113,7 @@ fn check_states_against_conditions(fcx: fn_ctxt,
/* Check that the return value is initialized */
let post = aux::block_poststate(fcx.ccx, f_body);
if !promises(fcx, post, fcx.enclosing.i_return) &&
!type_is_nil(fcx.ccx.tcx, ret_ty_of_fn(fcx.ccx.tcx, id)) &&
!type_is_nil(ret_ty_of_fn(fcx.ccx.tcx, id)) &&
f_decl.cf == return_val {
fcx.ccx.tcx.sess.span_err(f_body.span,
"In function " + fcx.name +
......
......@@ -758,7 +758,7 @@ fn find_pre_post_state_fn(fcx: fn_ctxt,
// We don't want to clear the diverges bit for bottom typed things,
// which really do diverge. I feel like there is a cleaner way
// to do this than checking the type.
if !type_is_bot(fcx.ccx.tcx, expr_ty(fcx.ccx.tcx, tailexpr)) {
if !type_is_bot(expr_ty(fcx.ccx.tcx, tailexpr)) {
let post = false_postcond(num_constrs);
// except for the "diverges" bit...
kill_poststate_(fcx, fcx.enclosing.i_diverge, post);
......
This diff is collapsed.
This diff is collapsed.
......@@ -16,8 +16,8 @@ fn fn_input_to_str(cx: ctxt, input: {mode: ast::mode, ty: t}) ->
let modestr = alt canon_mode(cx, mode) {
ast::infer(_) { "" }
ast::expl(m) {
if !ty::type_contains_vars(cx, ty) &&
m == ty::default_arg_mode_for_ty(cx, ty) {
if !ty::type_has_vars(ty) &&
m == ty::default_arg_mode_for_ty(ty) {
""
} else {
mode_to_str(ast::expl(m))
......@@ -36,7 +36,7 @@ fn fn_to_str(cx: ctxt, proto: ast::proto, ident: option<ast::ident>,
for a: arg in inputs { strs += [fn_input_to_str(cx, a)]; }
s += str::connect(strs, ", ");
s += ")";
if struct(cx, output) != ty_nil {
if ty::get(output).struct != ty_nil {
s += " -> ";
alt cf {
ast::noreturn { s += "!"; }
......@@ -62,22 +62,22 @@ fn mt_to_str(cx: ctxt, m: mt) -> str {
}
ret mstr + ty_to_str(cx, m.ty);
}
alt ty_name(cx, typ) {
alt ty::type_name(typ) {
some(cs) {
alt struct(cx, typ) {
alt ty::get(typ).struct {
ty_enum(_, tps) | ty_res(_, _, tps) {
if vec::len(tps) > 0u {
let strs = vec::map(tps, {|t| ty_to_str(cx, t)});
ret *cs + "<" + str::connect(strs, ",") + ">";
ret cs + "<" + str::connect(strs, ",") + ">";
}
}
_ {}
}
ret *cs;
ret cs;
}
_ { }
}
ret alt struct(cx, typ) {
ret alt ty::get(typ).struct {
ty_nil { "()" }
ty_bot { "_|_" }
ty_bool { "bool" }
......