Commit 5e36a997 authored by Niko Matsakis

Refactor trans to replace lvalue and friends with Datum.

Also:
- report illegal move/ref combos whether or not ref comes first
- commented out fix for #3387, too restrictive and causes an ICE
Parent adc14272
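
For orientation before the diff: this commit replaces the old lvalue/lval_kind machinery with a Datum, which bundles an LLVM value with its Rust type, its representation mode, and where it came from. The real definition is in the new trans/datum.rs, whose diff is collapsed further down; the sketch below is reconstructed only from the fields and methods the visible hunks use (val, ty, mode, source, ByRef, ByValue, FromLvalue, store_to, move_to, to_ref_llval), and the FromRvalue variant is an assumed counterpart that does not appear in this excerpt.

// Sketch only: inferred from usages in the hunks below, not copied from
// the (collapsed) datum.rs diff, so details may differ.
enum DatumMode {
    ByValue,     // `val` is the value itself (an immediate)
    ByRef        // `val` is a pointer to the value in memory
}

enum DatumSource {
    FromLvalue,  // borrowed from an lvalue; the datum does not own cleanup
    FromRvalue   // assumed: a temporary owned by the datum (not shown here)
}

struct Datum {
    val: ValueRef;       // the LLVM value, interpreted according to `mode`
    ty: ty::t;           // the Rust type of the value
    mode: DatumMode;
    source: DatumSource;
}

Call sites below then use datum.store_to(bcx, INIT, dst) to copy the value into a destination, datum.move_to(bcx, INIT, dst) to move it, and datum.to_ref_llval(bcx) to obtain a by-ref pointer; that is what lets closure.rs collapse the old env_copy/env_move/env_ref cases into the single EnvValue {action, datum} pairing.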
......@@ -100,6 +100,9 @@ endif
ifdef TIME_LLVM_PASSES
CFG_RUSTC_FLAGS += -Z time-llvm-passes
endif
ifdef TRACE
CFG_RUSTC_FLAGS += -Z trace
endif
# platform-specific auto-configuration
include $(CFG_SRC_DIR)mk/platform.mk
......
......@@ -65,7 +65,9 @@ mod tests {
fn identity_crisis() {
// Writing a test for the identity function. How did it come to this?
let x = ~[(5, false)];
assert x.eq(id(copy x));
//FIXME #3387 assert x.eq(id(copy x));
let y = copy x;
assert x.eq(id(y));
}
#[test]
fn test_swap() {
......
......@@ -800,10 +800,11 @@ fn test_optmulti_long_multi() {
let rs = getopts(args, opts);
match rs {
Ok(m) => {
assert (opt_present(m, ~"test"));
assert (opt_str(m, ~"test") == ~"20");
assert (opt_strs(m, ~"test")[0] == ~"20");
assert (opt_strs(m, ~"test")[1] == ~"30");
assert (opt_present(m, ~"test"));
assert (opt_str(m, ~"test") == ~"20");
let pair = opt_strs(m, ~"test");
assert (pair[0] == ~"20");
assert (pair[1] == ~"30");
}
_ => fail
}
......@@ -854,8 +855,9 @@ fn test_optmulti_short_multi() {
Ok(m) => {
assert (opt_present(m, ~"t"));
assert (opt_str(m, ~"t") == ~"20");
assert (opt_strs(m, ~"t")[0] == ~"20");
assert (opt_strs(m, ~"t")[1] == ~"30");
let pair = opt_strs(m, ~"t");
assert (pair[0] == ~"20");
assert (pair[1] == ~"30");
}
_ => fail
}
......@@ -903,10 +905,12 @@ fn test_combined() {
assert (opt_present(m, ~"flag"));
assert (opt_str(m, ~"long") == ~"30");
assert (opt_present(m, ~"f"));
assert (opt_strs(m, ~"m")[0] == ~"40");
assert (opt_strs(m, ~"m")[1] == ~"50");
assert (opt_strs(m, ~"n")[0] == ~"-A B");
assert (opt_strs(m, ~"n")[1] == ~"-60 70");
let pair = opt_strs(m, ~"m");
assert (pair[0] == ~"40");
assert (pair[1] == ~"50");
let pair = opt_strs(m, ~"n");
assert (pair[0] == ~"-A B");
assert (pair[1] == ~"-60 70");
assert (!opt_present(m, ~"notpresent"));
}
_ => fail
......
......@@ -19,7 +19,7 @@
* like map or alli.
*/
fn map_slices<A: copy send, B: copy send>(
xs: ~[A],
xs: &[A],
f: fn() -> fn~(uint, v: &[A]) -> B)
-> ~[B] {
......@@ -104,7 +104,7 @@ fn mapi<A: copy send, B: copy send>(xs: ~[A],
* inner elements. This is to skirt the need for copy constructors.
*/
fn mapi_factory<A: copy send, B: copy send>(
xs: ~[A], f: fn() -> fn~(uint, A) -> B) -> ~[B] {
xs: &[A], f: fn() -> fn~(uint, A) -> B) -> ~[B] {
let slices = map_slices(xs, || {
let f = f();
fn~(base: uint, slice : &[A], move f) -> ~[B] {
......
......@@ -1169,29 +1169,38 @@ fn print_field(s: ps, field: ast::field) {
None => ()
}
word_space(s, ~"=>");
// Extract the expression from the extra block the parser adds
assert arm.body.node.view_items.is_empty();
assert arm.body.node.stmts.is_empty();
assert arm.body.node.rules == ast::default_blk;
match arm.body.node.expr {
Some(expr) => {
match expr.node {
ast::expr_block(blk) => {
// the block will close the pattern's ibox
print_block_unclosed_indent(s, blk, alt_indent_unit);
}
_ => {
end(s); // close the ibox for the pattern
print_expr(s, expr);
}
}
if !expr_is_simple_block(expr)
&& i < len - 1 {
word(s.s, ~",");
// in the case of foo => expr
if arm.body.node.view_items.is_empty() &&
arm.body.node.stmts.is_empty() &&
arm.body.node.rules == ast::default_blk &&
arm.body.node.expr.is_some()
{
match arm.body.node.expr {
Some(expr) => {
match expr.node {
ast::expr_block(blk) => {
// the block will close the pattern's ibox
print_block_unclosed_indent(
s, blk, alt_indent_unit);
}
_ => {
end(s); // close the ibox for the pattern
print_expr(s, expr);
}
}
if !expr_is_simple_block(expr)
&& i < len - 1 {
word(s.s, ~",");
}
end(s); // close enclosing cbox
}
None => fail
}
end(s); // close enclosing cbox
}
None => fail
} else {
// the block will close the pattern's ibox
print_block_unclosed_indent(s, arm.body, alt_indent_unit);
}
}
bclose_(s, expr.span, alt_indent_unit);
......
......@@ -147,7 +147,6 @@ extern "C" CDECL void
upcall_s_exchange_malloc(s_exchange_malloc_args *args) {
rust_task *task = args->task;
LOG_UPCALL_ENTRY(task);
LOG(task, mem, "upcall exchange malloc(0x%" PRIxPTR ")", args->td);
size_t total_size = get_box_size(args->size, args->td->align);
// FIXME--does this have to be calloc? (Issue #2682)
......@@ -159,6 +158,9 @@ upcall_s_exchange_malloc(s_exchange_malloc_args *args) {
header->prev = 0;
header->next = 0;
LOG(task, mem, "exchange malloced %p of size %" PRIuPTR,
header, args->size);
args->retval = (uintptr_t)header;
}
......@@ -187,6 +189,7 @@ extern "C" CDECL void
upcall_s_exchange_free(s_exchange_free_args *args) {
rust_task *task = args->task;
LOG_UPCALL_ENTRY(task);
LOG(task, mem, "exchange freed %p", args->ptr);
task->kernel->free(args->ptr);
}
......
......@@ -456,9 +456,11 @@ fn LLVMConstLShr(LHSConstant: ValueRef, RHSConstant: ValueRef) ->
ValueRef;
fn LLVMConstAShr(LHSConstant: ValueRef, RHSConstant: ValueRef) ->
ValueRef;
fn LLVMConstGEP(ConstantVal: ValueRef, ConstantIndices: *uint,
fn LLVMConstGEP(ConstantVal: ValueRef,
ConstantIndices: *ValueRef,
NumIndices: c_uint) -> ValueRef;
fn LLVMConstInBoundsGEP(ConstantVal: ValueRef, ConstantIndices: *uint,
fn LLVMConstInBoundsGEP(ConstantVal: ValueRef,
ConstantIndices: *ValueRef,
NumIndices: c_uint) -> ValueRef;
fn LLVMConstTrunc(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
fn LLVMConstSExt(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
......@@ -493,10 +495,10 @@ fn LLVMConstInsertElement(VectorConstant: ValueRef,
fn LLVMConstShuffleVector(VectorAConstant: ValueRef,
VectorBConstant: ValueRef,
MaskConstant: ValueRef) -> ValueRef;
fn LLVMConstExtractValue(AggConstant: ValueRef, IdxList: *uint,
fn LLVMConstExtractValue(AggConstant: ValueRef, IdxList: *c_uint,
NumIdx: c_uint) -> ValueRef;
fn LLVMConstInsertValue(AggConstant: ValueRef,
ElementValueConstant: ValueRef, IdxList: *uint,
ElementValueConstant: ValueRef, IdxList: *c_uint,
NumIdx: c_uint) -> ValueRef;
fn LLVMConstInlineAsm(Ty: TypeRef, AsmString: *c_char,
Constraints: *c_char, HasSideEffects: Bool,
......
......@@ -220,7 +220,8 @@
use syntax::ast_util;
use syntax::ast_map;
use syntax::codemap::span;
use util::ppaux::{ty_to_str, region_to_str, explain_region};
use util::ppaux::{ty_to_str, region_to_str, explain_region,
note_and_explain_region};
use std::map::{int_hash, hashmap, set};
use std::list;
use std::list::{List, Cons, Nil};
......@@ -464,6 +465,7 @@ fn report(err: bckerr) {
err.cmt.span,
fmt!("illegal borrow: %s",
self.bckerr_code_to_str(err.code)));
self.note_and_explain_bckerr(err.code);
}
fn span_err(s: span, m: ~str) {
......@@ -488,37 +490,65 @@ fn add_to_mutbl_map(cmt: cmt) {
fn bckerr_code_to_str(code: bckerr_code) -> ~str {
match code {
err_mutbl(req, act) => {
fmt!("creating %s alias to aliasable, %s memory",
self.mut_to_str(req), self.mut_to_str(act))
}
err_mut_uniq => {
~"unique value in aliasable, mutable location"
}
err_mut_variant => {
~"enum variant in aliasable, mutable location"
}
err_root_not_permitted => {
// note: I don't expect users to ever see this error
// message, reasons are discussed in attempt_root() in
// preserve.rs.
~"rooting is not permitted"
}
err_out_of_root_scope(super_scope, sub_scope) => {
fmt!("managed value would have to be rooted for %s, \
but can only be rooted for %s",
explain_region(self.tcx, sub_scope),
explain_region(self.tcx, super_scope))
}
err_out_of_scope(super_scope, sub_scope) => {
fmt!("borrowed pointer must be valid for %s, \
but the borrowed value is only valid for %s",
explain_region(self.tcx, sub_scope),
explain_region(self.tcx, super_scope))
err_mutbl(req, act) => {
fmt!("creating %s alias to aliasable, %s memory",
self.mut_to_str(req), self.mut_to_str(act))
}
err_mut_uniq => {
~"unique value in aliasable, mutable location"
}
err_mut_variant => {
~"enum variant in aliasable, mutable location"
}
err_root_not_permitted => {
// note: I don't expect users to ever see this error
// message, reasons are discussed in attempt_root() in
// preserve.rs.
~"rooting is not permitted"
}
err_out_of_root_scope(*) => {
~"cannot root managed value long enough"
}
err_out_of_scope(*) => {
~"borrowed value does not live long enough"
}
}
}
fn note_and_explain_bckerr(code: bckerr_code) {
match code {
err_mutbl(*) | err_mut_uniq | err_mut_variant |
err_root_not_permitted => {}
err_out_of_root_scope(super_scope, sub_scope) => {
note_and_explain_region(
self.tcx,
~"managed value would have to be rooted for ",
sub_scope,
~"...");
note_and_explain_region(
self.tcx,
~"...but can only be rooted for ",
super_scope,
~"");
}
err_out_of_scope(super_scope, sub_scope) => {
note_and_explain_region(
self.tcx,
~"borrowed pointer must be valid for ",
sub_scope,
~"...");
note_and_explain_region(
self.tcx,
~"...but borrowed value is only valid for ",
super_scope,
~"");
}
}
}
fn cmt_to_str(cmt: cmt) -> ~str {
let mc = &mem_categorization_ctxt {tcx: self.tcx,
method_map: self.method_map};
......
......@@ -423,6 +423,7 @@ fn report_purity_error(pc: purity_cause, sp: span, msg: ~str) {
e.cmt.span,
fmt!("illegal borrow unless pure: %s",
self.bccx.bckerr_code_to_str(e.code)));
self.bccx.note_and_explain_bckerr(e.code);
self.tcx().sess.span_note(
sp,
fmt!("impure due to %s", msg));
......@@ -484,10 +485,14 @@ fn check_move_out_from_cmt(cmt: cmt) {
// when there is an outstanding loan. In that case, it is not
// safe to consider the use a last_use.
fn check_last_use(expr: @ast::expr) {
debug!("Checking last use of expr %?", expr.id);
let cmt = self.bccx.cat_expr(expr);
let lp = match cmt.lp {
None => return,
Some(lp) => lp
None => {
debug!("Not a loanable expression");
return;
}
Some(lp) => lp
};
for self.walk_loans_of(cmt.id, lp) |_loan| {
debug!("Removing last use entry %? due to outstanding loan",
......@@ -592,6 +597,9 @@ fn check_loans_in_local(local: @ast::local,
fn check_loans_in_expr(expr: @ast::expr,
&&self: check_loan_ctxt,
vt: visit::vt<check_loan_ctxt>) {
debug!("check_loans_in_expr(expr=%?/%s)",
expr.id, pprust::expr_to_str(expr, self.tcx().sess.intr()));
self.check_for_conflicting_loans(expr.id);
match expr.node {
......
......@@ -90,8 +90,8 @@ fn req_loans_in_expr(ex: @ast::expr,
let tcx = bccx.tcx;
let old_root_ub = self.root_ub;
debug!("req_loans_in_expr(ex=%s)",
pprust::expr_to_str(ex, tcx.sess.intr()));
debug!("req_loans_in_expr(expr=%?/%s)",
ex.id, pprust::expr_to_str(ex, tcx.sess.intr()));
// If this expression is borrowed, have to ensure it remains valid:
for tcx.borrowings.find(ex.id).each |borrow| {
......@@ -200,6 +200,21 @@ fn req_loans_in_expr(ex: @ast::expr,
visit::visit_expr(ex, self, vt);
}
// FIXME--#3387
// ast::expr_binary(_, lhs, rhs) => {
// // Universal comparison operators like ==, >=, etc
// // take their arguments by reference.
// let lhs_ty = ty::expr_ty(self.tcx(), lhs);
// if !ty::type_is_scalar(lhs_ty) {
// let scope_r = ty::re_scope(ex.id);
// let lhs_cmt = self.bccx.cat_expr(lhs);
// self.guarantee_valid(lhs_cmt, m_imm, scope_r);
// let rhs_cmt = self.bccx.cat_expr(rhs);
// self.guarantee_valid(rhs_cmt, m_imm, scope_r);
// }
// visit::visit_expr(ex, self, vt);
// }
ast::expr_field(rcvr, _, _)
if self.bccx.method_map.contains_key(ex.id) => {
// Receivers in method calls are always passed by ref.
......@@ -395,14 +410,15 @@ fn check_mutbl(req_mutbl: ast::mutability,
}
fn add_loans(scope_id: ast::node_id, loans: @DVec<loan>) {
debug!("adding %u loans to scope_id %?", loans.len(), scope_id);
match self.req_maps.req_loan_map.find(scope_id) {
Some(l) => {
(*l).push(loans);
}
None => {
self.req_maps.req_loan_map.insert(
scope_id, @dvec::from_vec(~[mut loans]));
}
Some(l) => {
l.push(loans);
}
None => {
self.req_maps.req_loan_map.insert(
scope_id, @dvec::from_vec(~[mut loans]));
}
}
}
......
......@@ -419,7 +419,7 @@ fn is_nullary_variant(cx: ctx, ex: @expr) -> bool {
fn check_copy_ex(cx: ctx, ex: @expr, implicit_copy: bool,
why: Option<(&str,&str)>) {
if ty::expr_is_lval(cx.method_map, ex) &&
if ty::expr_is_lval(cx.tcx, cx.method_map, ex) &&
// this is a move
!cx.last_use_map.contains_key(ex.id) &&
......
......@@ -1572,17 +1572,16 @@ fn check_expr(expr: @expr, &&self: @Liveness, vt: vt<@Liveness>) {
expr_call(f, args, _) => {
let targs = ty::ty_fn_args(ty::expr_ty(self.tcx, f));
vt.visit_expr(f, self, vt);
do vec::iter2(args, targs) |arg_expr, arg_ty| {
match ty::resolved_mode(self.tcx, arg_ty.mode) {
by_val | by_copy | by_ref | by_mutbl_ref => {
vt.visit_expr(arg_expr, self, vt);
}
by_move => {
self.check_move_from_expr(arg_expr, vt);
}
by_val | by_copy | by_ref | by_mutbl_ref => {}
by_move => {
self.check_move_from_expr(arg_expr, vt);
}
}
}
visit::visit_expr(expr, self, vt);
}
// no correctness conditions related to liveness
......@@ -1670,6 +1669,9 @@ fn check_move_from_var(span: span, ln: LiveNode, var: Variable) {
}
fn consider_last_use(expr: @expr, ln: LiveNode, var: Variable) {
debug!("consider_last_use(expr.id=%?, ln=%s, var=%s)",
expr.id, ln.to_str(), var.to_str());
match self.live_on_exit(ln, var) {
Some(_) => {}
None => (*self.ir).add_last_use(expr.id, var)
......@@ -1682,7 +1684,7 @@ fn check_move_from_expr(expr: @expr, vt: vt<@Liveness>) {
if self.ir.method_map.contains_key(expr.id) {
// actually an rvalue, since this calls a method
return vt.visit_expr(expr, self, vt);
return;
}
match expr.node {
......@@ -1703,18 +1705,16 @@ fn check_move_from_expr(expr: @expr, vt: vt<@Liveness>) {
self.check_move_from_expr(base, vt);
}
expr_index(base, idx) => {
expr_index(base, _) => {
// Moving from x[y] is allowed if x is never used later.
// (Note that the borrowck guarantees that anything
// being moved from is uniquely tied to the stack frame)
self.check_move_from_expr(base, vt);
vt.visit_expr(idx, self, vt);
}
_ => {
// For other kinds of lvalues, no checks are required,
// and any embedded expressions are actually rvalues
vt.visit_expr(expr, self, vt);
}
}
}
......
......@@ -254,6 +254,10 @@ fn resolve_expr(expr: @ast::expr, cx: ctxt, visitor: visit::vt<ctxt>) {
let mut new_cx = cx;
match expr.node {
// Calls or overloadable operators
// FIXME #3387
// ast::expr_index(*) | ast::expr_binary(*) |
// ast::expr_unary(*) |
ast::expr_call(*) => {
debug!("node %d: %s", expr.id, pprust::expr_to_str(expr,
cx.sess.intr()));
......
This diff is collapsed.
This diff is collapsed.
......@@ -6,8 +6,8 @@
use lib::llvm::{ValueRef, TypeRef, BasicBlockRef, BuilderRef, ModuleRef};
use lib::llvm::{Opcode, IntPredicate, RealPredicate, True, False,
CallConv, TypeKind, AtomicBinOp, AtomicOrdering};
use common::*;
use driver::session::session;
use common::*;
fn B(cx: block) -> BuilderRef {
let b = cx.fcx.ccx.builder.B;
......@@ -670,7 +670,7 @@ fn add_comment(bcx: block, text: ~str) {
}
}
fn Call(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef {
fn Call(cx: block, Fn: ValueRef, Args: &[ValueRef]) -> ValueRef {
if cx.unreachable { return _UndefReturn(cx, Fn); }
unsafe {
count_insn(cx, "call");
......@@ -679,8 +679,9 @@ fn Call(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef {
val_str(cx.ccx().tn, Fn),
Args.map(|arg| val_str(cx.ccx().tn, arg)));
return llvm::LLVMBuildCall(B(cx), Fn, vec::unsafe::to_ptr(Args),
Args.len() as c_uint, noname());
do vec::as_buf(Args) |ptr, len| {
llvm::LLVMBuildCall(B(cx), Fn, ptr, len as c_uint, noname())
}
}
}
......
This diff is collapsed.
......@@ -17,6 +17,7 @@
use syntax::ast_map::{path, path_mod, path_name};
use driver::session::session;
use std::map::hashmap;
use datum::{Datum, INIT, ByRef, ByValue, FromLvalue};
// ___Good to know (tm)__________________________________________________
//
......@@ -87,25 +88,35 @@
//
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
enum environment_value {
// Copy the value from this llvm ValueRef into the environment.
env_copy(ValueRef, ty::t, lval_kind),
enum EnvAction {
/// Copy the value from this llvm ValueRef into the environment.
EnvStore,
// Move the value from this llvm ValueRef into the environment.
env_move(ValueRef, ty::t, lval_kind),
/// Move the value from this llvm ValueRef into the environment.
EnvMove,
// Access by reference (used for blocks).
env_ref(ValueRef, ty::t, lval_kind),
/// Access by reference (used for stack closures).
EnvRef
}
fn ev_to_str(ccx: @crate_ctxt, ev: environment_value) -> ~str {
match ev {
env_copy(v, t, _) => fmt!("copy(%s,%s)", val_str(ccx.tn, v),
ty_to_str(ccx.tcx, t)),
env_move(v, t, _) => fmt!("move(%s,%s)", val_str(ccx.tn, v),
ty_to_str(ccx.tcx, t)),
env_ref(v, t, _) => fmt!("ref(%s,%s)", val_str(ccx.tn, v),
ty_to_str(ccx.tcx, t))
struct EnvValue {
action: EnvAction;
datum: Datum;
}
impl EnvAction {
fn to_str() -> ~str {
match self {
EnvStore => ~"EnvStore",
EnvMove => ~"EnvMove",
EnvRef => ~"EnvRef"
}
}
}
impl EnvValue {
fn to_str(ccx: @crate_ctxt) -> ~str {
fmt!("%s(%s)", self.action.to_str(), self.datum.to_str(ccx))
}
}
......@@ -116,18 +127,18 @@ fn mk_tuplified_uniq_cbox_ty(tcx: ty::ctxt, cdata_ty: ty::t) -> ty::t {
// Given a closure ty, emits a corresponding tuple ty
fn mk_closure_tys(tcx: ty::ctxt,
bound_values: ~[environment_value])
bound_values: ~[EnvValue])
-> ty::t {
let mut bound_tys = ~[];
// Compute the closed over data
for vec::each(bound_values) |bv| {
vec::push(bound_tys, match bv {
env_copy(_, t, _) => t,
env_move(_, t, _) => t,
env_ref(_, t, _) => t
});
}
// determine the types of the values in the env. Note that this
// is the actual types that will be stored in the map, not the
// logical types as the user sees them, so by-ref upvars must be
// converted to ptrs.
let bound_tys = bound_values.map(|bv| {
match bv.action {
EnvStore | EnvMove => bv.datum.ty,
EnvRef => ty::mk_mut_ptr(tcx, bv.datum.ty)
}
});
let cdata_ty = ty::mk_tup(tcx, bound_tys);
debug!("cdata_ty=%s", ty_to_str(tcx, cdata_ty));
return cdata_ty;
......@@ -136,7 +147,8 @@ fn mk_closure_tys(tcx: ty::ctxt,
fn allocate_cbox(bcx: block,
ck: ty::closure_kind,
cdata_ty: ty::t)
-> result {
-> Result
{
let _icx = bcx.insn_ctxt("closure::allocate_cbox");
let ccx = bcx.ccx(), tcx = ccx.tcx;
......@@ -151,18 +163,16 @@ fn nuke_ref_count(bcx: block, llbox: ValueRef) {
}
// Allocate and initialize the box:
let {bcx, val} = match ck {
match ck {
ty::ck_box => malloc_raw(bcx, cdata_ty, heap_shared),
ty::ck_uniq => malloc_raw(bcx, cdata_ty, heap_exchange),
ty::ck_block => {
let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
let llbox = base::alloc_ty(bcx, cbox_ty);
nuke_ref_count(bcx, llbox);
{bcx: bcx, val: llbox}
let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
let llbox = base::alloc_ty(bcx, cbox_ty);
nuke_ref_count(bcx, llbox);
rslt(bcx, llbox)
}
};
return {bcx: bcx, val: val};
}
}
type closure_result = {
......@@ -176,7 +186,7 @@ fn nuke_ref_count(bcx: block, llbox: ValueRef) {
// heap allocated closure that copies the upvars into environment.
// Otherwise, it is stack allocated and copies pointers to the upvars.
fn store_environment(bcx: block,
bound_values: ~[environment_value],
bound_values: ~[EnvValue],
ck: ty::closure_kind) -> closure_result {
let _icx = bcx.insn_ctxt("closure::store_environment");
let ccx = bcx.ccx(), tcx = ccx.tcx;
......@@ -185,7 +195,7 @@ fn store_environment(bcx: block,
let cdata_ty = mk_closure_tys(tcx, bound_values);
// allocate closure in the heap
let {bcx: bcx, val: llbox} = allocate_cbox(bcx, ck, cdata_ty);
let Result {bcx: bcx, val: llbox} = allocate_cbox(bcx, ck, cdata_ty);
let mut temp_cleanups = ~[];
// cbox_ty has the form of a tuple: (a, b, c) we want a ptr to a
......@@ -200,43 +210,27 @@ fn store_environment(bcx: block,
// Copy expr values into boxed bindings.
let mut bcx = bcx;
do vec::iteri(bound_values) |i, bv| {
debug!("Copy %s into closure", ev_to_str(ccx, bv));
debug!("Copy %s into closure", bv.to_str(ccx));
if !ccx.sess.no_asm_comments() {
add_comment(bcx, fmt!("Copy %s into closure",
ev_to_str(ccx, bv)));
bv.to_str(ccx)));
}
let bound_data = GEPi(bcx, llbox, [0u, abi::box_field_body, i]);
match bv {
env_copy(val, ty, lv_owned) => {
let val1 = load_if_immediate(bcx, val, ty);
bcx = base::copy_val(bcx, INIT, bound_data, val1, ty);
}
env_copy(val, ty, lv_owned_imm) => {
bcx = base::copy_val(bcx, INIT, bound_data, val, ty);
}
env_copy(_, _, lv_temporary) => {
fail ~"cannot capture temporary upvar";
}
env_move(val, ty, kind) => {
let src = {bcx:bcx, val:val, kind:kind};
bcx = move_val(bcx, INIT, bound_data, src, ty);
}
env_ref(val, _, lv_owned) => {
debug!("> storing %s into %s",
val_str(bcx.ccx().tn, val),
val_str(bcx.ccx().tn, bound_data));
Store(bcx, val, bound_data);
}
env_ref(val, _, lv_owned_imm) => {
let addr = do_spill_noroot(bcx, val);
Store(bcx, addr, bound_data);
}
env_ref(_, _, lv_temporary) => {
fail ~"cannot capture temporary upvar";
}
match bv.action {
EnvStore => {
bcx = bv.datum.store_to(bcx, INIT, bound_data);
}
EnvMove => {
bcx = bv.datum.move_to(bcx, INIT, bound_data);
}
EnvRef => {
Store(bcx, bv.datum.to_ref_llval(bcx), bound_data);
}
}
}
for vec::each(temp_cleanups) |cleanup| { revoke_clean(bcx, cleanup); }
......@@ -252,56 +246,57 @@ fn build_closure(bcx0: block,
include_ret_handle: Option<ValueRef>) -> closure_result {
let _icx = bcx0.insn_ctxt("closure::build_closure");
// If we need to, package up the iterator body to call
let mut env_vals = ~[];
let mut bcx = bcx0;
let mut bcx = bcx0;;
let ccx = bcx.ccx(), tcx = ccx.tcx;
// Package up the captured upvars
let mut env_vals = ~[];
do vec::iter(cap_vars) |cap_var| {
debug!("Building closure: captured variable %?", cap_var);
let lv = trans_local_var(bcx, cap_var.def);
let nid = ast_util::def_id_of_def(cap_var.def).node;
debug!("Node id is %s",
syntax::ast_map::node_id_to_str
(bcx.ccx().tcx.items, nid,
bcx.ccx().sess.parse_sess.interner));
let mut ty = node_id_type(bcx, nid);
let datum = expr::trans_local_var(bcx, id, cap_var.def);
match cap_var.mode {
capture::cap_ref => {
assert ck == ty::ck_block;
ty = ty::mk_mut_ptr(tcx, ty);
vec::push(env_vals, env_ref(lv.val, ty, lv.kind));
}
capture::cap_copy => {
let mv = match ccx.maps.last_use_map.find(id) {
None => false,
Some(vars) => (*vars).contains(nid)
};
if mv { vec::push(env_vals, env_move(lv.val, ty, lv.kind)); }
else { vec::push(env_vals, env_copy(lv.val, ty, lv.kind)); }
}
capture::cap_move => {
vec::push(env_vals, env_move(lv.val, ty, lv.kind));
}
capture::cap_drop => {
assert lv.kind == lv_owned;
bcx = drop_ty(bcx, lv.val, ty);
bcx = zero_mem(bcx, lv.val, ty);
}
capture::cap_ref => {
assert ck == ty::ck_block;
vec::push(env_vals, EnvValue {action: EnvRef,
datum: datum});
}
capture::cap_copy => {
vec::push(env_vals, EnvValue {action: EnvStore,
datum: datum});
}
capture::cap_move => {
vec::push(env_vals, EnvValue {action: EnvMove,
datum: datum});
}
capture::cap_drop => {
bcx = datum.drop_val(bcx);
datum.cancel_clean(bcx);
}
}
}
// If this is a `for` loop body, add two special environment
// variables:
do option::iter(include_ret_handle) |flagptr| {
let our_ret = match bcx.fcx.loop_ret {
Some({retptr, _}) => retptr,
None => bcx.fcx.llretptr
// Flag indicating we have returned (a by-ref bool):
let flag_datum = Datum {val: flagptr, ty: ty::mk_bool(tcx),
mode: ByRef, source: FromLvalue};
vec::push(env_vals, EnvValue {action: EnvRef,
datum: flag_datum});
// Return value (we just pass a by-ref () and cast it later to
// the right thing):
let ret_true = match bcx.fcx.loop_ret {
Some({retptr, _}) => retptr,
None => bcx.fcx.llretptr
};
let nil_ret = PointerCast(bcx, our_ret, T_ptr(T_nil()));
vec::push(env_vals,
env_ref(flagptr,
ty::mk_mut_ptr(tcx, ty::mk_bool(tcx)), lv_owned));
vec::push(env_vals,
env_ref(nil_ret, ty::mk_nil_ptr(tcx), lv_owned));
let ret_casted = PointerCast(bcx, ret_true, T_ptr(T_nil()));
let ret_datum = Datum {val: ret_casted, ty: ty::mk_nil(tcx),
mode: ByRef, source: FromLvalue};
vec::push(env_vals, EnvValue {action: EnvRef,
datum: ret_datum});
}
return store_environment(bcx, env_vals, ck);
}
......@@ -351,9 +346,16 @@ fn trans_expr_fn(bcx: block,
id: ast::node_id,
cap_clause: ast::capture_clause,
is_loop_body: Option<Option<ValueRef>>,
dest: dest) -> block {
dest: expr::Dest) -> block {
let _icx = bcx.insn_ctxt("closure::trans_expr_fn");
if dest == ignore { return bcx; }
let dest_addr = match dest {
expr::SaveIn(p) => p,
expr::Ignore => {
return bcx; // closure construction is non-side-effecting
}
};
let ccx = bcx.ccx();
let fty = node_id_type(bcx, id);
let llfnty = type_of_fn_from_ty(ccx, fty);
......@@ -362,7 +364,7 @@ fn trans_expr_fn(bcx: block,
let s = mangle_internal_name_by_path(ccx, sub_path);
let llfn = decl_internal_cdecl_fn(ccx.llmod, s, llfnty);
let trans_closure_env = fn@(ck: ty::closure_kind) -> result {
let trans_closure_env = fn@(ck: ty::closure_kind) -> Result {
let cap_vars = capture::compute_capture_vars(ccx.tcx, id, proto,
cap_clause);
let ret_handle = match is_loop_body { Some(x) => x, None => None };
......@@ -377,25 +379,29 @@ fn trans_expr_fn(bcx: block,
Store(bcx, C_bool(true), bcx.fcx.llretptr);
}
});
{bcx: bcx, val: llbox}
rslt(bcx, llbox)
};
let {bcx: bcx, val: closure} = match proto {
ty::proto_vstore(ty::vstore_slice(_)) =>
trans_closure_env(ty::ck_block),
ty::proto_vstore(ty::vstore_box) =>
trans_closure_env(ty::ck_box),
ty::proto_vstore(ty::vstore_uniq) =>
trans_closure_env(ty::ck_uniq),
ty::proto_bare => {
trans_closure(ccx, sub_path, decl, body, llfn, no_self, None,
id, |_fcx| { }, |_bcx| { });
{bcx: bcx, val: C_null(T_opaque_box_ptr(ccx))}
}
ty::proto_vstore(ty::vstore_fixed(_)) =>
fail ~"vstore_fixed unexpected"
let Result {bcx: bcx, val: closure} = match proto {
ty::proto_vstore(ty::vstore_slice(_)) => {
trans_closure_env(ty::ck_block)
}
ty::proto_vstore(ty::vstore_box) => {
trans_closure_env(ty::ck_box)
}
ty::proto_vstore(ty::vstore_uniq) => {
trans_closure_env(ty::ck_uniq)
}
ty::proto_bare => {
trans_closure(ccx, sub_path, decl, body, llfn, no_self, None,
id, |_fcx| { }, |_bcx| { });
rslt(bcx, C_null(T_opaque_box_ptr(ccx)))
}
ty::proto_vstore(ty::vstore_fixed(_)) => {
fail ~"vstore_fixed unexpected"
}
};
fill_fn_pair(bcx, get_dest_addr(dest), llfn, closure);
fill_fn_pair(bcx, dest_addr, llfn, closure);
return bcx;
}
......@@ -440,12 +446,12 @@ fn make_opaque_cbox_take_glue(
// Easy cases:
let _icx = bcx.insn_ctxt("closure::make_opaque_cbox_take_glue");
match ck {
ty::ck_block => return bcx,
ty::ck_box => {
incr_refcnt_of_boxed(bcx, Load(bcx, cboxptr));
return bcx;
}
ty::ck_uniq => { /* hard case: */ }
ty::ck_block => return bcx,
ty::ck_box => {
glue::incr_refcnt_of_boxed(bcx, Load(bcx, cboxptr));
return bcx;
}
ty::ck_uniq => { /* hard case: */ }
}
// Hard case, a deep copy:
......@@ -467,20 +473,20 @@ fn make_opaque_cbox_take_glue(
let malloc = ~"exchange_malloc";
let opaque_tydesc = PointerCast(bcx, tydesc, T_ptr(T_i8()));
let rval = alloca_zeroed(bcx, T_ptr(T_i8()));
let bcx = trans_rtcall(bcx, malloc, ~[opaque_tydesc, sz],
save_in(rval));
let bcx = callee::trans_rtcall(bcx, malloc, ~[opaque_tydesc, sz],
expr::SaveIn(rval));
let cbox_out = PointerCast(bcx, Load(bcx, rval), llopaquecboxty);
call_memmove(bcx, cbox_out, cbox_in, sz);
Store(bcx, cbox_out, cboxptr);
// Take the (deeply cloned) type descriptor
let tydesc_out = GEPi(bcx, cbox_out, [0u, abi::box_field_tydesc]);
let bcx = take_ty(bcx, tydesc_out, ty::mk_type(tcx));
let bcx = glue::take_ty(bcx, tydesc_out, ty::mk_type(tcx));
// Take the data in the tuple
let cdata_out = GEPi(bcx, cbox_out, [0u, abi::box_field_body]);
call_tydesc_glue_full(bcx, cdata_out, tydesc,
abi::tydesc_field_take_glue, None);
glue::call_tydesc_glue_full(bcx, cdata_out, tydesc,
abi::tydesc_field_take_glue, None);
bcx
}
}
......@@ -492,15 +498,17 @@ fn make_opaque_cbox_drop_glue(
-> block {
let _icx = bcx.insn_ctxt("closure::make_opaque_cbox_drop_glue");
match ck {
ty::ck_block => bcx,
ty::ck_box => {
decr_refcnt_maybe_free(bcx, Load(bcx, cboxptr),
ty::mk_opaque_closure_ptr(bcx.tcx(), ck))
}
ty::ck_uniq => {
free_ty(bcx, cboxptr,
ty::ck_block => bcx,
ty::ck_box => {
glue::decr_refcnt_maybe_free(
bcx, Load(bcx, cboxptr),
ty::mk_opaque_closure_ptr(bcx.tcx(), ck))
}
}
ty::ck_uniq => {
glue::free_ty(
bcx, cboxptr,
ty::mk_opaque_closure_ptr(bcx.tcx(), ck))
}
}
}
......@@ -526,14 +534,14 @@ fn make_opaque_cbox_free_glue(
// Drop the tuple data then free the descriptor
let cdata = GEPi(bcx, cbox, [0u, abi::box_field_body]);
call_tydesc_glue_full(bcx, cdata, tydesc,
abi::tydesc_field_drop_glue, None);
glue::call_tydesc_glue_full(bcx, cdata, tydesc,
abi::tydesc_field_drop_glue, None);
// Free the ty descr (if necc) and the box itself
match ck {
ty::ck_block => fail ~"Impossible",
ty::ck_box => trans_free(bcx, cbox),
ty::ck_uniq => trans_unique_free(bcx, cbox)
ty::ck_block => fail ~"Impossible",
ty::ck_box => glue::trans_free(bcx, cbox),
ty::ck_uniq => glue::trans_unique_free(bcx, cbox)
}
}
}
......
......@@ -20,6 +20,7 @@
use metadata::common::link_meta;
use syntax::ast_map::path;
use util::ppaux::ty_to_str;
use syntax::print::pprust::expr_to_str;
use syntax::parse::token::ident_interner;
use syntax::ast::ident;
......@@ -165,7 +166,11 @@ struct BuilderRef_res {
mut do_not_commit_warning_issued: bool};
// Types used for llself.
type val_self_data = {v: ValueRef, t: ty::t, is_owned: bool};
struct ValSelfData {
v: ValueRef;
t: ty::t;
is_owned: bool;
}
enum local_val { local_mem(ValueRef), local_imm(ValueRef), }
......@@ -201,7 +206,7 @@ enum local_val { local_mem(ValueRef), local_imm(ValueRef), }
mut llreturn: BasicBlockRef,
// The 'self' value currently in use in this function, if there
// is one.
mut llself: Option<val_self_data>,
mut llself: Option<ValSelfData>,
// The a value alloca'd for calls to upcalls.rust_personality. Used when
// outputting the resume instruction.
mut personality: Option<ValueRef>,
......@@ -257,6 +262,25 @@ enum cleanup {
clean_temp(ValueRef, fn@(block) -> block, cleantype),
}
impl cleantype : cmp::Eq {
pure fn eq(&&other: cleantype) -> bool {
match self {
normal_exit_only => {
match other {
normal_exit_only => true,
_ => false
}
}
normal_exit_and_unwind => {
match other {
normal_exit_and_unwind => true,
_ => false
}
}
}
}
}
// Used to remember and reuse existing cleanup paths
// target: none means the path ends in an resume instruction
type cleanup_path = {target: Option<BasicBlockRef>,
......@@ -275,12 +299,12 @@ fn cleanup_type(cx: ty::ctxt, ty: ty::t) -> cleantype {
}
}
// This is not the same as base::root_value, which appears to be the vestigial
// remains of the previous GC regime. In the new GC, we can identify
// immediates on the stack without difficulty, but have trouble knowing where
// non-immediates are on the stack. For non-immediates, we must add an
// additional level of indirection, which allows us to alloca a pointer with
// the right addrspace.
// This is not the same as datum::Datum::root(), which is used to keep copies
// of @ values live for as long as a borrowed pointer to the interior exists.
// In the new GC, we can identify immediates on the stack without difficulty,
// but have trouble knowing where non-immediates are on the stack. For
// non-immediates, we must add an additional level of indirection, which
// allows us to alloca a pointer with the right addrspace.
fn root_for_cleanup(bcx: block, v: ValueRef, t: ty::t)
-> {root: ValueRef, rooted: bool} {
let ccx = bcx.ccx();
......@@ -305,11 +329,12 @@ fn add_clean(bcx: block, val: ValueRef, t: ty::t) {
let cleanup_type = cleanup_type(bcx.tcx(), t);
do in_scope_cx(bcx) |info| {
vec::push(info.cleanups,
clean(|a| base::drop_ty_root(a, root, rooted, t),
clean(|a| glue::drop_ty_root(a, root, rooted, t),
cleanup_type));
scope_clean_changed(info);
}
}
fn add_clean_temp_immediate(cx: block, val: ValueRef, ty: ty::t) {
if !ty::type_needs_drop(cx.tcx(), ty) { return; }
debug!("add_clean_temp_immediate(%s, %s, %s)",
......@@ -318,7 +343,7 @@ fn add_clean_temp_immediate(cx: block, val: ValueRef, ty: ty::t) {
let cleanup_type = cleanup_type(cx.tcx(), ty);
do in_scope_cx(cx) |info| {
vec::push(info.cleanups,
clean_temp(val, |a| base::drop_ty_immediate(a, val, ty),
clean_temp(val, |a| glue::drop_ty_immediate(a, val, ty),
cleanup_type));
scope_clean_changed(info);
}
......@@ -332,15 +357,15 @@ fn add_clean_temp_mem(bcx: block, val: ValueRef, t: ty::t) {
let cleanup_type = cleanup_type(bcx.tcx(), t);
do in_scope_cx(bcx) |info| {
vec::push(info.cleanups,
clean_temp(val, |a| base::drop_ty_root(a, root, rooted, t),
clean_temp(val, |a| glue::drop_ty_root(a, root, rooted, t),
cleanup_type));
scope_clean_changed(info);
}
}
fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) {
let free_fn = match heap {
heap_shared => |a| base::trans_free(a, ptr),
heap_exchange => |a| base::trans_unique_free(a, ptr)
heap_shared => |a| glue::trans_free(a, ptr),
heap_exchange => |a| glue::trans_unique_free(a, ptr)
};
do in_scope_cx(cx) |info| {
vec::push(info.cleanups, clean_temp(ptr, free_fn,
......@@ -355,12 +380,13 @@ fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) {
// drop glue checks whether it is zero.
fn revoke_clean(cx: block, val: ValueRef) {
do in_scope_cx(cx) |info| {
do option::iter(vec::position(info.cleanups, |cu| {
match cu {
clean_temp(v, _, _) if v == val => true,
_ => false
}
})) |i| {
let cleanup_pos = vec::position(
info.cleanups,
|cu| match cu {
clean_temp(v, _, _) if v == val => true,
_ => false
});
for cleanup_pos.each |i| {
info.cleanups =
vec::append(vec::slice(info.cleanups, 0u, i),
vec::view(info.cleanups,
......@@ -384,6 +410,7 @@ enum block_kind {
// to an implicit scope, for example, calls introduce an implicit scope in
// which the arguments are evaluated and cleaned up.
block_scope(scope_info),
// A non-scope block is a basic block created as a translation artifact
// from translating code that expresses conditional logic rather than by
// explicit { ... } block structure in the source language. It's called a
......@@ -480,11 +507,20 @@ fn mk_block(llbb: BasicBlockRef, parent: Option<block>, -kind: block_kind,
// First two args are retptr, env
const first_real_arg: uint = 2u;
type result = {bcx: block, val: ValueRef};
type result_t = {bcx: block, val: ValueRef, ty: ty::t};
struct Result {
bcx: block;
val: ValueRef;
}
fn rslt(bcx: block, val: ValueRef) -> Result {
Result {bcx: bcx, val: val}
}
fn rslt(bcx: block, val: ValueRef) -> result {
{bcx: bcx, val: val}
impl Result {
fn unpack(bcx: &mut block) -> ValueRef {
*bcx = self.bcx;
return self.val;
}
}
fn ty_str(tn: type_names, t: TypeRef) -> ~str {
......@@ -510,7 +546,12 @@ fn in_scope_cx(cx: block, f: fn(scope_info)) {
let mut cur = cx;
loop {
match cur.kind {
block_scope(inf) => { f(inf); return; }
block_scope(inf) => {
debug!("in_scope_cx: selected cur=%s (cx=%s)",
cur.to_str(), cx.to_str());
f(inf);
return;
}
_ => ()
}
cur = block_parent(cur);
......@@ -532,9 +573,40 @@ impl block {
pure fn tcx() -> ty::ctxt { self.fcx.ccx.tcx }
pure fn sess() -> session { self.fcx.ccx.sess }
fn node_id_to_str(id: ast::node_id) -> ~str {
ast_map::node_id_to_str(self.tcx().items, id, self.sess().intr())
}
fn expr_to_str(e: @ast::expr) -> ~str {
fmt!("expr(%d: %s)", e.id, expr_to_str(e, self.sess().intr()))
}
fn expr_is_lval(e: @ast::expr) -> bool {
ty::expr_is_lval(self.tcx(), self.ccx().maps.method_map, e)
}
fn expr_kind(e: @ast::expr) -> ty::ExprKind {
ty::expr_kind(self.tcx(), self.ccx().maps.method_map, e)
}
fn def(nid: ast::node_id) -> ast::def {
match self.tcx().def_map.find(nid) {
Some(v) => v,
None => {
self.tcx().sess.bug(fmt!(
"No def associated with node id %?", nid));
}
}
}
fn val_str(val: ValueRef) -> ~str {
val_str(self.ccx().tn, val)
}
fn llty_str(llty: TypeRef) -> ~str {
ty_str(self.ccx().tn, llty)
}
fn ty_to_str(t: ty::t) -> ~str {
ty_to_str(self.tcx(), t)
}
......@@ -954,14 +1026,16 @@ fn C_zero_byte_arr(size: uint) -> ValueRef unsafe {
elts.len() as c_uint);
}
fn C_struct(elts: ~[ValueRef]) -> ValueRef unsafe {
return llvm::LLVMConstStruct(vec::unsafe::to_ptr(elts),
elts.len() as c_uint, False);
fn C_struct(elts: &[ValueRef]) -> ValueRef {
do vec::as_buf(elts) |ptr, len| {
llvm::LLVMConstStruct(ptr, len as c_uint, False)
}
}
fn C_named_struct(T: TypeRef, elts: ~[ValueRef]) -> ValueRef unsafe {
return llvm::LLVMConstNamedStruct(T, vec::unsafe::to_ptr(elts),
elts.len() as c_uint);
fn C_named_struct(T: TypeRef, elts: &[ValueRef]) -> ValueRef {
do vec::as_buf(elts) |ptr, len| {
llvm::LLVMConstNamedStruct(T, ptr, len as c_uint)
}
}
fn C_array(ty: TypeRef, elts: ~[ValueRef]) -> ValueRef unsafe {
......@@ -1100,40 +1174,22 @@ fn node_id_type_params(bcx: block, id: ast::node_id) -> ~[ty::t] {
}
}
fn field_idx_strict(cx: ty::ctxt, sp: span, ident: ast::ident,
fields: ~[ty::field])
-> uint {
match ty::field_idx(ident, fields) {
None => cx.sess.span_bug(
sp, fmt!("base expr doesn't appear to \
have a field named %s", cx.sess.str_of(ident))),
Some(i) => i
}
}
fn dummy_substs(tps: ~[ty::t]) -> ty::substs {
{self_r: Some(ty::re_bound(ty::br_self)),
self_ty: None,
tps: tps}
}
impl cleantype : cmp::Eq {
pure fn eq(&&other: cleantype) -> bool {
match self {
normal_exit_only => {
match other {
normal_exit_only => true,
_ => false
}
}
normal_exit_and_unwind => {
match other {
normal_exit_and_unwind => true,
_ => false
}
}
}
}
fn struct_field(index: uint) -> [uint]/3 {
//! The GEPi sequence to access a field of a record/struct.
[0, 0, index]
}
fn struct_dtor() -> [uint]/2 {
//! The GEPi sequence to access the dtor of a struct.
[0, 1]
}
//
......
......@@ -59,9 +59,15 @@ fn const_deref(cx: @crate_ctxt, v: ValueRef) -> ValueRef {
v
}
fn const_get_elt(v: ValueRef, u: uint) -> ValueRef {
let u = u;
llvm::LLVMConstExtractValue(v, ptr::addr_of(u), 1 as c_uint)
fn const_get_elt(cx: @crate_ctxt, v: ValueRef, us: &[c_uint]) -> ValueRef {
let r = do vec::as_buf(us) |p, len| {
llvm::LLVMConstExtractValue(v, p, len as c_uint)
};
debug!("const_get_elt(v=%s, us=%?, r=%s)",
val_str(cx.tn, v), us, val_str(cx.tn, r));
return r;
}
fn const_autoderef(cx: @crate_ctxt, ty: ty::t, v: ValueRef)
......@@ -83,7 +89,7 @@ fn const_autoderef(cx: @crate_ctxt, ty: ty::t, v: ValueRef)
fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef {
let _icx = cx.insn_ctxt("const_expr");
match e.node {
return match e.node {
ast::expr_lit(lit) => consts::const_lit(cx, e, *lit),
ast::expr_binary(b, e1, e2) => {
let te1 = const_expr(cx, e1);
......@@ -156,15 +162,15 @@ fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef {
let bt = ty::expr_ty(cx.tcx, base);
let bv = const_expr(cx, base);
let (bt, bv) = const_autoderef(cx, bt, bv);
let fields = match ty::get(bt).struct {
ty::ty_rec(fs) => fs,
ty::ty_class(did, ref substs) =>
ty::class_items_as_mutable_fields(cx.tcx, did, substs),
_ => cx.sess.span_bug(e.span,
~"field access on unknown type in const"),
};
let ix = field_idx_strict(cx.tcx, e.span, field, fields);
const_get_elt(bv, ix)
do expr::with_field_tys(cx.tcx, bt) |_has_dtor, field_tys| {
let ix = ty::field_idx_strict(cx.tcx, field, field_tys);
// Note: ideally, we'd use `struct_field()` here instead
// of hardcoding [0, ix], but we can't because it yields
// the wrong type and also inserts an extra 0 that is
// not needed in the constant variety:
const_get_elt(cx, bv, [0, ix as c_uint])
}
}
ast::expr_index(base, index) => {
......@@ -189,8 +195,8 @@ fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef {
let llunitty = type_of::type_of(cx, unit_ty);
let unit_sz = shape::llsize_of(cx, llunitty);
(const_deref(cx, const_get_elt(bv, 0)),
llvm::LLVMConstUDiv(const_get_elt(bv, 1),
(const_deref(cx, const_get_elt(cx, bv, [0])),
llvm::LLVMConstUDiv(const_get_elt(cx, bv, [1]),
unit_sz))
},
_ => cx.sess.span_bug(base.span,
......@@ -240,27 +246,27 @@ fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef {
cx.sess.span_err(e.span,
~"const index-expr is out of bounds");
}
const_get_elt(arr, iv as uint)
const_get_elt(cx, arr, [iv as c_uint])
}
ast::expr_cast(base, _) => {
let ety = ty::expr_ty(cx.tcx, e), llty = type_of::type_of(cx, ety);
let basety = ty::expr_ty(cx.tcx, base);
let v = const_expr(cx, base);
match (base::cast_type_kind(basety),
base::cast_type_kind(ety)) {
match (expr::cast_type_kind(basety),
expr::cast_type_kind(ety)) {
(base::cast_integral, base::cast_integral) => {
(expr::cast_integral, expr::cast_integral) => {
let s = if ty::type_is_signed(basety) { True } else { False };
llvm::LLVMConstIntCast(v, llty, s)
}
(base::cast_integral, base::cast_float) => {
(expr::cast_integral, expr::cast_float) => {
if ty::type_is_signed(basety) { llvm::LLVMConstSIToFP(v, llty) }
else { llvm::LLVMConstUIToFP(v, llty) }
}
(base::cast_float, base::cast_float) => {
(expr::cast_float, expr::cast_float) => {
llvm::LLVMConstFPCast(v, llty)
}
(base::cast_float, base::cast_integral) => {
(expr::cast_float, expr::cast_integral) => {
if ty::type_is_signed(ety) { llvm::LLVMConstFPToSI(v, llty) }
else { llvm::LLVMConstFPToUI(v, llty) }
}
......@@ -282,34 +288,25 @@ fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef {
ast::expr_tup(es) => {
C_struct(es.map(|e| const_expr(cx, e)))
}
ast::expr_struct(_, fs, _) => {
ast::expr_rec(fs, None) => {
C_struct([C_struct(
fs.map(|f| const_expr(cx, f.node.expr)))])
}
ast::expr_struct(_, ref fs, _) => {
let ety = ty::expr_ty(cx.tcx, e);
let llty = type_of::type_of(cx, ety);
let class_fields =
match ty::get(ety).struct {
ty::ty_class(clsid, _) =>
ty::lookup_class_fields(cx.tcx, clsid),
_ =>
cx.tcx.sess.span_bug(e.span,
~"didn't resolve to a struct")
};
let mut cs = ~[];
for class_fields.each |class_field| {
let mut found = false;
for fs.each |field| {
if class_field.ident == field.node.ident {
found = true;
vec::push(cs, const_expr(cx, field.node.expr));
let cs = do expr::with_field_tys(cx.tcx, ety) |_hd, field_tys| {
field_tys.map(|field_ty| {
match fs.find(|f| field_ty.ident == f.node.ident) {
Some(f) => const_expr(cx, f.node.expr),
None => {
cx.tcx.sess.span_bug(
e.span, ~"missing struct field");
}
}
}
if !found {
cx.tcx.sess.span_bug(e.span, ~"missing struct field");
}
}
C_named_struct(llty, cs)
}
ast::expr_rec(fs, None) => {
C_struct(fs.map(|f| const_expr(cx, f.node.expr)))
})
};
let llty = type_of::type_of(cx, ety);
C_named_struct(llty, [C_struct(cs)])
}
ast::expr_vec(es, ast::m_imm) => {
let (v, _, _) = const_vec(cx, e, es);
......@@ -364,7 +361,7 @@ fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef {
}
_ => cx.sess.span_bug(e.span,
~"bad constant expression type in consts::const_expr")
}
};
}
fn trans_const(ccx: @crate_ctxt, e: @ast::expr, id: ast::node_id) {
......
use lib::llvm::ValueRef;
use common::*;
use datum::*;
use base::*;
fn macros() { include!("macros.rs"); } // FIXME(#3114): Macro import/export.
fn trans_block(bcx: block, b: ast::blk, dest: expr::Dest) -> block {
let _icx = bcx.insn_ctxt("trans_block");
let mut bcx = bcx;
do block_locals(b) |local| {
bcx = alloc_local(bcx, local);
};
for vec::each(b.node.stmts) |s| {
debuginfo::update_source_pos(bcx, b.span);
bcx = trans_stmt(bcx, *s);
}
match b.node.expr {
Some(e) => {
debuginfo::update_source_pos(bcx, e.span);
bcx = expr::trans_into(bcx, e, dest);
}
None => {
assert dest == expr::Ignore || bcx.unreachable;
}
}
return bcx;
}
fn trans_if(bcx: block,
cond: @ast::expr,
thn: ast::blk,
els: Option<@ast::expr>,
dest: expr::Dest)
-> block
{
debug!("trans_if(bcx=%s, cond=%s, thn=%?, dest=%s)",
bcx.to_str(), bcx.expr_to_str(cond), thn.node.id,
dest.to_str(bcx.ccx()));
let _indenter = indenter();
let _icx = bcx.insn_ctxt("trans_if");
let Result {bcx, val: cond_val} =
expr::trans_to_appropriate_llval(bcx, cond);
let then_bcx_in = scope_block(bcx, thn.info(), ~"then");
let else_bcx_in = scope_block(bcx, els.info(), ~"else");
CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb);
debug!("then_bcx_in=%s, else_bcx_in=%s",
then_bcx_in.to_str(), else_bcx_in.to_str());
let then_bcx_out = trans_block(then_bcx_in, thn, dest);
let then_bcx_out = trans_block_cleanups(then_bcx_out,
block_cleanups(then_bcx_in));
// Calling trans_block directly instead of trans_expr
// because trans_expr will create another scope block
// context for the block, but we've already got the
// 'else' context
let else_bcx_out = match els {
Some(elexpr) => {
match elexpr.node {
ast::expr_if(_, _, _) => {
let elseif_blk = ast_util::block_from_expr(elexpr);
trans_block(else_bcx_in, elseif_blk, dest)
}
ast::expr_block(blk) => {
trans_block(else_bcx_in, blk, dest)
}
// would be nice to have a constraint on ifs
_ => bcx.tcx().sess.bug(~"strange alternative in if")
}
}
_ => else_bcx_in
};
let else_bcx_out = trans_block_cleanups(else_bcx_out,
block_cleanups(else_bcx_in));
return join_blocks(bcx, ~[then_bcx_out, else_bcx_out]);
}
fn join_blocks(parent_bcx: block, in_cxs: ~[block]) -> block {
let out = sub_block(parent_bcx, ~"join");
let mut reachable = false;
for vec::each(in_cxs) |bcx| {
if !bcx.unreachable {
Br(bcx, out.llbb);
reachable = true;
}
}
if !reachable {
Unreachable(out);
}
return out;
}
fn trans_while(bcx: block, cond: @ast::expr, body: ast::blk)
-> block {
let _icx = bcx.insn_ctxt("trans_while");
let next_bcx = sub_block(bcx, ~"while next");
// bcx
// |
// loop_bcx
// |
// cond_bcx_in <--------+
// | |
// cond_bcx_out |
// | | |
// | body_bcx_in |
// +------+ | |
// | body_bcx_out --+
// next_bcx
let loop_bcx = loop_scope_block(bcx, next_bcx, ~"`while`", body.info());
let cond_bcx_in = scope_block(loop_bcx, cond.info(), ~"while loop cond");
let body_bcx_in = scope_block(loop_bcx, body.info(), ~"while loop body");
Br(bcx, loop_bcx.llbb);
Br(loop_bcx, cond_bcx_in.llbb);
// compile the condition
let Result {bcx: cond_bcx_out, val: cond_val} =
expr::trans_to_appropriate_llval(cond_bcx_in, cond);
let cond_bcx_out =
trans_block_cleanups(cond_bcx_out, block_cleanups(cond_bcx_in));
CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, next_bcx.llbb);
// loop body:
let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
cleanup_and_Br(body_bcx_out, body_bcx_in, cond_bcx_in.llbb);
return next_bcx;
}
fn trans_loop(bcx:block, body: ast::blk) -> block {
let _icx = bcx.insn_ctxt("trans_loop");
let next_bcx = sub_block(bcx, ~"next");
let body_bcx_in = loop_scope_block(bcx, next_bcx, ~"`loop`", body.info());
Br(bcx, body_bcx_in.llbb);
let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
cleanup_and_Br(body_bcx_out, body_bcx_in, body_bcx_in.llbb);
return next_bcx;
}
fn trans_log(log_ex: @ast::expr,
lvl: @ast::expr,
bcx: block,
e: @ast::expr) -> block
{
let _icx = bcx.insn_ctxt("trans_log");
let ccx = bcx.ccx();
let mut bcx = bcx;
if ty::type_is_bot(expr_ty(bcx, lvl)) {
return expr::trans_into(bcx, lvl, expr::Ignore);
}
let modpath = vec::append(
~[path_mod(ccx.sess.ident_of(ccx.link_meta.name))],
vec::filter(bcx.fcx.path, |e|
match e { path_mod(_) => true, _ => false }
));
let modname = path_str(ccx.sess, modpath);
let global = if ccx.module_data.contains_key(modname) {
ccx.module_data.get(modname)
} else {
let s = link::mangle_internal_name_by_path_and_seq(
ccx, modpath, ~"loglevel");
let global = str::as_c_str(s, |buf| {
llvm::LLVMAddGlobal(ccx.llmod, T_i32(), buf)
});
llvm::LLVMSetGlobalConstant(global, False);
llvm::LLVMSetInitializer(global, C_null(T_i32()));
lib::llvm::SetLinkage(global, lib::llvm::InternalLinkage);
ccx.module_data.insert(modname, global);
global
};
let current_level = Load(bcx, global);
let level = unpack_result!(bcx, {
do with_scope_result(bcx, lvl.info(), ~"level") |bcx| {
expr::trans_to_appropriate_llval(bcx, lvl)
}
});
let llenabled = ICmp(bcx, lib::llvm::IntUGE, current_level, level);
do with_cond(bcx, llenabled) |bcx| {
do with_scope(bcx, log_ex.info(), ~"log") |bcx| {
let mut bcx = bcx;
// Translate the value to be logged
let val_datum = unpack_datum!(bcx, expr::trans_to_datum(bcx, e));
let tydesc = get_tydesc_simple(ccx, val_datum.ty);
// Call the polymorphic log function
let val = val_datum.to_ref_llval(bcx);
let val = PointerCast(bcx, val, T_ptr(T_i8()));
Call(bcx, ccx.upcalls.log_type, [tydesc, val, level]);
bcx
}
}
}
fn trans_break_cont(bcx: block, to_end: bool)
-> block {
let _icx = bcx.insn_ctxt("trans_break_cont");
// Locate closest loop block, outputting cleanup as we go.
let mut unwind = bcx;
let mut target;
loop {
match unwind.kind {
block_scope({loop_break: Some(brk), _}) => {
target = if to_end {
brk
} else {
unwind
};
break;
}
_ => ()
}
unwind = match unwind.parent {
Some(bcx) => bcx,
// This is a return from a loop body block
None => {
Store(bcx, C_bool(!to_end), bcx.fcx.llretptr);
cleanup_and_leave(bcx, None, Some(bcx.fcx.llreturn));
Unreachable(bcx);
return bcx;
}
};
}
cleanup_and_Br(bcx, unwind, target.llbb);
Unreachable(bcx);
return bcx;
}
fn trans_break(bcx: block) -> block {
return trans_break_cont(bcx, true);
}
fn trans_cont(bcx: block) -> block {
return trans_break_cont(bcx, false);
}
fn trans_ret(bcx: block, e: Option<@ast::expr>) -> block {
let _icx = bcx.insn_ctxt("trans_ret");
let mut bcx = bcx;
let retptr = match copy bcx.fcx.loop_ret {
Some({flagptr, retptr}) => {
// This is a loop body return. Must set continue flag (our retptr)
// to false, return flag to true, and then store the value in the
// parent's retptr.
Store(bcx, C_bool(true), flagptr);
Store(bcx, C_bool(false), bcx.fcx.llretptr);
match e {
Some(x) => PointerCast(bcx, retptr,
T_ptr(type_of(bcx.ccx(), expr_ty(bcx, x)))),
None => retptr
}
}
None => bcx.fcx.llretptr
};
match e {
Some(x) => {
bcx = expr::trans_into(bcx, x, expr::SaveIn(retptr));
}
_ => ()
}
cleanup_and_leave(bcx, None, Some(bcx.fcx.llreturn));
Unreachable(bcx);
return bcx;
}
fn trans_check_expr(bcx: block, chk_expr: @ast::expr,
pred_expr: @ast::expr, s: ~str) -> block {
let _icx = bcx.insn_ctxt("trans_check_expr");
let expr_str = s + ~" " + expr_to_str(pred_expr, bcx.ccx().sess.intr())
+ ~" failed";
let Result {bcx, val} = {
do with_scope_result(bcx, chk_expr.info(), ~"check") |bcx| {
expr::trans_to_appropriate_llval(bcx, pred_expr)
}
};
do with_cond(bcx, Not(bcx, val)) |bcx| {
trans_fail(bcx, Some(pred_expr.span), expr_str)
}
}
fn trans_fail_expr(bcx: block,
sp_opt: Option<span>,
fail_expr: Option<@ast::expr>) -> block {
let _icx = bcx.insn_ctxt("trans_fail_expr");
let mut bcx = bcx;
match fail_expr {
Some(arg_expr) => {
let ccx = bcx.ccx(), tcx = ccx.tcx;
let arg_datum = unpack_datum!(
bcx, expr::trans_to_datum(bcx, arg_expr));
if ty::type_is_str(arg_datum.ty) {
let (lldata, _lllen) = arg_datum.get_base_and_len(bcx);
return trans_fail_value(bcx, sp_opt, lldata);
} else if bcx.unreachable || ty::type_is_bot(arg_datum.ty) {
return bcx;
} else {
bcx.sess().span_bug(
arg_expr.span, ~"fail called with unsupported type " +
ppaux::ty_to_str(tcx, arg_datum.ty));
}
}
_ => return trans_fail(bcx, sp_opt, ~"explicit failure")
}
}
fn trans_fail(bcx: block, sp_opt: Option<span>, fail_str: ~str)
-> block
{
let _icx = bcx.insn_ctxt("trans_fail");
let V_fail_str = C_cstr(bcx.ccx(), fail_str);
return trans_fail_value(bcx, sp_opt, V_fail_str);
}
fn trans_fail_value(bcx: block, sp_opt: Option<span>, V_fail_str: ValueRef)
-> block
{
let _icx = bcx.insn_ctxt("trans_fail_value");
let ccx = bcx.ccx();
let {V_filename, V_line} = match sp_opt {
Some(sp) => {
let sess = bcx.sess();
let loc = codemap::lookup_char_pos(sess.parse_sess.cm, sp.lo);
{V_filename: C_cstr(bcx.ccx(), loc.file.name),
V_line: loc.line as int}
}
None => {
{V_filename: C_cstr(bcx.ccx(), ~"<runtime>"),
V_line: 0}
}
};
let V_str = PointerCast(bcx, V_fail_str, T_ptr(T_i8()));
let V_filename = PointerCast(bcx, V_filename, T_ptr(T_i8()));
let args = ~[V_str, V_filename, C_int(ccx, V_line)];
let bcx = callee::trans_rtcall(bcx, ~"fail", args, expr::Ignore);
Unreachable(bcx);
return bcx;
}
此差异已折叠。
此差异已折叠。
......@@ -17,6 +17,9 @@
use type_of::*;
use std::map::hashmap;
use util::ppaux::ty_to_str;
use datum::*;
use callee::*;
use expr::{Dest, Ignore};
export link_name, trans_foreign_mod, register_foreign_fn, trans_foreign_fn,
trans_intrinsic;
......@@ -87,8 +90,8 @@ fn ty_align(ty: TypeRef) -> uint {
Double => 8,
Struct => {
do vec::foldl(0, struct_tys(ty)) |a, t| {
uint::max(a, ty_align(t))
}
uint::max(a, ty_align(t))
}
}
Array => {
let elt = llvm::LLVMGetElementType(ty);
......@@ -785,210 +788,209 @@ fn build_ret(bcx: block, _tys: @c_stack_tys,
fn trans_intrinsic(ccx: @crate_ctxt, decl: ValueRef, item: @ast::foreign_item,
path: ast_map::path, substs: param_substs,
ref_id: Option<ast::node_id>) {
ref_id: Option<ast::node_id>)
{
debug!("trans_intrinsic(item.ident=%s)", ccx.sess.str_of(item.ident));
let fcx = new_fn_ctxt_w_id(ccx, path, decl, item.id,
Some(substs), Some(item.span));
let mut bcx = top_scope_block(fcx, None), lltop = bcx.llbb;
match ccx.sess.str_of(item.ident) {
~"atomic_xchg" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xchg_acq" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Acquire);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xchg_rel" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Release);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xadd" => {
let old = AtomicRMW(bcx, lib::llvm::Add,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xadd_acq" => {
let old = AtomicRMW(bcx, lib::llvm::Add,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Acquire);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xadd_rel" => {
let old = AtomicRMW(bcx, lib::llvm::Add,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Release);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xsub" => {
let old = AtomicRMW(bcx, lib::llvm::Sub,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xsub_acq" => {
let old = AtomicRMW(bcx, lib::llvm::Sub,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Acquire);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xsub_rel" => {
let old = AtomicRMW(bcx, lib::llvm::Sub,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Release);
Store(bcx, old, fcx.llretptr);
}
~"size_of" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
Store(bcx, C_uint(ccx, shape::llsize_of_real(ccx, lltp_ty)),
fcx.llretptr);
}
~"move_val" => {
let tp_ty = substs.tys[0];
let src = {bcx: bcx,
val: get_param(decl, first_real_arg + 1u),
kind: lv_owned};
bcx = move_val(bcx, DROP_EXISTING,
get_param(decl, first_real_arg),
src,
tp_ty);
}
~"move_val_init" => {
let tp_ty = substs.tys[0];
let src = {bcx: bcx,
val: get_param(decl, first_real_arg + 1u),
kind: lv_owned};
bcx = move_val(bcx, INIT,
get_param(decl, first_real_arg),
src,
tp_ty);
}
~"min_align_of" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
Store(bcx, C_uint(ccx, shape::llalign_of_min(ccx, lltp_ty)),
fcx.llretptr);
}
~"pref_align_of"=> {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
Store(bcx, C_uint(ccx, shape::llalign_of_pref(ccx, lltp_ty)),
fcx.llretptr);
}
~"get_tydesc" => {
let tp_ty = substs.tys[0];
let static_ti = get_tydesc(ccx, tp_ty);
lazily_emit_all_tydesc_glue(ccx, static_ti);
// FIXME (#2712): change this to T_ptr(ccx.tydesc_ty) when the
// core::sys copy of the get_tydesc interface dies off.
let td = PointerCast(bcx, static_ti.tydesc, T_ptr(T_nil()));
Store(bcx, td, fcx.llretptr);
}
~"init" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
if !ty::type_is_nil(tp_ty) {
Store(bcx, C_null(lltp_ty), fcx.llretptr);
~"atomic_xchg" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr);
}
}
~"forget" => {}
~"reinterpret_cast" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
let llout_ty = type_of::type_of(ccx, substs.tys[1]);
let tp_sz = shape::llsize_of_real(ccx, lltp_ty),
out_sz = shape::llsize_of_real(ccx, llout_ty);
if tp_sz != out_sz {
let sp = match ccx.tcx.items.get(option::get(ref_id)) {
ast_map::node_expr(e) => e.span,
_ => fail ~"reinterpret_cast or forget has non-expr arg"
};
ccx.sess.span_fatal(
sp, fmt!("reinterpret_cast called on types \
with different size: %s (%u) to %s (%u)",
ty_to_str(ccx.tcx, tp_ty), tp_sz,
ty_to_str(ccx.tcx, substs.tys[1]), out_sz));
~"atomic_xchg_acq" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Acquire);
Store(bcx, old, fcx.llretptr);
}
if !ty::type_is_nil(substs.tys[1]) {
// NB: Do not use a Load and Store here. This causes massive code
// bloat when reinterpret_cast is used on large structural types.
let llretptr = PointerCast(bcx, fcx.llretptr, T_ptr(T_i8()));
let llcast = get_param(decl, first_real_arg);
let llcast = PointerCast(bcx, llcast, T_ptr(T_i8()));
call_memmove(bcx, llretptr, llcast, llsize_of(ccx, lltp_ty));
~"atomic_xchg_rel" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Release);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xadd" => {
let old = AtomicRMW(bcx, lib::llvm::Add,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xadd_acq" => {
let old = AtomicRMW(bcx, lib::llvm::Add,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Acquire);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xadd_rel" => {
let old = AtomicRMW(bcx, lib::llvm::Add,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Release);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xsub" => {
let old = AtomicRMW(bcx, lib::llvm::Sub,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xsub_acq" => {
let old = AtomicRMW(bcx, lib::llvm::Sub,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Acquire);
Store(bcx, old, fcx.llretptr);
}
~"atomic_xsub_rel" => {
let old = AtomicRMW(bcx, lib::llvm::Sub,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Release);
Store(bcx, old, fcx.llretptr);
}
~"size_of" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
Store(bcx, C_uint(ccx, shape::llsize_of_real(ccx, lltp_ty)),
fcx.llretptr);
}
~"move_val" => {
let tp_ty = substs.tys[0];
let src = Datum {val: get_param(decl, first_real_arg + 1u),
ty: tp_ty, mode: ByRef, source: FromLvalue};
bcx = src.move_to(bcx, DROP_EXISTING,
get_param(decl, first_real_arg));
}
~"move_val_init" => {
let tp_ty = substs.tys[0];
let src = Datum {val: get_param(decl, first_real_arg + 1u),
ty: tp_ty, mode: ByRef, source: FromLvalue};
bcx = src.move_to(bcx, INIT, get_param(decl, first_real_arg));
}
~"min_align_of" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
Store(bcx, C_uint(ccx, shape::llalign_of_min(ccx, lltp_ty)),
fcx.llretptr);
}
~"pref_align_of"=> {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
Store(bcx, C_uint(ccx, shape::llalign_of_pref(ccx, lltp_ty)),
fcx.llretptr);
}
~"get_tydesc" => {
let tp_ty = substs.tys[0];
let static_ti = get_tydesc(ccx, tp_ty);
glue::lazily_emit_all_tydesc_glue(ccx, static_ti);
// FIXME (#2712): change this to T_ptr(ccx.tydesc_ty) when the
// core::sys copy of the get_tydesc interface dies off.
let td = PointerCast(bcx, static_ti.tydesc, T_ptr(T_nil()));
Store(bcx, td, fcx.llretptr);
}
~"init" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
if !ty::type_is_nil(tp_ty) {
Store(bcx, C_null(lltp_ty), fcx.llretptr);
}
}
~"forget" => {}
~"reinterpret_cast" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
let llout_ty = type_of::type_of(ccx, substs.tys[1]);
let tp_sz = shape::llsize_of_real(ccx, lltp_ty),
out_sz = shape::llsize_of_real(ccx, llout_ty);
if tp_sz != out_sz {
let sp = match ccx.tcx.items.get(option::get(ref_id)) {
ast_map::node_expr(e) => e.span,
_ => fail ~"reinterpret_cast or forget has non-expr arg"
};
ccx.sess.span_fatal(
sp, fmt!("reinterpret_cast called on types \
with different size: %s (%u) to %s (%u)",
ty_to_str(ccx.tcx, tp_ty), tp_sz,
ty_to_str(ccx.tcx, substs.tys[1]), out_sz));
}
if !ty::type_is_nil(substs.tys[1]) {
// NB: Do not use a Load and Store here. This causes
// massive code bloat when reinterpret_cast is used on
// large structural types.
let llretptr = PointerCast(bcx, fcx.llretptr, T_ptr(T_i8()));
let llcast = get_param(decl, first_real_arg);
let llcast = PointerCast(bcx, llcast, T_ptr(T_i8()));
call_memmove(bcx, llretptr, llcast, llsize_of(ccx, lltp_ty));
}
}
~"addr_of" => {
Store(bcx, get_param(decl, first_real_arg), fcx.llretptr);
}
~"needs_drop" => {
let tp_ty = substs.tys[0];
Store(bcx, C_bool(ty::type_needs_drop(ccx.tcx, tp_ty)),
fcx.llretptr);
}
~"visit_tydesc" => {
let td = get_param(decl, first_real_arg);
let visitor = get_param(decl, first_real_arg + 1u);
let td = PointerCast(bcx, td, T_ptr(ccx.tydesc_type));
call_tydesc_glue_full(bcx, visitor, td,
abi::tydesc_field_visit_glue, None);
}
~"frame_address" => {
let frameaddress = ccx.intrinsics.get(~"llvm.frameaddress");
let frameaddress_val = Call(bcx, frameaddress, ~[C_i32(0i32)]);
let fty = ty::mk_fn(bcx.tcx(), {
purity: ast::impure_fn,
proto:
ty::proto_vstore(ty::vstore_slice(
ty::re_bound(ty::br_anon(0)))),
bounds: @~[],
inputs: ~[{
mode: ast::expl(ast::by_val),
ty: ty::mk_imm_ptr(
bcx.tcx(),
ty::mk_mach_uint(bcx.tcx(), ast::ty_u8))
}],
output: ty::mk_nil(bcx.tcx()),
ret_style: ast::return_val
});
bcx = trans_call_inner(bcx, None, fty, ty::mk_nil(bcx.tcx()),
|bcx| lval_no_env(
bcx,
get_param(decl, first_real_arg),
lv_temporary),
arg_vals(~[frameaddress_val]), ignore);
}
~"morestack_addr" => {
// XXX This is a hack to grab the address of this particular
// native function. There should be a general in-language
// way to do this
let llfty = type_of_fn(bcx.ccx(), ~[], ty::mk_nil(bcx.tcx()));
let morestack_addr = decl_cdecl_fn(
bcx.ccx().llmod, ~"__morestack", llfty);
let morestack_addr = PointerCast(bcx, morestack_addr, T_ptr(T_nil()));
Store(bcx, morestack_addr, fcx.llretptr);
}
_ => {
// Could we make this an enum rather than a string? does it get
// checked earlier?
ccx.sess.span_bug(item.span, ~"unknown intrinsic");
}
~"addr_of" => {
Store(bcx, get_param(decl, first_real_arg), fcx.llretptr);
}
~"needs_drop" => {
let tp_ty = substs.tys[0];
Store(bcx, C_bool(ty::type_needs_drop(ccx.tcx, tp_ty)),
fcx.llretptr);
}
~"visit_tydesc" => {
let td = get_param(decl, first_real_arg);
let visitor = get_param(decl, first_real_arg + 1u);
let td = PointerCast(bcx, td, T_ptr(ccx.tydesc_type));
glue::call_tydesc_glue_full(bcx, visitor, td,
abi::tydesc_field_visit_glue, None);
}
~"frame_address" => {
let frameaddress = ccx.intrinsics.get(~"llvm.frameaddress");
let frameaddress_val = Call(bcx, frameaddress, ~[C_i32(0i32)]);
let fty = ty::mk_fn(bcx.tcx(), {
purity: ast::impure_fn,
proto:
ty::proto_vstore(ty::vstore_slice(
ty::re_bound(ty::br_anon(0)))),
bounds: @~[],
inputs: ~[{
mode: ast::expl(ast::by_val),
ty: ty::mk_imm_ptr(
bcx.tcx(),
ty::mk_mach_uint(bcx.tcx(), ast::ty_u8))
}],
output: ty::mk_nil(bcx.tcx()),
ret_style: ast::return_val
});
let datum = Datum {val: get_param(decl, first_real_arg),
mode: ByRef, ty: fty, source: FromLvalue};
bcx = trans_call_inner(
bcx, None, fty, ty::mk_nil(bcx.tcx()),
|bcx| Callee {bcx: bcx, data: Closure(datum)},
ArgVals(~[frameaddress_val]), Ignore);
}
~"morestack_addr" => {
// XXX This is a hack to grab the address of this particular
// native function. There should be a general in-language
// way to do this
let llfty = type_of_fn(bcx.ccx(), ~[], ty::mk_nil(bcx.tcx()));
let morestack_addr = decl_cdecl_fn(
bcx.ccx().llmod, ~"__morestack", llfty);
let morestack_addr = PointerCast(bcx, morestack_addr,
T_ptr(T_nil()));
Store(bcx, morestack_addr, fcx.llretptr);
}
_ => {
// Could we make this an enum rather than a string? does it get
// checked earlier?
ccx.sess.span_bug(item.span, ~"unknown intrinsic");
}
}
build_return(bcx);
finish_fn(fcx, lltop);
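The move_val and move_val_init arms are where the commit's Datum type shows up most directly: the source operand is wrapped as a by-ref Datum and handed to move_to, which either fills uninitialized memory (INIT) or drops the previous contents first (DROP_EXISTING), replacing the old {bcx, val, kind: lv_owned} records. What follows is only a minimal present-day sketch of that shape, with invented stand-in types rather than the trans internals:

// Invented stand-ins for the trans types; this only illustrates the
// INIT vs DROP_EXISTING distinction that the move_to call encodes.
enum MoveMode { Init, DropExisting }

struct Datum<T> { val: T }

impl<T> Datum<T> {
    // Move this datum's value into `dest`, dropping the old value only
    // when the destination slot is known to be initialized.
    fn move_to(self, mode: MoveMode, dest: &mut Option<T>) {
        match mode {
            MoveMode::Init => {
                debug_assert!(dest.is_none(), "INIT expects uninitialized memory");
            }
            MoveMode::DropExisting => {
                let _old = dest.take(); // dropped here, as DROP_EXISTING requests
            }
        }
        *dest = Some(self.val);
    }
}

fn main() {
    let mut slot: Option<String> = None;
    Datum { val: String::from("first") }.move_to(MoveMode::Init, &mut slot);
    Datum { val: String::from("second") }.move_to(MoveMode::DropExisting, &mut slot);
    assert_eq!(slot.as_deref(), Some("second"));
}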
......
use common::*;
use syntax::ast;
use syntax::ast_util::local_def;
use syntax::ast_map::{path, path_mod, path_name};
use base::{trans_item, get_item_val, self_arg, trans_fn,
impl_self, get_insn_ctxt};
fn maybe_instantiate_inline(ccx: @crate_ctxt, fn_id: ast::def_id)
-> ast::def_id
{
let _icx = ccx.insn_ctxt("maybe_instantiate_inline");
match ccx.external.find(fn_id) {
Some(Some(node_id)) => {
// Already inline
debug!("maybe_instantiate_inline(%s): already inline as node id %d",
ty::item_path_str(ccx.tcx, fn_id), node_id);
local_def(node_id)
}
Some(None) => fn_id, // Not inlinable
None => { // Not seen yet
match csearch::maybe_get_item_ast(
ccx.tcx, fn_id,
|a,b,c,d| {
astencode::decode_inlined_item(a, b, ccx.maps, c, d)
}) {
csearch::not_found => {
ccx.external.insert(fn_id, None);
fn_id
}
csearch::found(ast::ii_item(item)) => {
ccx.external.insert(fn_id, Some(item.id));
trans_item(ccx, *item);
local_def(item.id)
}
csearch::found(ast::ii_ctor(ctor, _, _, _)) => {
ccx.external.insert(fn_id, Some(ctor.node.id));
local_def(ctor.node.id)
}
csearch::found(ast::ii_foreign(item)) => {
ccx.external.insert(fn_id, Some(item.id));
local_def(item.id)
}
csearch::found_parent(parent_id, ast::ii_item(item)) => {
ccx.external.insert(parent_id, Some(item.id));
let mut my_id = 0;
match item.node {
ast::item_enum(_, _) => {
let vs_here = ty::enum_variants(ccx.tcx, local_def(item.id));
let vs_there = ty::enum_variants(ccx.tcx, parent_id);
do vec::iter2(*vs_here, *vs_there) |here, there| {
if there.id == fn_id { my_id = here.id.node; }
ccx.external.insert(there.id, Some(here.id.node));
}
}
_ => ccx.sess.bug(~"maybe_instantiate_inline: item has a \
non-enum parent")
}
trans_item(ccx, *item);
local_def(my_id)
}
csearch::found_parent(_, _) => {
ccx.sess.bug(~"maybe_get_item_ast returned a found_parent \
with a non-item parent");
}
csearch::found(ast::ii_method(impl_did, mth)) => {
ccx.external.insert(fn_id, Some(mth.id));
let {bounds: impl_bnds, region_param: _, ty: impl_ty} =
ty::lookup_item_type(ccx.tcx, impl_did);
if (*impl_bnds).len() + mth.tps.len() == 0u {
let llfn = get_item_val(ccx, mth.id);
let path = vec::append(
ty::item_path(ccx.tcx, impl_did),
~[path_name(mth.ident)]);
trans_fn(ccx, path, mth.decl, mth.body,
llfn, impl_self(impl_ty), None, mth.id);
}
local_def(mth.id)
}
csearch::found(ast::ii_dtor(dtor, _, _, _)) => {
ccx.external.insert(fn_id, Some(dtor.node.id));
local_def(dtor.node.id)
}
}
}
}
}
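maybe_instantiate_inline memoizes through ccx.external: Some(Some(node_id)) means the external item was already translated into this crate, Some(None) means it is known not to be inlinable, and a missing entry means it has never been looked up, so each external id is decoded and translated at most once. A small self-contained sketch of that three-state cache (hypothetical names and types, not the rustc data structures):

use std::collections::HashMap;

// Returns Some(local_id) if `extern_id` could be inlined, None otherwise,
// consulting the cache first and recording both successes and failures.
fn lookup_or_inline(
    cache: &mut HashMap<u32, Option<u32>>,
    extern_id: u32,
    try_inline: impl Fn(u32) -> Option<u32>,
) -> Option<u32> {
    if let Some(cached) = cache.get(&extern_id) {
        return *cached; // hit: either a local id or a recorded failure
    }
    let result = try_inline(extern_id); // miss: attempt the inline once
    cache.insert(extern_id, result);    // remember the outcome either way
    result
}

fn main() {
    let mut cache = HashMap::new();
    // Pretend even ids can be inlined as id + 1000 and odd ids cannot.
    let inline = |id: u32| if id % 2 == 0 { Some(id + 1000) } else { None };
    assert_eq!(lookup_or_inline(&mut cache, 4, inline), Some(1004));
    assert_eq!(lookup_or_inline(&mut cache, 4, inline), Some(1004)); // cached
    assert_eq!(lookup_or_inline(&mut cache, 7, inline), None);       // cached failure
}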
{
macro_rules! unpack_datum(
($bcx: ident, $inp: expr) => (
{
let db = $inp;
$bcx = db.bcx;
db.datum
}
)
);
macro_rules! unpack_result(
($bcx: ident, $inp: expr) => (
{
let db = $inp;
$bcx = db.bcx;
db.val
}
)
);
macro_rules! trace_span(
($bcx: ident, $sp: expr, $str: expr) => (
{
let bcx = $bcx;
if bcx.sess().trace() {
trans_trace(bcx, Some($sp), $str);
}
}
)
);
macro_rules! trace(
($bcx: ident, $str: expr) => (
{
let bcx = $bcx;
if bcx.sess().trace() {
trans_trace(bcx, None, $str);
}
}
)
);
}
\ No newline at end of file
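The unpack_datum! and unpack_result! macros exist because trans helpers hand back a block context together with a value; the macro rebinds the caller's bcx to the returned context and yields the value as the expression result, keeping call sites to one line. A compilable sketch of the same trick in today's macro_rules! form, with invented Ctx and DatumBlock types standing in for the trans ones:

struct Ctx { steps: u32 }
struct DatumBlock { bcx: Ctx, datum: i64 }

macro_rules! unpack_datum {
    ($bcx:ident, $inp:expr) => {{
        let db = $inp;
        $bcx = db.bcx; // thread the (possibly new) context back to the caller
        db.datum       // hand the value to the surrounding expression
    }};
}

// A helper in the style of trans: consumes a context and returns a new one
// along with the value it produced.
fn produce(bcx: Ctx, n: i64) -> DatumBlock {
    DatumBlock { bcx: Ctx { steps: bcx.steps + 1 }, datum: n * 2 }
}

fn main() {
    let mut bcx = Ctx { steps: 0 };
    let d = unpack_datum!(bcx, produce(bcx, 21));
    assert_eq!(d, 42);
    assert_eq!(bcx.steps, 1); // bcx was rebound by the macro
}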
......@@ -144,7 +144,7 @@ fn traverse_public_item(cx: ctx, item: @item) {
}
fn mk_ty_visitor() -> visit::vt<ctx> {
visit::mk_vt(@{visit_ty: traverse_ty,.. *visit::default_visitor()})
visit::mk_vt(@{visit_ty: traverse_ty, ..*visit::default_visitor()})
}
fn traverse_ty(ty: @ty, cx: ctx, v: visit::vt<ctx>) {
......@@ -200,7 +200,7 @@ fn traverse_item(i: @item, cx: ctx, _v: visit::vt<ctx>) {
visit::visit_block(body, cx, visit::mk_vt(@{
visit_expr: traverse_expr,
visit_item: traverse_item,
.. *visit::default_visitor()
..*visit::default_visitor()
}));
}
......@@ -219,7 +219,7 @@ fn traverse_all_resources_and_impls(cx: ctx, crate_mod: _mod) {
_ => ()
}
},
.. *visit::default_visitor()
..*visit::default_visitor()
}));
}
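The visitors built in this file all follow one pattern: start from visit::default_visitor() and override only the callbacks of interest with record-update syntax (..*visit::default_visitor()). A minimal sketch of the same idea using present-day struct-update syntax and an invented Visitor type:

struct Visitor {
    visit_ty: fn(&str) -> String,
    visit_expr: fn(&str) -> String,
}

fn default_ty(t: &str) -> String { format!("ty({t})") }
fn default_expr(e: &str) -> String { format!("expr({e})") }
fn traverse_ty(t: &str) -> String { format!("traversed-ty({t})") }

fn default_visitor() -> Visitor {
    Visitor { visit_ty: default_ty, visit_expr: default_expr }
}

fn main() {
    // Override only visit_ty; visit_expr comes from the default visitor.
    let v = Visitor { visit_ty: traverse_ty, ..default_visitor() };
    assert_eq!((v.visit_ty)("int"), "traversed-ty(int)");
    assert_eq!((v.visit_expr)("x + 1"), "expr(x + 1)");
}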
......@@ -225,12 +225,13 @@ fn require_same_types(
}
match infer::mk_eqty(l_infcx, t1_is_expected, span, t1, t2) {
result::Ok(()) => true,
result::Err(ref terr) => {
l_tcx.sess.span_err(span, msg() + ~": " +
ty::type_err_to_str(l_tcx, terr));
false
}
result::Ok(()) => true,
result::Err(ref terr) => {
l_tcx.sess.span_err(span, msg() + ~": " +
ty::type_err_to_str(l_tcx, terr));
ty::note_and_explain_type_err(l_tcx, terr);
false
}
}
}
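require_same_types reports a unification failure by combining the caller-supplied msg() thunk with the rendered type error (and, with this change, a longer explanatory note), then returns false so the caller can keep going. A rough sketch of that reporting shape with invented types rather than the typeck API:

// Push a diagnostic built from the lazy message plus the mismatch details,
// and report success or failure to the caller as a bool.
fn require_same<T: PartialEq + std::fmt::Debug>(
    t1: &T,
    t2: &T,
    msg: impl Fn() -> String,
    errors: &mut Vec<String>,
) -> bool {
    if t1 == t2 {
        true
    } else {
        errors.push(format!("{}: expected {:?}, found {:?}", msg(), t1, t2));
        false
    }
}

fn main() {
    let mut errs = Vec::new();
    assert!(require_same(&"int", &"int", || String::from("mismatched types"), &mut errs));
    assert!(!require_same(&"int", &"bool", || String::from("mismatched types"), &mut errs));
    assert_eq!(errs.len(), 1);
}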
......
......@@ -35,6 +35,13 @@ use back_ = back;
mod middle {
mod trans {
mod inline;
mod monomorphize;
mod controlflow;
mod glue;
mod datum;
mod callee;
mod expr;
mod common;
mod consts;
mod type_of;
......
......@@ -7,7 +7,7 @@ fn foo(cond: fn() -> bool, box: fn() -> @int) {
// Here we complain because the resulting region
// of this borrow is the fn body as a whole.
y = borrow(x); //~ ERROR illegal borrow: managed value would have to be rooted
y = borrow(x); //~ ERROR illegal borrow: cannot root managed value long enough
assert *x == *y;
if cond() { break; }
......