Commit 0309af45 authored by Graydon Hoare

Put unique allocs in managed heap when they might contain managed boxes.

Parent cec1f38c
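For illustration only (hypothetical user code, not part of the commit; the function name and 2013-era syntax are assumptions): the case this change targets is a unique allocation whose contents include managed (@) boxes. Such a ~-allocation is now made in the task's managed heap and its box header ref count is forced to RC_MANAGED_UNIQUE (-2), so the task annihilator can walk over it without freeing memory that its owner will free.

    fn motivating_example() {
        let inner = @10;     // @-box: always lives in the task-local managed heap
        let outer = ~inner;  // ~-box holding an @-box: with this commit it is also
                             // allocated in the managed heap, tagged with
                             // RC_MANAGED_UNIQUE so the annihilator skips it
        assert **outer == 10;
    }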
......@@ -111,45 +111,102 @@ struct Task {
* This runs at task death to free all boxes.
*/
+ struct AnnihilateStats {
+ n_total_boxes: uint,
+ n_unique_boxes: uint,
+ n_bytes_freed: uint
+ }
+ unsafe fn each_live_alloc(f: fn(box: *mut BoxRepr, uniq: bool) -> bool) {
+ use managed;
+ let task: *Task = transmute(rustrt::rust_get_task());
+ let box = (*task).boxed_region.live_allocs;
+ let mut box: *mut BoxRepr = transmute(copy box);
+ while box != mut_null() {
+ let next = transmute(copy (*box).header.next);
+ let uniq =
+ (*box).header.ref_count == managed::raw::RC_MANAGED_UNIQUE;
+ if ! f(box, uniq) {
+ break
+ }
+ box = next
+ }
+ }
+ #[cfg(unix)]
+ fn debug_mem() -> bool {
+ use os;
+ use libc;
+ do os::as_c_charp("RUST_DEBUG_MEM") |p| {
+ unsafe { libc::getenv(p) != null() }
+ }
+ }
+ #[cfg(windows)]
+ fn debug_mem() -> bool {
+ false
+ }
/// Destroys all managed memory (i.e. @ boxes) held by the current task.
#[cfg(notest)]
#[lang="annihilate"]
pub unsafe fn annihilate() {
use rt::rt_free;
use io::WriterUtil;
use io;
use libc;
use sys;
use managed;
let task: *Task = transmute(rustrt::rust_get_task());
+ let mut stats = AnnihilateStats {
+ n_total_boxes: 0,
+ n_unique_boxes: 0,
+ n_bytes_freed: 0
+ };
// Pass 1: Make all boxes immortal.
- let box = (*task).boxed_region.live_allocs;
- let mut box: *mut BoxRepr = transmute(copy box);
- while box != mut_null() {
- debug!("making box immortal: %x", box as uint);
- (*box).header.ref_count = 0x77777777;
- box = transmute(copy (*box).header.next);
+ for each_live_alloc |box, uniq| {
+ stats.n_total_boxes += 1;
+ if uniq {
+ stats.n_unique_boxes += 1;
+ } else {
+ (*box).header.ref_count = managed::raw::RC_IMMORTAL;
+ }
+ }
// Pass 2: Drop all boxes.
- let box = (*task).boxed_region.live_allocs;
- let mut box: *mut BoxRepr = transmute(copy box);
- while box != mut_null() {
- debug!("calling drop glue for box: %x", box as uint);
- let tydesc: *TypeDesc = transmute(copy (*box).header.type_desc);
- let drop_glue: DropGlue = transmute(((*tydesc).drop_glue, 0));
- drop_glue(to_unsafe_ptr(&tydesc), transmute(&(*box).data));
- box = transmute(copy (*box).header.next);
+ for each_live_alloc |box, uniq| {
+ if !uniq {
+ let tydesc: *TypeDesc = transmute(copy (*box).header.type_desc);
+ let drop_glue: DropGlue = transmute(((*tydesc).drop_glue, 0));
+ drop_glue(to_unsafe_ptr(&tydesc), transmute(&(*box).data));
+ }
+ }
// Pass 3: Free all boxes.
- loop {
- let box = (*task).boxed_region.live_allocs;
- if box == null() { break; }
- let mut box: *mut BoxRepr = transmute(copy box);
- assert (*box).header.prev == null();
- debug!("freeing box: %x", box as uint);
- rt_free(transmute(box));
+ for each_live_alloc |box, uniq| {
+ if !uniq {
+ stats.n_bytes_freed +=
+ (*((*box).header.type_desc)).size
+ + sys::size_of::<BoxRepr>();
+ rt_free(transmute(box));
+ }
+ }
+ if debug_mem() {
+ // We do logging here w/o allocation.
+ let dbg = libc::STDERR_FILENO as io::fd_t;
+ dbg.write_str("annihilator stats:");
+ dbg.write_str("\n total_boxes: ");
+ dbg.write_uint(stats.n_total_boxes);
+ dbg.write_str("\n unique_boxes: ");
+ dbg.write_uint(stats.n_unique_boxes);
+ dbg.write_str("\n bytes_freed: ");
+ dbg.write_uint(stats.n_bytes_freed);
+ dbg.write_str("\n");
+ }
}
......
......@@ -16,7 +16,13 @@
use prelude::*;
use ptr;
pub mod raw {
+ pub const RC_EXCHANGE_UNIQUE : uint = (-1) as uint;
+ pub const RC_MANAGED_UNIQUE : uint = (-2) as uint;
+ pub const RC_IMMORTAL : uint = 0x77777777;
use intrinsic::TyDesc;
pub struct BoxHeaderRepr {
......
- // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+ // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
......@@ -31,9 +31,14 @@
#[abi = "cdecl"]
pub extern mod rustrt {
+ // These names are terrible. reserve_shared applies
+ // to ~[] and reserve_shared_actual applies to @[].
unsafe fn vec_reserve_shared(++t: *sys::TypeDesc,
++v: **raw::VecRepr,
++n: libc::size_t);
+ unsafe fn vec_reserve_shared_actual(++t: *sys::TypeDesc,
+ ++v: **raw::VecRepr,
+ ++n: libc::size_t);
}
/// Returns true if a vector contains no elements
......@@ -59,11 +64,17 @@ unsafe fn vec_reserve_shared(++t: *sys::TypeDesc,
*/
pub fn reserve<T>(v: &mut ~[T], n: uint) {
// Only make the (slow) call into the runtime if we have to
+ use managed;
if capacity(v) < n {
unsafe {
let ptr: **raw::VecRepr = cast::transmute(v);
- rustrt::vec_reserve_shared(sys::get_type_desc::<T>(),
- ptr, n as size_t);
+ let td = sys::get_type_desc::<T>();
+ if ((**ptr).box_header.ref_count ==
+ managed::raw::RC_MANAGED_UNIQUE) {
+ rustrt::vec_reserve_shared_actual(td, ptr, n as size_t);
+ } else {
+ rustrt::vec_reserve_shared(td, ptr, n as size_t);
+ }
}
}
}
......
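For illustration only (hypothetical caller, not part of the diff): the new box-header check in reserve() above routes growth of a unique vector through vec_reserve_shared_actual when the vector itself was allocated in the managed heap, which after this commit happens exactly when its element type contains managed boxes.

    fn reserve_example() {
        // ~[@int] contains managed boxes, so the vector is allocated in the
        // managed heap with RC_MANAGED_UNIQUE in its box header; reserve()
        // therefore takes the vec_reserve_shared_actual branch.
        let mut with_boxes: ~[@int] = ~[@1, @2];
        vec::reserve(&mut with_boxes, 64);

        // ~[int] contains no managed boxes; it stays in the exchange heap and
        // reserve() takes the plain vec_reserve_shared branch.
        let mut plain: ~[int] = ~[1, 2];
        vec::reserve(&mut plain, 64);
    }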
......@@ -286,7 +286,7 @@ pub fn malloc_raw_dyn(bcx: block,
let ccx = bcx.ccx();
let (mk_fn, langcall) = match heap {
- heap_shared => {
+ heap_managed | heap_managed_unique => {
(ty::mk_imm_box, bcx.tcx().lang_items.malloc_fn())
}
heap_exchange => {
......@@ -310,7 +310,9 @@ pub fn malloc_raw_dyn(bcx: block,
langcall,
~[tydesc, size],
expr::SaveIn(rval));
- return rslt(bcx, PointerCast(bcx, Load(bcx, rval), llty));
+ let r = rslt(bcx, PointerCast(bcx, Load(bcx, rval), llty));
+ maybe_set_managed_unique_rc(r.bcx, r.val, heap);
+ r
}
/**
......@@ -364,11 +366,31 @@ pub fn malloc_general(bcx: block, t: ty::t, heap: heap)
}
pub fn malloc_boxed(bcx: block, t: ty::t)
-> MallocResult {
- malloc_general(bcx, t, heap_shared)
+ malloc_general(bcx, t, heap_managed)
}
+ pub fn heap_for_unique(bcx: block, t: ty::t) -> heap {
+ if ty::type_contents(bcx.tcx(), t).contains_managed() {
+ heap_managed_unique
+ } else {
+ heap_exchange
+ }
+ }
+ pub fn maybe_set_managed_unique_rc(bcx: block, bx: ValueRef, heap: heap) {
+ if heap == heap_managed_unique {
+ // In cases where we are looking at a unique-typed allocation in the
+ // managed heap (thus have refcount 1 from the managed allocator),
+ // such as a ~(@foo) or such. These need to have their refcount forced
+ // to -2 so the annihilator ignores them.
+ let rc = GEPi(bcx, bx, [0u, abi::box_field_refcnt]);
+ Store(bcx, C_int(bcx.ccx(), -2), rc);
+ }
+ }
pub fn malloc_unique(bcx: block, t: ty::t)
-> MallocResult {
- malloc_general(bcx, t, heap_exchange)
+ malloc_general(bcx, t, heap_for_unique(bcx, t))
}
// Type descriptor and type glue stuff
......
......@@ -178,10 +178,10 @@ fn nuke_ref_count(bcx: block, llbox: ValueRef) {
// Allocate and initialize the box:
match sigil {
ast::ManagedSigil => {
- malloc_raw(bcx, cdata_ty, heap_shared)
+ malloc_raw(bcx, cdata_ty, heap_managed)
}
ast::OwnedSigil => {
- malloc_raw(bcx, cdata_ty, heap_exchange)
+ malloc_raw(bcx, cdata_ty, heap_for_unique(bcx, cdata_ty))
}
ast::BorrowedSigil => {
let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
......@@ -574,7 +574,7 @@ pub fn make_opaque_cbox_free_glue(
// Free the ty descr (if necc) and the box itself
match sigil {
ast::ManagedSigil => glue::trans_free(bcx, cbox),
- ast::OwnedSigil => glue::trans_unique_free(bcx, cbox),
+ ast::OwnedSigil => glue::trans_exchange_free(bcx, cbox),
ast::BorrowedSigil => {
bcx.sess().bug(~"impossible")
}
......
......@@ -332,8 +332,10 @@ pub fn warn_not_to_commit(ccx: @CrateContext, msg: ~str) {
}
// Heap selectors. Indicate which heap something should go on.
+ #[deriving_eq]
pub enum heap {
- heap_shared,
+ heap_managed,
+ heap_managed_unique,
heap_exchange,
}
......@@ -458,12 +460,12 @@ pub fn add_clean_frozen_root(bcx: block, val: ValueRef, t: ty::t) {
}
pub fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) {
let free_fn = match heap {
- heap_shared => {
+ heap_managed | heap_managed_unique => {
let f: @fn(block) -> block = |a| glue::trans_free(a, ptr);
f
}
heap_exchange => {
- let f: @fn(block) -> block = |a| glue::trans_unique_free(a, ptr);
+ let f: @fn(block) -> block = |a| glue::trans_exchange_free(a, ptr);
f
}
};
......
......@@ -414,11 +414,12 @@ fn trans_rvalue_datum_unadjusted(bcx: block, expr: @ast::expr) -> DatumBlock {
match expr.node {
ast::expr_vstore(contents, ast::expr_vstore_box) |
ast::expr_vstore(contents, ast::expr_vstore_mut_box) => {
- return tvec::trans_uniq_or_managed_vstore(bcx, heap_shared,
+ return tvec::trans_uniq_or_managed_vstore(bcx, heap_managed,
expr, contents);
}
ast::expr_vstore(contents, ast::expr_vstore_uniq) => {
- return tvec::trans_uniq_or_managed_vstore(bcx, heap_exchange,
+ let heap = heap_for_unique(bcx, expr_ty(bcx, contents));
+ return tvec::trans_uniq_or_managed_vstore(bcx, heap,
expr, contents);
}
ast::expr_lit(lit) => {
......@@ -1272,10 +1273,12 @@ fn trans_unary_datum(bcx: block,
immediate_rvalue_bcx(bcx, llneg, un_ty)
}
ast::box(_) => {
- trans_boxed_expr(bcx, un_ty, sub_expr, sub_ty, heap_shared)
+ trans_boxed_expr(bcx, un_ty, sub_expr, sub_ty,
+ heap_managed)
}
ast::uniq(_) => {
- trans_boxed_expr(bcx, un_ty, sub_expr, sub_ty, heap_exchange)
+ let heap = heap_for_unique(bcx, un_ty);
+ trans_boxed_expr(bcx, un_ty, sub_expr, sub_ty, heap)
}
ast::deref => {
bcx.sess().bug(~"deref expressions should have been \
......
......@@ -37,8 +37,8 @@ pub fn trans_free(cx: block, v: ValueRef) -> block {
expr::Ignore)
}
- pub fn trans_unique_free(cx: block, v: ValueRef) -> block {
- let _icx = cx.insn_ctxt("trans_unique_free");
+ pub fn trans_exchange_free(cx: block, v: ValueRef) -> block {
+ let _icx = cx.insn_ctxt("trans_exchange_free");
callee::trans_rtcall_or_lang_call(
cx,
cx.tcx().lang_items.exchange_free_fn(),
......
......@@ -878,7 +878,7 @@ pub fn trans_trait_cast(bcx: block,
let MallocResult {bcx: new_bcx, box: llbox, body: body} =
malloc_boxed(bcx, v_ty);
bcx = new_bcx;
- add_clean_free(bcx, llbox, heap_shared);
+ add_clean_free(bcx, llbox, heap_managed);
bcx = expr::trans_into(bcx, val, SaveIn(body));
revoke_clean(bcx, llbox);
......
......@@ -85,11 +85,13 @@ pub fn alloc_raw(bcx: block, unit_ty: ty::t,
base::malloc_general_dyn(bcx, vecbodyty, heap, vecsize);
Store(bcx, fill, GEPi(bcx, body, [0u, abi::vec_elt_fill]));
Store(bcx, alloc, GEPi(bcx, body, [0u, abi::vec_elt_alloc]));
+ base::maybe_set_managed_unique_rc(bcx, bx, heap);
return rslt(bcx, bx);
}
pub fn alloc_uniq_raw(bcx: block, unit_ty: ty::t,
fill: ValueRef, alloc: ValueRef) -> Result {
- alloc_raw(bcx, unit_ty, fill, alloc, heap_exchange)
+ alloc_raw(bcx, unit_ty, fill, alloc, heap_for_unique(bcx, unit_ty))
}
pub fn alloc_vec(bcx: block,
......@@ -317,13 +319,14 @@ pub fn trans_uniq_or_managed_vstore(bcx: block,
_ => {}
}
}
- heap_shared => {}
+ heap_managed | heap_managed_unique => {}
}
let vt = vec_types_from_expr(bcx, vstore_expr);
let count = elements_required(bcx, content_expr);
let Result {bcx, val} = alloc_vec(bcx, vt.unit_ty, count, heap);
add_clean_free(bcx, val, heap);
let dataptr = get_dataptr(bcx, get_bodyptr(bcx, val));
......
......@@ -30,7 +30,11 @@ pub fn make_free_glue(bcx: block, vptrptr: ValueRef, box_ty: ty::t)
let body_datum = box_datum.box_body(bcx);
let bcx = glue::drop_ty(bcx, body_datum.to_ref_llval(bcx),
body_datum.ty);
- glue::trans_unique_free(bcx, box_datum.val)
+ if ty::type_contents(bcx.tcx(), box_ty).contains_managed() {
+ glue::trans_free(bcx, box_datum.val)
+ } else {
+ glue::trans_exchange_free(bcx, box_datum.val)
+ }
}
}
......
......@@ -1571,10 +1571,11 @@ pub fn get_element_type(ty: t, i: uint) -> t {
pub pure fn type_is_unique(ty: t) -> bool {
match get(ty).sty {
- ty_uniq(_) => return true,
- ty_evec(_, vstore_uniq) => true,
- ty_estr(vstore_uniq) => true,
- _ => return false
+ ty_uniq(_) |
+ ty_evec(_, vstore_uniq) |
+ ty_estr(vstore_uniq) |
+ ty_opaque_closure_ptr(ast::OwnedSigil) => true,
+ _ => return false
}
}
......@@ -1799,6 +1800,10 @@ fn is_owned(&self, cx: ctxt) -> bool {
TC_MANAGED + TC_BORROWED_POINTER
}
+ fn contains_managed(&self) -> bool {
+ self.intersects(TC_MANAGED)
+ }
fn is_const(&self, cx: ctxt) -> bool {
!self.intersects(TypeContents::nonconst(cx))
}
......@@ -2083,11 +2088,19 @@ fn tc_ty(cx: ctxt,
TC_ALL
}
- ty_trait(_, _, vstore_fixed(_)) |
- ty_type |
- ty_opaque_closure_ptr(_) |
- ty_opaque_box |
- ty_unboxed_vec(_) |
+ ty_opaque_box => TC_MANAGED,
+ ty_unboxed_vec(mt) => tc_mt(cx, mt, cache),
+ ty_opaque_closure_ptr(sigil) => {
+ match sigil {
+ ast::BorrowedSigil => TC_BORROWED_POINTER,
+ ast::ManagedSigil => TC_MANAGED,
+ ast::OwnedSigil => TC_OWNED_CLOSURE
+ }
+ }
+ ty_type => TC_NONE,
+ ty_trait(_, _, vstore_fixed(_)) => TC_NONE,
ty_err => {
cx.sess.bug(~"Asked to compute contents of fictitious type");
}
......@@ -2229,8 +2242,11 @@ fn type_size(cx: ctxt, ty: t) -> uint {
ty_infer(_) => {
cx.sess.bug(~"Asked to compute kind of a type variable");
}
- ty_type | ty_opaque_closure_ptr(_)
- | ty_opaque_box | ty_unboxed_vec(_) | ty_err => {
+ ty_type => 1,
+ ty_opaque_closure_ptr(_) => 1,
+ ty_opaque_box => 1,
+ ty_unboxed_vec(_) => 10,
+ ty_err => {
cx.sess.bug(~"Asked to compute kind of fictitious type");
}
}
......
......@@ -38,7 +38,10 @@ rust_opaque_box *boxed_region::malloc(type_desc *td, size_t body_size) {
rust_opaque_box *boxed_region::realloc(rust_opaque_box *box,
size_t new_size) {
- assert(box->ref_count == 1);
+ // We also get called on the unique-vec-in-managed-heap path.
+ assert(box->ref_count == 1 ||
+ box->ref_count == (size_t)(-2));
size_t total_size = new_size + sizeof(rust_opaque_box);
rust_opaque_box *new_box =
......@@ -47,7 +50,6 @@ rust_opaque_box *boxed_region::realloc(rust_opaque_box *box,
if (new_box->next) new_box->next->prev = new_box;
if (live_allocs == box) live_allocs = new_box;
LOG(rust_get_current_task(), box,
"@realloc()=%p with orig=%p, size %lu==%lu+%lu",
new_box, box, total_size, sizeof(rust_opaque_box), new_size);
......