提交 dee7fa58 编写于 作者: C Corey Richardson 提交者: Alex Crichton

Use `mmap` to map in task stacks and guard page

Also implement caching of stacks.
上级 462f09e9
...@@ -12,10 +12,9 @@ ...@@ -12,10 +12,9 @@
use std::uint; use std::uint;
use std::cast::{transmute, transmute_mut_unsafe, use std::cast::{transmute, transmute_mut_unsafe,
transmute_region, transmute_mut_region}; transmute_region, transmute_mut_region};
use stack::Stack;
use std::unstable::stack; use std::unstable::stack;
use stack::StackSegment;
// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing // FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
// SSE regs. It would be marginally better not to do this. In C++ we // SSE regs. It would be marginally better not to do this. In C++ we
// use an attribute on a struct. // use an attribute on a struct.
...@@ -41,7 +40,7 @@ pub fn empty() -> Context { ...@@ -41,7 +40,7 @@ pub fn empty() -> Context {
} }
/// Create a new context that will resume execution by running proc() /// Create a new context that will resume execution by running proc()
pub fn new(start: proc(), stack: &mut StackSegment) -> Context { pub fn new(start: proc(), stack: &mut Stack) -> Context {
// The C-ABI function that is the task entry point // The C-ABI function that is the task entry point
// //
// Note that this function is a little sketchy. We're taking a // Note that this function is a little sketchy. We're taking a
...@@ -79,6 +78,7 @@ pub fn new(start: proc(), stack: &mut StackSegment) -> Context { ...@@ -79,6 +78,7 @@ pub fn new(start: proc(), stack: &mut StackSegment) -> Context {
// be passed to the spawn function. Another unfortunate // be passed to the spawn function. Another unfortunate
// allocation // allocation
let start = ~start; let start = ~start;
initialize_call_frame(&mut *regs, initialize_call_frame(&mut *regs,
task_start_wrapper as *c_void, task_start_wrapper as *c_void,
unsafe { transmute(&*start) }, unsafe { transmute(&*start) },
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
use std::rt::env; use std::rt::env;
use context::Context; use context::Context;
use stack::{StackPool, StackSegment}; use stack::{StackPool, Stack};
/// A coroutine is nothing more than a (register context, stack) pair. /// A coroutine is nothing more than a (register context, stack) pair.
pub struct Coroutine { pub struct Coroutine {
...@@ -24,7 +24,7 @@ pub struct Coroutine { ...@@ -24,7 +24,7 @@ pub struct Coroutine {
/// ///
/// Servo needs this to be public in order to tell SpiderMonkey /// Servo needs this to be public in order to tell SpiderMonkey
/// about the stack bounds. /// about the stack bounds.
current_stack_segment: StackSegment, current_stack_segment: Stack,
/// Always valid if the task is alive and not running. /// Always valid if the task is alive and not running.
saved_context: Context saved_context: Context
...@@ -39,7 +39,7 @@ pub fn new(stack_pool: &mut StackPool, ...@@ -39,7 +39,7 @@ pub fn new(stack_pool: &mut StackPool,
Some(size) => size, Some(size) => size,
None => env::min_stack() None => env::min_stack()
}; };
let mut stack = stack_pool.take_segment(stack_size); let mut stack = stack_pool.take_stack(stack_size);
let initial_context = Context::new(start, &mut stack); let initial_context = Context::new(start, &mut stack);
Coroutine { Coroutine {
current_stack_segment: stack, current_stack_segment: stack,
...@@ -49,7 +49,7 @@ pub fn new(stack_pool: &mut StackPool, ...@@ -49,7 +49,7 @@ pub fn new(stack_pool: &mut StackPool,
pub fn empty() -> Coroutine { pub fn empty() -> Coroutine {
Coroutine { Coroutine {
current_stack_segment: StackSegment::new(0), current_stack_segment: Stack::new(0),
saved_context: Context::empty() saved_context: Context::empty()
} }
} }
...@@ -57,6 +57,6 @@ pub fn empty() -> Coroutine { ...@@ -57,6 +57,6 @@ pub fn empty() -> Coroutine {
/// Destroy coroutine and try to reuse std::stack segment. /// Destroy coroutine and try to reuse std::stack segment.
pub fn recycle(self, stack_pool: &mut StackPool) { pub fn recycle(self, stack_pool: &mut StackPool) {
let Coroutine { current_stack_segment, .. } = self; let Coroutine { current_stack_segment, .. } = self;
stack_pool.give_segment(current_stack_segment); stack_pool.give_stack(current_stack_segment);
} }
} }
...@@ -8,46 +8,101 @@ ...@@ -8,46 +8,101 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
use std::vec; use std::rt::env::max_cached_stacks;
use std::libc::{c_uint, uintptr_t}; use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable, MapNonStandardFlags};
#[cfg(not(windows))]
use std::libc::{MAP_STACK, MAP_PRIVATE, MAP_ANON};
use std::libc::{c_uint, c_int, c_void, uintptr_t};
pub struct StackSegment { /// A task's stack. The name "Stack" is a vestige of segmented stacks.
priv buf: ~[u8], pub struct Stack {
priv valgrind_id: c_uint priv buf: MemoryMap,
priv min_size: uint,
priv valgrind_id: c_uint,
} }
impl StackSegment { // Try to use MAP_STACK on platforms that support it (it's what we're doing
pub fn new(size: uint) -> StackSegment { // anyway), but some platforms don't support it at all. For example, it appears
unsafe { // that there's a bug in freebsd that MAP_STACK implies MAP_FIXED (so it always
// Create a block of uninitialized values let mut stack = vec::with_capacity(size); #[cfg(not(windows), not(target_os = "freebsd"))]
let mut stack = vec::with_capacity(size); #[cfg(not(windows), not(target_os = "freebsd"))]
stack.set_len(size); static STACK_FLAGS: c_int = MAP_STACK | MAP_PRIVATE | MAP_ANON;
#[cfg(target_os = "freebsd")]
static STACK_FLAGS: c_int = MAP_PRIVATE | MAP_ANON;
#[cfg(windows)]
static STACK_FLAGS: c_int = 0;
let mut stk = StackSegment { impl Stack {
buf: stack, pub fn new(size: uint) -> Stack {
valgrind_id: 0 // Map in a stack. Eventually we might be able to handle stack allocation failure, which
}; // would fail to spawn the task. But there's not many sensible things to do on OOM.
// Failure seems fine (and is what the old stack allocation did).
let stack = match MemoryMap::new(size, [MapReadable, MapWritable,
MapNonStandardFlags(STACK_FLAGS)]) {
Ok(map) => map,
Err(e) => fail!("Creating memory map for stack of size {} failed: {}", size, e)
};
// XXX: Using the FFI to call a C macro. Slow // Change the last page to be inaccessible. This is to provide safety; when an FFI
stk.valgrind_id = rust_valgrind_stack_register(stk.start(), stk.end()); // function overflows it will (hopefully) hit this guard page. It isn't guaranteed, but
return stk; // that's why FFI is unsafe. buf.data is guaranteed to be aligned properly.
if !protect_last_page(&stack) {
fail!("Could not memory-protect guard page. stack={:?}, errno={}",
stack, errno());
} }
let mut stk = Stack {
buf: stack,
min_size: size,
valgrind_id: 0
};
// XXX: Using the FFI to call a C macro. Slow
stk.valgrind_id = unsafe { rust_valgrind_stack_register(stk.start(), stk.end()) };
return stk;
} }
/// Point to the low end of the allocated stack /// Point to the low end of the allocated stack
pub fn start(&self) -> *uint { pub fn start(&self) -> *uint {
self.buf.as_ptr() as *uint self.buf.data as *uint
} }
/// Point one word beyond the high end of the allocated stack /// Point one word beyond the high end of the allocated stack
pub fn end(&self) -> *uint { pub fn end(&self) -> *uint {
unsafe { unsafe {
self.buf.as_ptr().offset(self.buf.len() as int) as *uint self.buf.data.offset(self.buf.len as int) as *uint
} }
} }
} }
impl Drop for StackSegment { // These use ToPrimitive so that we never need to worry about the sizes of whatever types these
// (which we would with scalar casts). It's either a wrapper for a scalar cast or failure: fast, or
// will fail during compilation.
// Make the lowest page of the mapping inaccessible (PROT_NONE) so that a
// runaway stack write faults instead of silently corrupting adjacent memory.
// Returns true on success, i.e. when mprotect(2) did not report failure (-1).
#[cfg(unix)]
fn protect_last_page(stack: &MemoryMap) -> bool {
    use std::libc::{mprotect, PROT_NONE, size_t};
    unsafe {
        // This may seem backwards: the start of the segment is the last page? Yes! The stack grows
        // from higher addresses (the end of the allocated block) to lower addresses (the start of
        // the allocated block).
        let last_page = stack.data as *c_void;
        // mprotect returns 0 on success and -1 on error, so != -1 means success.
        mprotect(last_page, page_size() as size_t, PROT_NONE) != -1
    }
}
// Windows counterpart of the unix version: turn the lowest page of the mapping
// into a guard by switching it to PAGE_NOACCESS with VirtualProtect.
// Returns true on success (VirtualProtect returns nonzero on success).
#[cfg(windows)]
fn protect_last_page(stack: &MemoryMap) -> bool {
    use std::libc::{VirtualProtect, PAGE_NOACCESS, SIZE_T, LPDWORD, DWORD};
    unsafe {
        // see above
        let last_page = stack.data as *mut c_void;
        // VirtualProtect requires an out-parameter for the previous protection.
        let mut old_prot: DWORD = 0;
        VirtualProtect(last_page, page_size() as SIZE_T, PAGE_NOACCESS,
                       &mut old_prot as LPDWORD) != 0
    }
}
impl Drop for Stack {
fn drop(&mut self) { fn drop(&mut self) {
unsafe { unsafe {
// XXX: Using the FFI to call a C macro. Slow // XXX: Using the FFI to call a C macro. Slow
...@@ -56,16 +111,30 @@ fn drop(&mut self) { ...@@ -56,16 +111,30 @@ fn drop(&mut self) {
} }
} }
pub struct StackPool(()); pub struct StackPool {
// Ideally this would be some datastructure that preserved ordering on Stack.min_size.
priv stacks: ~[Stack],
}
impl StackPool { impl StackPool {
pub fn new() -> StackPool { StackPool(()) } pub fn new() -> StackPool {
StackPool {
stacks: ~[],
}
}
pub fn take_segment(&self, min_size: uint) -> StackSegment { pub fn take_stack(&mut self, min_size: uint) -> Stack {
StackSegment::new(min_size) // Ideally this would be a binary search
match self.stacks.iter().position(|s| s.min_size < min_size) {
Some(idx) => self.stacks.swap_remove(idx),
None => Stack::new(min_size)
}
} }
pub fn give_segment(&self, _stack: StackSegment) { pub fn give_stack(&mut self, stack: Stack) {
if self.stacks.len() <= max_cached_stacks() {
self.stacks.push(stack)
}
} }
} }
......
...@@ -2863,6 +2863,7 @@ pub mod posix88 { ...@@ -2863,6 +2863,7 @@ pub mod posix88 {
pub static MAP_PRIVATE : c_int = 0x0002; pub static MAP_PRIVATE : c_int = 0x0002;
pub static MAP_FIXED : c_int = 0x0010; pub static MAP_FIXED : c_int = 0x0010;
pub static MAP_ANON : c_int = 0x1000; pub static MAP_ANON : c_int = 0x1000;
pub static MAP_STACK : c_int = 0;
pub static MAP_FAILED : *c_void = -1 as *c_void; pub static MAP_FAILED : *c_void = -1 as *c_void;
......
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
use prelude::*; use prelude::*;
use ptr; use ptr;
use str; use str;
use to_str; use fmt;
use unstable::finally::Finally; use unstable::finally::Finally;
use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst}; use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
...@@ -871,7 +871,7 @@ pub enum MapOption { ...@@ -871,7 +871,7 @@ pub enum MapOption {
MapOffset(uint), MapOffset(uint),
/// On POSIX, this can be used to specify the default flags passed to `mmap`. By default it uses /// On POSIX, this can be used to specify the default flags passed to `mmap`. By default it uses
/// `MAP_PRIVATE` and, if not using `MapFd`, `MAP_ANON`. This will override both of those. This /// `MAP_PRIVATE` and, if not using `MapFd`, `MAP_ANON`. This will override both of those. This
/// is platform-specific (the exact values used) and unused on Windows. /// is platform-specific (the exact values used) and ignored on Windows.
MapNonStandardFlags(c_int), MapNonStandardFlags(c_int),
} }
...@@ -911,23 +911,29 @@ pub enum MapError { ...@@ -911,23 +911,29 @@ pub enum MapError {
ErrMapViewOfFile(uint) ErrMapViewOfFile(uint)
} }
impl to_str::ToStr for MapError { impl fmt::Default for MapError {
fn to_str(&self) -> ~str { fn fmt(val: &MapError, out: &mut fmt::Formatter) {
match *self { let str = match *val {
ErrFdNotAvail => ~"fd not available for reading or writing", ErrFdNotAvail => "fd not available for reading or writing",
ErrInvalidFd => ~"Invalid fd", ErrInvalidFd => "Invalid fd",
ErrUnaligned => ~"Unaligned address, invalid flags, \ ErrUnaligned => "Unaligned address, invalid flags, negative length or unaligned offset",
negative length or unaligned offset", ErrNoMapSupport=> "File doesn't support mapping",
ErrNoMapSupport=> ~"File doesn't support mapping", ErrNoMem => "Invalid address, or not enough available memory",
ErrNoMem => ~"Invalid address, or not enough available memory", ErrUnsupProt => "Protection mode unsupported",
ErrUnknown(code) => format!("Unknown error={}", code), ErrUnsupOffset => "Offset in virtual memory mode is unsupported",
ErrUnsupProt => ~"Protection mode unsupported", ErrAlreadyExists => "File mapping for specified file already exists",
ErrUnsupOffset => ~"Offset in virtual memory mode is unsupported", ErrUnknown(code) => { write!(out.buf, "Unknown error = {}", code); return },
ErrAlreadyExists => ~"File mapping for specified file already exists", ErrVirtualAlloc(code) => { write!(out.buf, "VirtualAlloc failure = {}", code); return },
ErrVirtualAlloc(code) => format!("VirtualAlloc failure={}", code), ErrCreateFileMappingW(code) => {
ErrCreateFileMappingW(code) => format!("CreateFileMappingW failure={}", code), format!("CreateFileMappingW failure = {}", code);
ErrMapViewOfFile(code) => format!("MapViewOfFile failure={}", code) return
} },
ErrMapViewOfFile(code) => {
write!(out.buf, "MapViewOfFile failure = {}", code);
return
}
};
write!(out.buf, "{}", str);
} }
} }
...@@ -1130,8 +1136,7 @@ fn drop(&mut self) { ...@@ -1130,8 +1136,7 @@ fn drop(&mut self) {
unsafe { unsafe {
match self.kind { match self.kind {
MapVirtual => { MapVirtual => {
if libc::VirtualFree(self.data as *mut c_void, if libc::VirtualFree(self.data as *mut c_void, 0,
self.len as size_t,
libc::MEM_RELEASE) == FALSE { libc::MEM_RELEASE) == FALSE {
error!("VirtualFree failed: {}", errno()); error!("VirtualFree failed: {}", errno());
} }
...@@ -1487,7 +1492,7 @@ fn lseek_(fd: c_int, size: uint) { ...@@ -1487,7 +1492,7 @@ fn lseek_(fd: c_int, size: uint) {
MapOffset(size / 2) MapOffset(size / 2)
]) { ]) {
Ok(chunk) => chunk, Ok(chunk) => chunk,
Err(msg) => fail!(msg.to_str()) Err(msg) => fail!("{}", msg)
}; };
assert!(chunk.len > 0); assert!(chunk.len > 0);
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
//! Runtime environment settings //! Runtime environment settings
use from_str::FromStr; use from_str::from_str;
use option::{Some, None}; use option::{Some, None};
use os; use os;
...@@ -18,18 +18,25 @@ ...@@ -18,18 +18,25 @@
// They are expected to be initialized once then left alone. // They are expected to be initialized once then left alone.
static mut MIN_STACK: uint = 2 * 1024 * 1024; static mut MIN_STACK: uint = 2 * 1024 * 1024;
/// This default corresponds to 20M of cache per scheduler (at the default size).
static mut MAX_CACHED_STACKS: uint = 10;
static mut DEBUG_BORROW: bool = false; static mut DEBUG_BORROW: bool = false;
static mut POISON_ON_FREE: bool = false; static mut POISON_ON_FREE: bool = false;
pub fn init() { pub fn init() {
unsafe { unsafe {
match os::getenv("RUST_MIN_STACK") { match os::getenv("RUST_MIN_STACK") {
Some(s) => match FromStr::from_str(s) { Some(s) => match from_str(s) {
Some(i) => MIN_STACK = i, Some(i) => MIN_STACK = i,
None => () None => ()
}, },
None => () None => ()
} }
match os::getenv("RUST_MAX_CACHED_STACKS") {
Some(max) => MAX_CACHED_STACKS = from_str(max).expect("expected positive integer in \
RUST_MAX_CACHED_STACKS"),
None => ()
}
match os::getenv("RUST_DEBUG_BORROW") { match os::getenv("RUST_DEBUG_BORROW") {
Some(_) => DEBUG_BORROW = true, Some(_) => DEBUG_BORROW = true,
None => () None => ()
...@@ -45,6 +52,10 @@ pub fn min_stack() -> uint { ...@@ -45,6 +52,10 @@ pub fn min_stack() -> uint {
unsafe { MIN_STACK } unsafe { MIN_STACK }
} }
/// Maximum number of stacks a `StackPool` will cache per scheduler,
/// overridable at startup via the RUST_MAX_CACHED_STACKS environment variable.
pub fn max_cached_stacks() -> uint {
    // Reads a mutable static; expected to be initialized once by init() and
    // then only read, hence the unsafe block.
    unsafe { MAX_CACHED_STACKS }
}
pub fn debug_borrow() -> bool { pub fn debug_borrow() -> bool {
unsafe { DEBUG_BORROW } unsafe { DEBUG_BORROW }
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册