From dee7fa58dd4203a19b83ad47c3b0a0efb92c0e9a Mon Sep 17 00:00:00 2001 From: Corey Richardson Date: Wed, 13 Nov 2013 05:21:38 -0500 Subject: [PATCH] Use `mmap` to map in task stacks and guard page Also implement caching of stacks. --- src/libgreen/context.rs | 6 +- src/libgreen/coroutine.rs | 10 ++-- src/libgreen/stack.rs | 121 ++++++++++++++++++++++++++++++-------- src/libstd/libc.rs | 1 + src/libstd/os.rs | 49 ++++++++------- src/libstd/rt/env.rs | 15 ++++- 6 files changed, 144 insertions(+), 58 deletions(-) diff --git a/src/libgreen/context.rs b/src/libgreen/context.rs index 8530e3e837e..3f2f74bbb8d 100644 --- a/src/libgreen/context.rs +++ b/src/libgreen/context.rs @@ -12,10 +12,9 @@ use std::uint; use std::cast::{transmute, transmute_mut_unsafe, transmute_region, transmute_mut_region}; +use stack::Stack; use std::unstable::stack; -use stack::StackSegment; - // FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing // SSE regs. It would be marginally better not to do this. In C++ we // use an attribute on a struct. @@ -41,7 +40,7 @@ pub fn empty() -> Context { } /// Create a new context that will resume execution by running proc() - pub fn new(start: proc(), stack: &mut StackSegment) -> Context { + pub fn new(start: proc(), stack: &mut Stack) -> Context { // The C-ABI function that is the task entry point // // Note that this function is a little sketchy. We're taking a @@ -79,6 +78,7 @@ pub fn new(start: proc(), stack: &mut StackSegment) -> Context { // be passed to the spawn function. Another unfortunate // allocation let start = ~start; + initialize_call_frame(&mut *regs, task_start_wrapper as *c_void, unsafe { transmute(&*start) }, diff --git a/src/libgreen/coroutine.rs b/src/libgreen/coroutine.rs index 7bc5d0accfe..3d7dc58a1b2 100644 --- a/src/libgreen/coroutine.rs +++ b/src/libgreen/coroutine.rs @@ -14,7 +14,7 @@ use std::rt::env; use context::Context; -use stack::{StackPool, StackSegment}; +use stack::{StackPool, Stack}; /// A coroutine is nothing more than a (register context, stack) pair. pub struct Coroutine { @@ -24,7 +24,7 @@ pub struct Coroutine { /// /// Servo needs this to be public in order to tell SpiderMonkey /// about the stack bounds. - current_stack_segment: StackSegment, + current_stack_segment: Stack, /// Always valid if the task is alive and not running. saved_context: Context @@ -39,7 +39,7 @@ pub fn new(stack_pool: &mut StackPool, Some(size) => size, None => env::min_stack() }; - let mut stack = stack_pool.take_segment(stack_size); + let mut stack = stack_pool.take_stack(stack_size); let initial_context = Context::new(start, &mut stack); Coroutine { current_stack_segment: stack, @@ -49,7 +49,7 @@ pub fn new(stack_pool: &mut StackPool, pub fn empty() -> Coroutine { Coroutine { - current_stack_segment: StackSegment::new(0), + current_stack_segment: Stack::new(0), saved_context: Context::empty() } } @@ -57,6 +57,6 @@ pub fn empty() -> Coroutine { /// Destroy coroutine and try to reuse std::stack segment. pub fn recycle(self, stack_pool: &mut StackPool) { let Coroutine { current_stack_segment, .. } = self; - stack_pool.give_segment(current_stack_segment); + stack_pool.give_stack(current_stack_segment); } } diff --git a/src/libgreen/stack.rs b/src/libgreen/stack.rs index 7e6dd02dd67..a5d5174b91b 100644 --- a/src/libgreen/stack.rs +++ b/src/libgreen/stack.rs @@ -8,46 +8,101 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::vec;
-use std::libc::{c_uint, uintptr_t};
+use std::rt::env::max_cached_stacks;
+use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable, MapNonStandardFlags};
+#[cfg(not(windows))]
+use std::libc::{MAP_STACK, MAP_PRIVATE, MAP_ANON};
+use std::libc::{c_uint, c_int, c_void, uintptr_t};
 
-pub struct StackSegment {
-    priv buf: ~[u8],
-    priv valgrind_id: c_uint
+/// A task's stack. The name "Stack" is a vestige of segmented stacks.
+pub struct Stack {
+    priv buf: MemoryMap,
+    priv min_size: uint,
+    priv valgrind_id: c_uint,
 }
 
-impl StackSegment {
-    pub fn new(size: uint) -> StackSegment {
-        unsafe {
-            // Crate a block of uninitialized values
-            let mut stack = vec::with_capacity(size);
-            stack.set_len(size);
+// Try to use MAP_STACK on platforms that support it (it's what we're doing
+// anyway), but some platforms don't support it at all. For example, it appears
+// that there's a bug in FreeBSD where MAP_STACK implies MAP_FIXED (so it always
+// fails): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html
+#[cfg(not(windows), not(target_os = "freebsd"))]
+static STACK_FLAGS: c_int = MAP_STACK | MAP_PRIVATE | MAP_ANON;
+#[cfg(target_os = "freebsd")]
+static STACK_FLAGS: c_int = MAP_PRIVATE | MAP_ANON;
+#[cfg(windows)]
+static STACK_FLAGS: c_int = 0;
 
-            let mut stk = StackSegment {
-                buf: stack,
-                valgrind_id: 0
-            };
+impl Stack {
+    pub fn new(size: uint) -> Stack {
+        // Map in a stack. Eventually we might be able to handle stack allocation failure, in
+        // which case spawning the task would fail. But there aren't many sensible things to do
+        // on OOM; failing seems fine (and is what the old stack allocation did).
+        let stack = match MemoryMap::new(size, [MapReadable, MapWritable,
+                                                MapNonStandardFlags(STACK_FLAGS)]) {
+            Ok(map) => map,
+            Err(e) => fail!("Creating memory map for stack of size {} failed: {}", size, e)
+        };
 
-            // XXX: Using the FFI to call a C macro. Slow
-            stk.valgrind_id = rust_valgrind_stack_register(stk.start(), stk.end());
-            return stk;
+        // Change the last page to be inaccessible. This is to provide safety; when an FFI
+        // function overflows the stack it will (hopefully) hit this guard page. It isn't
+        // guaranteed, but that's why FFI is unsafe. buf.data is guaranteed to be aligned properly.
+        if !protect_last_page(&stack) {
+            fail!("Could not memory-protect guard page. stack={:?}, errno={}",
+                  stack, errno());
         }
+
+        let mut stk = Stack {
+            buf: stack,
+            min_size: size,
+            valgrind_id: 0
+        };
+
+        // XXX: Using the FFI to call a C macro. Slow
+        stk.valgrind_id = unsafe { rust_valgrind_stack_register(stk.start(), stk.end()) };
+        return stk;
     }
 
     /// Point to the low end of the allocated stack
     pub fn start(&self) -> *uint {
-        self.buf.as_ptr() as *uint
+        self.buf.data as *uint
     }
 
     /// Point one word beyond the high end of the allocated stack
     pub fn end(&self) -> *uint {
         unsafe {
-            self.buf.as_ptr().offset(self.buf.len() as int) as *uint
+            self.buf.data.offset(self.buf.len as int) as *uint
         }
     }
 }
 
-impl Drop for StackSegment {
+// These use ToPrimitive so that we never need to worry about the sizes of whatever types these
+// are (which we would with scalar casts). It's either a wrapper for a scalar cast (fast) or it
+// will fail during compilation.
+#[cfg(unix)]
+fn protect_last_page(stack: &MemoryMap) -> bool {
+    use std::libc::{mprotect, PROT_NONE, size_t};
+    unsafe {
+        // This may seem backwards: the start of the segment is the last page? Yes! The stack grows
+        // from higher addresses (the end of the allocated block) to lower addresses (the start of
+        // the allocated block).
+        let last_page = stack.data as *c_void;
+        mprotect(last_page, page_size() as size_t, PROT_NONE) != -1
+    }
+}
+
+#[cfg(windows)]
+fn protect_last_page(stack: &MemoryMap) -> bool {
+    use std::libc::{VirtualProtect, PAGE_NOACCESS, SIZE_T, LPDWORD, DWORD};
+    unsafe {
+        // see above
+        let last_page = stack.data as *mut c_void;
+        let mut old_prot: DWORD = 0;
+        VirtualProtect(last_page, page_size() as SIZE_T, PAGE_NOACCESS,
+                       &mut old_prot as LPDWORD) != 0
+    }
+}
+
+impl Drop for Stack {
     fn drop(&mut self) {
         unsafe {
             // XXX: Using the FFI to call a C macro. Slow
@@ -56,16 +111,30 @@ fn drop(&mut self) {
     }
 }
 
-pub struct StackPool(());
+pub struct StackPool {
+    // Ideally this would be some data structure that preserves ordering on Stack.min_size.
+    priv stacks: ~[Stack],
+}
 
 impl StackPool {
-    pub fn new() -> StackPool { StackPool(()) }
+    pub fn new() -> StackPool {
+        StackPool {
+            stacks: ~[],
+        }
+    }
 
-    pub fn take_segment(&self, min_size: uint) -> StackSegment {
-        StackSegment::new(min_size)
+    pub fn take_stack(&mut self, min_size: uint) -> Stack {
+        // Ideally this would be a binary search
+        match self.stacks.iter().position(|s| min_size <= s.min_size) {
+            Some(idx) => self.stacks.swap_remove(idx),
+            None => Stack::new(min_size)
+        }
     }
 
-    pub fn give_segment(&self, _stack: StackSegment) {
+    pub fn give_stack(&mut self, stack: Stack) {
+        if self.stacks.len() < max_cached_stacks() {
+            self.stacks.push(stack)
+        }
     }
 }
diff --git a/src/libstd/libc.rs b/src/libstd/libc.rs
index a398835824b..d5f185880fa 100644
--- a/src/libstd/libc.rs
+++ b/src/libstd/libc.rs
@@ -2863,6 +2863,7 @@ pub mod posix88 {
         pub static MAP_PRIVATE : c_int = 0x0002;
         pub static MAP_FIXED : c_int = 0x0010;
         pub static MAP_ANON : c_int = 0x1000;
+        pub static MAP_STACK : c_int = 0;
 
         pub static MAP_FAILED : *c_void = -1 as *c_void;
 
diff --git a/src/libstd/os.rs b/src/libstd/os.rs
index cdf0c3b6442..b594b91d2dc 100644
--- a/src/libstd/os.rs
+++ b/src/libstd/os.rs
@@ -39,7 +39,7 @@
 use prelude::*;
 use ptr;
 use str;
-use to_str;
+use fmt;
 use unstable::finally::Finally;
 use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
 
@@ -871,7 +871,7 @@ pub enum MapOption {
     MapOffset(uint),
     /// On POSIX, this can be used to specify the default flags passed to `mmap`. By default it uses
     /// `MAP_PRIVATE` and, if not using `MapFd`, `MAP_ANON`. This will override both of those. This
-    /// is platform-specific (the exact values used) and unused on Windows.
+    /// is platform-specific (the exact values used) and ignored on Windows.
     MapNonStandardFlags(c_int),
 }
 
@@ -911,23 +911,29 @@ pub enum MapError {
     ErrMapViewOfFile(uint)
 }
 
-impl to_str::ToStr for MapError {
-    fn to_str(&self) -> ~str {
-        match *self {
-            ErrFdNotAvail => ~"fd not available for reading or writing",
-            ErrInvalidFd => ~"Invalid fd",
-            ErrUnaligned => ~"Unaligned address, invalid flags, \
-                              negative length or unaligned offset",
-            ErrNoMapSupport=> ~"File doesn't support mapping",
-            ErrNoMem => ~"Invalid address, or not enough available memory",
-            ErrUnknown(code) => format!("Unknown error={}", code),
-            ErrUnsupProt => ~"Protection mode unsupported",
-            ErrUnsupOffset => ~"Offset in virtual memory mode is unsupported",
-            ErrAlreadyExists => ~"File mapping for specified file already exists",
-            ErrVirtualAlloc(code) => format!("VirtualAlloc failure={}", code),
-            ErrCreateFileMappingW(code) => format!("CreateFileMappingW failure={}", code),
-            ErrMapViewOfFile(code) => format!("MapViewOfFile failure={}", code)
-        }
+impl fmt::Default for MapError {
+    fn fmt(val: &MapError, out: &mut fmt::Formatter) {
+        let str = match *val {
+            ErrFdNotAvail => "fd not available for reading or writing",
+            ErrInvalidFd => "Invalid fd",
+            ErrUnaligned => "Unaligned address, invalid flags, negative length or unaligned offset",
+            ErrNoMapSupport=> "File doesn't support mapping",
+            ErrNoMem => "Invalid address, or not enough available memory",
+            ErrUnsupProt => "Protection mode unsupported",
+            ErrUnsupOffset => "Offset in virtual memory mode is unsupported",
+            ErrAlreadyExists => "File mapping for specified file already exists",
+            ErrUnknown(code) => { write!(out.buf, "Unknown error = {}", code); return },
+            ErrVirtualAlloc(code) => { write!(out.buf, "VirtualAlloc failure = {}", code); return },
+            ErrCreateFileMappingW(code) => {
+                write!(out.buf, "CreateFileMappingW failure = {}", code);
+                return
+            },
+            ErrMapViewOfFile(code) => {
+                write!(out.buf, "MapViewOfFile failure = {}", code);
+                return
+            }
+        };
+        write!(out.buf, "{}", str);
     }
 }
 
@@ -1130,8 +1136,7 @@ fn drop(&mut self) {
         unsafe {
             match self.kind {
                 MapVirtual => {
-                    if libc::VirtualFree(self.data as *mut c_void,
-                                         self.len as size_t,
+                    if libc::VirtualFree(self.data as *mut c_void, 0,
                                          libc::MEM_RELEASE) == FALSE {
                         error!("VirtualFree failed: {}", errno());
                     }
@@ -1487,7 +1492,7 @@ fn lseek_(fd: c_int, size: uint) {
             MapOffset(size / 2)
         ]) {
             Ok(chunk) => chunk,
-            Err(msg) => fail!("{}", msg)
         };
         assert!(chunk.len > 0);
 
diff --git a/src/libstd/rt/env.rs b/src/libstd/rt/env.rs
index f3fa482b18c..729e377e1af 100644
--- a/src/libstd/rt/env.rs
+++ b/src/libstd/rt/env.rs
@@ -10,7 +10,7 @@
 
 //! Runtime environment settings
 
-use from_str::FromStr;
+use from_str::from_str;
 use option::{Some, None};
 use os;
 
@@ -18,18 +18,25 @@
 // They are expected to be initialized once then left alone.
 static mut MIN_STACK: uint = 2 * 1024 * 1024;
+/// This default corresponds to 20MB of stack cache per scheduler (at the default stack size).
+static mut MAX_CACHED_STACKS: uint = 10; static mut DEBUG_BORROW: bool = false; static mut POISON_ON_FREE: bool = false; pub fn init() { unsafe { match os::getenv("RUST_MIN_STACK") { - Some(s) => match FromStr::from_str(s) { + Some(s) => match from_str(s) { Some(i) => MIN_STACK = i, None => () }, None => () } + match os::getenv("RUST_MAX_CACHED_STACKS") { + Some(max) => MAX_CACHED_STACKS = from_str(max).expect("expected positive integer in \ + RUST_MAX_CACHED_STACKS"), + None => () + } match os::getenv("RUST_DEBUG_BORROW") { Some(_) => DEBUG_BORROW = true, None => () @@ -45,6 +52,10 @@ pub fn min_stack() -> uint { unsafe { MIN_STACK } } +pub fn max_cached_stacks() -> uint { + unsafe { MAX_CACHED_STACKS } +} + pub fn debug_borrow() -> bool { unsafe { DEBUG_BORROW } } -- GitLab
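
A note on the guard-page technique the stack.rs changes rely on: stacks grow downward, so the
lowest page of the mmap'd region is the first one a runaway stack reaches, and revoking all access
to it turns a silent overflow into an immediate fault. Below is a minimal standalone sketch of the
same idea in present-day Rust against the `libc` crate (the patch itself targets 2013-era Rust and
`std::os::MemoryMap`). It is Unix-only and illustrative: the function name `alloc_stack_with_guard`
is made up here, not part of the patch.

use std::io::Error;
use std::ptr;

/// Map `size` bytes for a stack and make the lowest page a PROT_NONE guard.
/// Mirrors the patch's Stack::new + protect_last_page, roughly.
fn alloc_stack_with_guard(size: usize) -> Result<(*mut u8, usize), Error> {
    unsafe {
        let page = libc::sysconf(libc::_SC_PAGESIZE) as usize;
        // Round up to whole pages; like the patch, the guard consumes the
        // lowest page of the region rather than adding an extra one.
        let len = ((size + page - 1) / page * page).max(2 * page);
        let p = libc::mmap(ptr::null_mut(), len,
                           libc::PROT_READ | libc::PROT_WRITE,
                           libc::MAP_PRIVATE | libc::MAP_ANON, -1, 0);
        if p == libc::MAP_FAILED {
            return Err(Error::last_os_error());
        }
        // The stack grows from high addresses to low, so the *first* page of
        // the mapping is the overflow side: make it inaccessible.
        if libc::mprotect(p, page, libc::PROT_NONE) != 0 {
            let e = Error::last_os_error();
            libc::munmap(p, len);
            return Err(e);
        }
        Ok((p as *mut u8, len))
    }
}

fn main() {
    let (stack, len) = alloc_stack_with_guard(64 * 1024).expect("mmap failed");
    println!("mapped {} bytes at {:p}; lowest page is the guard", len, stack);
    unsafe { libc::munmap(stack as *mut _, len); }
}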
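
The StackPool change is a simple size-aware free list: take_stack reuses any cached stack at least
as large as the request, and give_stack keeps at most max_cached_stacks() stacks around instead of
unmapping every stack on task exit. A minimal sketch of that policy in modern, dependency-free
Rust follows; the Stack type here is a stand-in holding only its size, and the names follow the
patch but the code is illustrative.

struct Stack {
    min_size: usize, // the size the stack was requested with; the mapping itself is elided
}

struct StackPool {
    stacks: Vec<Stack>,
    max_cached: usize, // the patch reads this from RUST_MAX_CACHED_STACKS, default 10
}

impl StackPool {
    fn take_stack(&mut self, min_size: usize) -> Stack {
        // Linear scan for a cached stack that is big enough; the patch notes
        // a binary search over a size-ordered structure would be better.
        match self.stacks.iter().position(|s| min_size <= s.min_size) {
            Some(idx) => self.stacks.swap_remove(idx),
            None => Stack { min_size }, // cache miss: allocate fresh (mmap in the patch)
        }
    }

    fn give_stack(&mut self, stack: Stack) {
        if self.stacks.len() < self.max_cached {
            self.stacks.push(stack); // otherwise drop it, which unmaps the memory
        }
    }
}

fn main() {
    let mut pool = StackPool { stacks: Vec::new(), max_cached: 10 };
    let s = pool.take_stack(4096); // nothing cached yet: allocates
    pool.give_stack(s);            // cached for later reuse
    let s = pool.take_stack(2048); // reuses the 4096-byte stack
    assert!(s.min_size >= 2048);
}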
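
Finally, the env.rs change follows the runtime's usual pattern for tunables: a static with a
default, overridden once at startup from an environment variable. In modern Rust the same
read-with-default looks roughly like this; RUST_MAX_CACHED_STACKS and the default of 10 come from
the patch, the rest is illustrative.

fn max_cached_stacks() -> usize {
    // Fall back to the default when the variable is unset; fail loudly (as
    // the patch does) when it is set but not a positive integer.
    std::env::var("RUST_MAX_CACHED_STACKS")
        .ok()
        .map(|s| s.parse().expect("expected positive integer in RUST_MAX_CACHED_STACKS"))
        .unwrap_or(10)
}

fn main() {
    println!("caching up to {} stacks per pool", max_cached_stacks());
}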