Commit 91a2c0d5 authored by Aaron Turon

Remove libgreen

With runtime removal complete, there is no longer any reason to provide
libgreen.

[breaking-change]
Parent 3ee916e5
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This is a basic event loop implementation not meant for any "real purposes"
//! other than testing the scheduler and proving that it's possible to have a
//! pluggable event loop.
//!
//! This implementation is also used as the fallback implementation of an event
//! loop if no other one is provided (and M:N scheduling is desired).
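// A minimal usage sketch, mirroring the tests at the bottom of this file
// (`SchedPool`, `PoolConfig`, and `TaskOpts` come from the surrounding libgreen
// crate; this is illustrative only, not part of the module's API surface):
//
//     let mut pool = SchedPool::new(PoolConfig {
//         threads: 1,
//         event_loop_factory: basic::event_loop,
//     });
//     pool.spawn(TaskOpts::new(), proc() { /* green task body */ });
//     pool.shutdown();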
use self::Message::*;
use alloc::arc::Arc;
use std::sync::atomic;
use std::mem;
use std::rt::rtio::{EventLoop, RemoteCallback};
use std::rt::rtio::{PausableIdleCallback, Callback};
use std::rt::exclusive::Exclusive;
/// This is the only exported function from this module.
pub fn event_loop() -> Box<EventLoop + Send> {
box BasicLoop::new() as Box<EventLoop + Send>
}
struct BasicLoop {
work: Vec<proc(): Send>, // pending work
remotes: Vec<(uint, Box<Callback + Send>)>,
next_remote: uint,
messages: Arc<Exclusive<Vec<Message>>>,
idle: Option<Box<Callback + Send>>,
idle_active: Option<Arc<atomic::AtomicBool>>,
}
enum Message { RunRemote(uint), RemoveRemote(uint) }
impl BasicLoop {
fn new() -> BasicLoop {
BasicLoop {
work: vec![],
idle: None,
idle_active: None,
next_remote: 0,
remotes: vec![],
messages: Arc::new(Exclusive::new(Vec::new())),
}
}
/// Process everything in the work queue (continually)
fn work(&mut self) {
while self.work.len() > 0 {
for work in mem::replace(&mut self.work, vec![]).into_iter() {
work();
}
}
}
fn remote_work(&mut self) {
let messages = unsafe {
mem::replace(&mut *self.messages.lock(), Vec::new())
};
for message in messages.into_iter() {
self.message(message);
}
}
fn message(&mut self, message: Message) {
match message {
RunRemote(i) => {
match self.remotes.iter_mut().find(|& &(id, _)| id == i) {
Some(&(_, ref mut f)) => f.call(),
None => panic!("bad remote: {}", i),
}
}
RemoveRemote(i) => {
match self.remotes.iter().position(|&(id, _)| id == i) {
Some(i) => { self.remotes.remove(i).unwrap(); }
None => panic!("bad remote: {}", i),
}
}
}
}
/// Run the idle callback if one is registered
fn idle(&mut self) {
match self.idle {
Some(ref mut idle) => {
if self.idle_active.as_ref().unwrap().load(atomic::SeqCst) {
idle.call();
}
}
None => {}
}
}
fn has_idle(&self) -> bool {
self.idle.is_some() && self.idle_active.as_ref().unwrap().load(atomic::SeqCst)
}
}
impl EventLoop for BasicLoop {
fn run(&mut self) {
// Not exactly efficient, but it gets the job done.
while self.remotes.len() > 0 || self.work.len() > 0 || self.has_idle() {
self.work();
self.remote_work();
if self.has_idle() {
self.idle();
continue
}
unsafe {
let messages = self.messages.lock();
// We block here if we have no messages to process and we may
// receive a message at a later date
if self.remotes.len() > 0 && messages.len() == 0 &&
self.work.len() == 0 {
messages.wait()
}
}
}
}
fn callback(&mut self, f: proc():Send) {
self.work.push(f);
}
// FIXME: Seems like a really weird requirement for an event loop to provide.
fn pausable_idle_callback(&mut self, cb: Box<Callback + Send>)
-> Box<PausableIdleCallback + Send> {
rtassert!(self.idle.is_none());
self.idle = Some(cb);
let a = Arc::new(atomic::AtomicBool::new(true));
self.idle_active = Some(a.clone());
box BasicPausable { active: a } as Box<PausableIdleCallback + Send>
}
fn remote_callback(&mut self, f: Box<Callback + Send>)
-> Box<RemoteCallback + Send> {
let id = self.next_remote;
self.next_remote += 1;
self.remotes.push((id, f));
box BasicRemote::new(self.messages.clone(), id) as
Box<RemoteCallback + Send>
}
fn has_active_io(&self) -> bool { false }
}
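// Illustrative sketch of how the two callback paths above fit together (the
// `Callback` implementation `wake_up` is hypothetical, not defined in this file):
//
//     let mut el = event_loop();
//     el.callback(proc() { /* queued work, runs on the loop thread */ });
//     let mut remote = el.remote_callback(box wake_up as Box<Callback + Send>);
//     remote.fire(); // pushes RunRemote(id) and signals the loop from any thread
//     el.run();      // drains local work, dispatches remotes, blocks when idle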
struct BasicRemote {
queue: Arc<Exclusive<Vec<Message>>>,
id: uint,
}
impl BasicRemote {
fn new(queue: Arc<Exclusive<Vec<Message>>>, id: uint) -> BasicRemote {
BasicRemote { queue: queue, id: id }
}
}
impl RemoteCallback for BasicRemote {
fn fire(&mut self) {
let mut queue = unsafe { self.queue.lock() };
queue.push(RunRemote(self.id));
queue.signal();
}
}
impl Drop for BasicRemote {
fn drop(&mut self) {
let mut queue = unsafe { self.queue.lock() };
queue.push(RemoveRemote(self.id));
queue.signal();
}
}
struct BasicPausable {
active: Arc<atomic::AtomicBool>,
}
impl PausableIdleCallback for BasicPausable {
fn pause(&mut self) {
self.active.store(false, atomic::SeqCst);
}
fn resume(&mut self) {
self.active.store(true, atomic::SeqCst);
}
}
impl Drop for BasicPausable {
fn drop(&mut self) {
self.active.store(false, atomic::SeqCst);
}
}
#[cfg(test)]
mod test {
use std::rt::task::TaskOpts;
use basic;
use PoolConfig;
use SchedPool;
fn pool() -> SchedPool {
SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: basic::event_loop,
})
}
fn run(f: proc():Send) {
let mut pool = pool();
pool.spawn(TaskOpts::new(), f);
pool.shutdown();
}
#[test]
fn smoke() {
run(proc() {});
}
#[test]
fn some_channels() {
run(proc() {
let (tx, rx) = channel();
spawn(proc() {
tx.send(());
});
rx.recv();
});
}
#[test]
fn multi_thread() {
let mut pool = SchedPool::new(PoolConfig {
threads: 2,
event_loop_factory: basic::event_loop,
});
for _ in range(0u, 20) {
pool.spawn(TaskOpts::new(), proc() {
let (tx, rx) = channel();
spawn(proc() {
tx.send(());
});
rx.recv();
});
}
pool.shutdown();
}
}
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use stack::Stack;
use std::uint;
use std::mem::transmute;
use std::rt::stack;
use std::raw;
#[cfg(target_arch = "x86_64")]
use std::simd;
use libc;
// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
// SSE regs. It would be marginally better not to do this. In C++ we
// use an attribute on a struct.
// FIXME #7761: It would be nice to define regs as `Box<Option<Registers>>`
// since the registers are sometimes empty, but the discriminant would
// then misalign the regs again.
pub struct Context {
/// Hold the registers while the task or scheduler is suspended
regs: Box<Registers>,
/// Lower bound and upper bound for the stack
stack_bounds: Option<(uint, uint)>,
}
pub type InitFn = extern "C" fn(uint, *mut (), *mut ()) -> !;
impl Context {
pub fn empty() -> Context {
Context {
regs: new_regs(),
stack_bounds: None,
}
}
/// Create a new context that will resume execution by running proc()
///
/// The `init` function will be run with `arg` and the `start` procedure
/// split up into code and env pointers. It is required that the `init`
/// function never return.
///
/// FIXME: this is basically an awful interface. The main reason for
/// this is to reduce the number of allocations made when a green
/// task is spawned as much as possible
pub fn new(init: InitFn, arg: uint, start: proc():Send,
stack: &mut Stack) -> Context {
let sp: *const uint = stack.end();
let sp: *mut uint = sp as *mut uint;
// Save and then immediately load the current context,
// which we will then modify to call the given function when restored
let mut regs = new_regs();
initialize_call_frame(&mut *regs,
init,
arg,
unsafe { transmute(start) },
sp);
// Scheduler tasks don't have a stack in the "we allocated it" sense,
// but rather they run on pthreads stacks. We have complete control over
// them in terms of the code running on them (and hopefully they don't
// overflow). Additionally, their coroutine stacks are listed as being
// zero-length, so that's how we detect what's what here.
let stack_base: *const uint = stack.start();
let bounds = if sp as libc::uintptr_t == stack_base as libc::uintptr_t {
None
} else {
Some((stack_base as uint, sp as uint))
};
return Context {
regs: regs,
stack_bounds: bounds,
}
}
/* Switch contexts
Suspend the current execution context and resume another by
saving the registers values of the executing thread to a Context
then loading the registers from a previously saved Context.
*/
pub fn swap(out_context: &mut Context, in_context: &Context) {
rtdebug!("swapping contexts");
let out_regs: &mut Registers = match out_context {
&Context { regs: box ref mut r, .. } => r
};
let in_regs: &Registers = match in_context {
&Context { regs: box ref r, .. } => r
};
rtdebug!("noting the stack limit and doing raw swap");
unsafe {
// Right before we switch to the new context, set the new context's
// stack limit in the OS-specified TLS slot. This also means that
// we cannot call any more rust functions after record_stack_bounds
// returns because they would all likely panic due to the limit being
// invalid for the current task. Lucky for us `rust_swap_registers`
// is a C function so we don't have to worry about that!
match in_context.stack_bounds {
Some((lo, hi)) => stack::record_rust_managed_stack_bounds(lo, hi),
// If we're going back to one of the original contexts or
// something that's possibly not a "normal task", then reset
// the stack limit to 0 to make morestack never panic
None => stack::record_rust_managed_stack_bounds(0, uint::MAX),
}
rust_swap_registers(out_regs, in_regs);
}
}
}
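// Usage sketch (illustrative; `init_fn`, `arg`, and `start_proc` are placeholders
// for what the green scheduler actually passes in):
//
//     let mut pool = StackPool::new();
//     let mut stack = pool.take_stack(stack_size);
//     let task_ctx = Context::new(init_fn, arg, start_proc, &mut stack);
//     let mut scheduler_ctx = Context::empty();
//     // Save the current registers into `scheduler_ctx` and resume `task_ctx`.
//     Context::swap(&mut scheduler_ctx, &task_ctx);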
#[link(name = "context_switch", kind = "static")]
extern {
fn rust_swap_registers(out_regs: *mut Registers, in_regs: *const Registers);
}
// Register contexts used in various architectures
//
// These structures all represent a context of one task throughout its
// execution. Each struct is a representation of the architecture's register
// set. When swapping between tasks, these register sets are used to save off
// the current registers into one struct, and load them all from another.
//
// Note that this is only used for context switching, which means that some of
// the registers may go unused. For example, for architectures with
// callee/caller saved registers, the context will only reflect the callee-saved
// registers. This is because the caller saved registers are already stored
// elsewhere on the stack (if it was necessary anyway).
//
// Additionally, there may be fields on various architectures which are unused
// entirely because they only reflect what is theoretically possible for a
// "complete register set" to show, but user-space cannot alter these registers.
// An example of this would be the segment selectors for x86.
//
// These structures/functions are roughly in-sync with the source files inside
// of src/rt/arch/$arch. The only currently used function from those folders is
// the `rust_swap_registers` function, but that's only because for now segmented
// stacks are disabled.
#[cfg(target_arch = "x86")]
#[repr(C)]
struct Registers {
eax: u32, ebx: u32, ecx: u32, edx: u32,
ebp: u32, esi: u32, edi: u32, esp: u32,
cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16,
eflags: u32, eip: u32
}
#[cfg(target_arch = "x86")]
fn new_regs() -> Box<Registers> {
box Registers {
eax: 0, ebx: 0, ecx: 0, edx: 0,
ebp: 0, esi: 0, edi: 0, esp: 0,
cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0,
eflags: 0, eip: 0
}
}
#[cfg(target_arch = "x86")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
let sp = sp as *mut uint;
// x86 has interesting stack alignment requirements, so do some alignment
// plus some offsetting to figure out what the actual stack should be.
let sp = align_down(sp);
let sp = mut_offset(sp, -4);
unsafe { *mut_offset(sp, 2) = procedure.env as uint };
unsafe { *mut_offset(sp, 1) = procedure.code as uint };
unsafe { *mut_offset(sp, 0) = arg as uint };
let sp = mut_offset(sp, -1);
unsafe { *sp = 0 }; // The final return address
regs.esp = sp as u32;
regs.eip = fptr as u32;
// Last base pointer on the stack is 0
regs.ebp = 0;
}
// Windows requires saving more registers (both general and XMM), so the Windows
// register context must be larger.
#[cfg(all(windows, target_arch = "x86_64"))]
#[repr(C)]
struct Registers {
gpr:[libc::uintptr_t, ..14],
_xmm:[simd::u32x4, ..10]
}
#[cfg(all(not(windows), target_arch = "x86_64"))]
#[repr(C)]
struct Registers {
gpr:[libc::uintptr_t, ..10],
_xmm:[simd::u32x4, ..6]
}
#[cfg(all(windows, target_arch = "x86_64"))]
fn new_regs() -> Box<Registers> {
box() Registers {
gpr:[0,..14],
_xmm:[simd::u32x4(0,0,0,0),..10]
}
}
#[cfg(all(not(windows), target_arch = "x86_64"))]
fn new_regs() -> Box<Registers> {
box() Registers {
gpr:[0,..10],
_xmm:[simd::u32x4(0,0,0,0),..6]
}
}
#[cfg(target_arch = "x86_64")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
extern { fn rust_bootstrap_green_task(); }
// Redefinitions from rt/arch/x86_64/regs.h
static RUSTRT_RSP: uint = 1;
static RUSTRT_IP: uint = 8;
static RUSTRT_RBP: uint = 2;
static RUSTRT_R12: uint = 4;
static RUSTRT_R13: uint = 5;
static RUSTRT_R14: uint = 6;
static RUSTRT_R15: uint = 7;
let sp = align_down(sp);
let sp = mut_offset(sp, -1);
// The final return address. 0 indicates the bottom of the stack
unsafe { *sp = 0; }
rtdebug!("creating call frame");
rtdebug!("fptr {:#x}", fptr as libc::uintptr_t);
rtdebug!("arg {:#x}", arg);
rtdebug!("sp {}", sp);
// These registers are frobbed by rust_bootstrap_green_task into the right
// location so we can invoke the "real init function", `fptr`.
regs.gpr[RUSTRT_R12] = arg as libc::uintptr_t;
regs.gpr[RUSTRT_R13] = procedure.code as libc::uintptr_t;
regs.gpr[RUSTRT_R14] = procedure.env as libc::uintptr_t;
regs.gpr[RUSTRT_R15] = fptr as libc::uintptr_t;
// These registers are picked up by the regular context switch paths. These
// will put us in "mostly the right context" except for frobbing all the
// arguments to the right place. We have the small trampoline code inside of
// rust_bootstrap_green_task to do that.
regs.gpr[RUSTRT_RSP] = sp as libc::uintptr_t;
regs.gpr[RUSTRT_IP] = rust_bootstrap_green_task as libc::uintptr_t;
// Last base pointer on the stack should be 0
regs.gpr[RUSTRT_RBP] = 0;
}
#[cfg(target_arch = "arm")]
type Registers = [libc::uintptr_t, ..32];
#[cfg(target_arch = "arm")]
fn new_regs() -> Box<Registers> { box {[0, .. 32]} }
#[cfg(target_arch = "arm")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
extern { fn rust_bootstrap_green_task(); }
let sp = align_down(sp);
// sp of arm eabi is 8-byte aligned
let sp = mut_offset(sp, -2);
// The final return address. 0 indicates the bottom of the stack
unsafe { *sp = 0; }
// ARM uses the same technique as x86_64 to have a landing pad for the start
// of all new green tasks. Neither r1/r2 are saved on a context switch, so
// the shim will copy r3/r4 into r1/r2 and then execute the function in r5
regs[0] = arg as libc::uintptr_t; // r0
regs[3] = procedure.code as libc::uintptr_t; // r3
regs[4] = procedure.env as libc::uintptr_t; // r4
regs[5] = fptr as libc::uintptr_t; // r5
regs[13] = sp as libc::uintptr_t; // #52 sp, r13
regs[14] = rust_bootstrap_green_task as libc::uintptr_t; // #56 pc, r14 --> lr
}
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
type Registers = [libc::uintptr_t, ..32];
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
fn new_regs() -> Box<Registers> { box {[0, .. 32]} }
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
let sp = align_down(sp);
// sp of mips o32 is 8-byte aligned
let sp = mut_offset(sp, -2);
// The final return address. 0 indicates the bottom of the stack
unsafe { *sp = 0; }
regs[4] = arg as libc::uintptr_t;
regs[5] = procedure.code as libc::uintptr_t;
regs[6] = procedure.env as libc::uintptr_t;
regs[29] = sp as libc::uintptr_t;
regs[25] = fptr as libc::uintptr_t;
regs[31] = fptr as libc::uintptr_t;
}
fn align_down(sp: *mut uint) -> *mut uint {
let sp = (sp as uint) & !(16 - 1);
sp as *mut uint
}
// ptr::mut_offset only handles positive counts, so define a signed variant here
#[inline]
pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T {
use std::mem::size_of;
(ptr as int + count * (size_of::<T>() as int)) as *mut T
}
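// Worked example of the helpers above (plain arithmetic, shown for clarity):
// align_down(0x7fff1017 as *mut uint) masks off the low bits and yields
// 0x7fff1010, and on a 64-bit target mut_offset(sp, -2) moves `sp` back by
// 16 bytes (2 * size_of::<uint>()), which is how the call-frame setup above
// reserves slots below the stack top.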
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Coroutines represent nothing more than a context and a stack
// segment.
use context::Context;
use stack::{StackPool, Stack};
/// A coroutine is nothing more than a (register context, stack) pair.
pub struct Coroutine {
/// The segment of stack on which the task is currently running or
/// if the task is blocked, on which the task will resume
/// execution.
///
/// Servo needs this to be public in order to tell SpiderMonkey
/// about the stack bounds.
pub current_stack_segment: Stack,
/// Always valid if the task is alive and not running.
pub saved_context: Context
}
impl Coroutine {
pub fn empty() -> Coroutine {
Coroutine {
current_stack_segment: unsafe { Stack::dummy_stack() },
saved_context: Context::empty()
}
}
/// Destroy the coroutine and try to reuse its stack segment.
pub fn recycle(self, stack_pool: &mut StackPool) {
let Coroutine { current_stack_segment, .. } = self;
stack_pool.give_stack(current_stack_segment);
}
}
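// Lifecycle sketch (illustrative; `pool` is a StackPool owned by the scheduler):
//
//     let coro = Coroutine {
//         current_stack_segment: pool.take_stack(stack_size),
//         saved_context: Context::empty(),
//     };
//     // ... the scheduler swaps into and out of `coro.saved_context` ...
//     coro.recycle(&mut pool); // hand the stack back for reuse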
This diff is collapsed.
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: this file probably shouldn't exist
// ignore-lexer-test FIXME #15677
#![macro_escape]
use std::fmt;
// Indicates whether we should perform expensive sanity checks, including rtassert!
// FIXME: Once the runtime matures remove the `true` below to turn off rtassert, etc.
pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
macro_rules! rterrln (
($($arg:tt)*) => ( {
format_args!(::macros::dumb_println, $($arg)*)
} )
)
// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build.
macro_rules! rtdebug (
($($arg:tt)*) => ( {
if cfg!(rtdebug) {
rterrln!($($arg)*)
}
})
)
macro_rules! rtassert (
( $arg:expr ) => ( {
if ::macros::ENFORCE_SANITY {
if !$arg {
rtabort!(" assertion failed: {}", stringify!($arg));
}
}
} )
)
macro_rules! rtabort (
($($arg:tt)*) => ( {
::macros::abort(format!($($arg)*).as_slice());
} )
)
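// Usage sketch (these macros are exported to the rest of the crate via
// `macro_escape`; both lines below appear verbatim in sibling modules):
//
//     rtdebug!("swapping contexts");     // printed only when built with --cfg rtdebug
//     rtassert!(self.idle.is_none());    // calls rtabort! when a sanity check fails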
pub fn dumb_println(args: &fmt::Arguments) {
use std::rt;
let mut w = rt::Stderr;
let _ = writeln!(&mut w, "{}", args);
}
pub fn abort(msg: &str) -> ! {
let msg = if !msg.is_empty() { msg } else { "aborted" };
let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) );
let quote = match hash % 10 {
0 => "
It was from the artists and poets that the pertinent answers came, and I
know that panic would have broken loose had they been able to compare notes.
As it was, lacking their original letters, I half suspected the compiler of
having asked leading questions, or of having edited the correspondence in
corroboration of what he had latently resolved to see.",
1 => "
There are not many persons who know what wonders are opened to them in the
stories and visions of their youth; for when as children we listen and dream,
we think but half-formed thoughts, and when as men we try to remember, we are
dulled and prosaic with the poison of life. But some of us awake in the night
with strange phantasms of enchanted hills and gardens, of fountains that sing
in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
down to sleeping cities of bronze and stone, and of shadowy companies of heroes
that ride caparisoned white horses along the edges of thick forests; and then
we know that we have looked back through the ivory gates into that world of
wonder which was ours before we were wise and unhappy.",
2 => "
Instead of the poems I had hoped for, there came only a shuddering blackness
and ineffable loneliness; and I saw at last a fearful truth which no one had
ever dared to breathe before — the unwhisperable secret of secrets — The fact
that this city of stone and stridor is not a sentient perpetuation of Old New
York as London is of Old London and Paris of Old Paris, but that it is in fact
quite dead, its sprawling body imperfectly embalmed and infested with queer
animate things which have nothing to do with it as it was in life.",
3 => "
The ocean ate the last of the land and poured into the smoking gulf, thereby
giving up all it had ever conquered. From the new-flooded lands it flowed
again, uncovering death and decay; and from its ancient and immemorial bed it
trickled loathsomely, uncovering nighted secrets of the years when Time was
young and the gods unborn. Above the waves rose weedy remembered spires. The
moon laid pale lilies of light on dead London, and Paris stood up from its damp
grave to be sanctified with star-dust. Then rose spires and monoliths that were
weedy but not remembered; terrible spires and monoliths of lands that men never
knew were lands...",
4 => "
There was a night when winds from unknown spaces whirled us irresistibly into
limitless vacuum beyond all thought and entity. Perceptions of the most
maddeningly untransmissible sort thronged upon us; perceptions of infinity
which at the time convulsed us with joy, yet which are now partly lost to my
memory and partly incapable of presentation to others.",
_ => "You've met with a terrible fate, haven't you?"
};
rterrln!("{}", "");
rterrln!("{}", quote);
rterrln!("{}", "");
rterrln!("fatal runtime error: {}", msg);
abort();
fn abort() -> ! {
use std::intrinsics;
unsafe { intrinsics::abort() }
}
}
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::PopResult::*;
use alloc::arc::Arc;
use std::sync::mpsc_queue as mpsc;
use std::kinds::marker;
pub enum PopResult<T> {
Inconsistent,
Empty,
Data(T),
}
pub fn queue<T: Send>() -> (Consumer<T>, Producer<T>) {
let a = Arc::new(mpsc::Queue::new());
(Consumer { inner: a.clone(), noshare: marker::NoSync },
Producer { inner: a, noshare: marker::NoSync })
}
pub struct Producer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
pub struct Consumer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
impl<T: Send> Consumer<T> {
pub fn pop(&self) -> PopResult<T> {
match self.inner.pop() {
mpsc::Inconsistent => Inconsistent,
mpsc::Empty => Empty,
mpsc::Data(t) => Data(t),
}
}
pub fn casual_pop(&self) -> Option<T> {
match self.inner.pop() {
mpsc::Inconsistent => None,
mpsc::Empty => None,
mpsc::Data(t) => Some(t),
}
}
}
impl<T: Send> Producer<T> {
pub fn push(&self, t: T) {
self.inner.push(t);
}
}
impl<T: Send> Clone for Producer<T> {
fn clone(&self) -> Producer<T> {
Producer { inner: self.inner.clone(), noshare: marker::NoSync }
}
}
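// Usage sketch (grounded in the API above; the Consumer is single-threaded by
// construction, while Producers may be cloned and sent to other threads):
//
//     let (cons, prod) = queue::<uint>();
//     prod.clone().push(5);
//     match cons.pop() {
//         Data(n) => { /* got n */ }
//         Empty => { /* nothing queued */ }
//         Inconsistent => { /* a push is mid-flight; retry shortly */ }
//     }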
This diff is collapsed.
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A small module implementing a simple "runtime" used for bootstrapping a rust
//! scheduler pool and then interacting with it.
use std::any::Any;
use std::mem;
use std::rt::Runtime;
use std::rt::local::Local;
use std::rt::mutex::NativeMutex;
use std::rt::task::{Task, BlockedTask, TaskOpts};
struct SimpleTask {
lock: NativeMutex,
awoken: bool,
}
impl Runtime for SimpleTask {
// Implement the simple tasks of descheduling and rescheduling, but only for
// a small number of simple cases.
fn deschedule(mut self: Box<SimpleTask>,
times: uint,
mut cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
assert!(times == 1);
let me = &mut *self as *mut SimpleTask;
let cur_dupe = &mut *cur_task as *mut Task;
cur_task.put_runtime(self);
let task = BlockedTask::block(cur_task);
// See libnative/task.rs for what's going on here with the `awoken`
// field and the while loop around wait()
unsafe {
let guard = (*me).lock.lock();
(*me).awoken = false;
match f(task) {
Ok(()) => {
while !(*me).awoken {
guard.wait();
}
}
Err(task) => { mem::forget(task.wake()); }
}
drop(guard);
cur_task = mem::transmute(cur_dupe);
}
Local::put(cur_task);
}
fn reawaken(mut self: Box<SimpleTask>, mut to_wake: Box<Task>) {
let me = &mut *self as *mut SimpleTask;
to_wake.put_runtime(self);
unsafe {
mem::forget(to_wake);
let guard = (*me).lock.lock();
(*me).awoken = true;
guard.signal();
}
}
// These functions are all unimplemented and panic as a result. This is on
// purpose. A "simple task" is just that, a very simple task that can't
// really do a whole lot. The only purpose of the task is to get us off our
// feet and running.
fn yield_now(self: Box<SimpleTask>, _cur_task: Box<Task>) { panic!() }
fn maybe_yield(self: Box<SimpleTask>, _cur_task: Box<Task>) { panic!() }
fn spawn_sibling(self: Box<SimpleTask>,
_cur_task: Box<Task>,
_opts: TaskOpts,
_f: proc():Send) {
panic!()
}
fn stack_bounds(&self) -> (uint, uint) { panic!() }
fn stack_guard(&self) -> Option<uint> { panic!() }
fn can_block(&self) -> bool { true }
fn wrap(self: Box<SimpleTask>) -> Box<Any+'static> { panic!() }
}
pub fn task() -> Box<Task> {
let mut task = box Task::new();
task.put_runtime(box SimpleTask {
lock: unsafe {NativeMutex::new()},
awoken: false,
});
return task;
}
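// Bootstrap sketch (illustrative): the scheduler pool installs one of these
// before any green scheduler exists, so early runtime calls find a Task in TLS:
//
//     let bootstrap = task();
//     Local::put(bootstrap);
//     // ... construct the SchedPool, then tear the bootstrap task back down ...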
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Maintains a shared list of sleeping schedulers. Schedulers
//! use this to wake each other up.
use std::sync::mpmc_bounded_queue::Queue;
use sched::SchedHandle;
pub struct SleeperList {
q: Queue<SchedHandle>,
}
impl SleeperList {
pub fn new() -> SleeperList {
SleeperList{q: Queue::with_capacity(8*1024)}
}
pub fn push(&mut self, value: SchedHandle) {
assert!(self.q.push(value))
}
pub fn pop(&mut self) -> Option<SchedHandle> {
self.q.pop()
}
pub fn casual_pop(&mut self) -> Option<SchedHandle> {
self.q.pop()
}
}
impl Clone for SleeperList {
fn clone(&self) -> SleeperList {
SleeperList {
q: self.q.clone()
}
}
}
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ptr;
use std::sync::atomic;
use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable,
MapNonStandardFlags, getenv};
use libc;
/// A task's stack. The name "Stack" is a vestige of segmented stacks.
pub struct Stack {
buf: Option<MemoryMap>,
min_size: uint,
valgrind_id: libc::c_uint,
}
// Try to use MAP_STACK on platforms that support it (it's what we're doing
// anyway), but some platforms don't support it at all. For example, it appears
// that there's a bug in freebsd that MAP_STACK implies MAP_FIXED (so it always
// panics): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html
//
// DragonFly BSD also seems to suffer from the same problem. When MAP_STACK is
// used, it returns the same `ptr` multiple times.
#[cfg(not(any(windows, target_os = "freebsd", target_os = "dragonfly")))]
static STACK_FLAGS: libc::c_int = libc::MAP_STACK | libc::MAP_PRIVATE |
libc::MAP_ANON;
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
static STACK_FLAGS: libc::c_int = libc::MAP_PRIVATE | libc::MAP_ANON;
#[cfg(windows)]
static STACK_FLAGS: libc::c_int = 0;
impl Stack {
/// Allocate a new stack of `size`. If size = 0, this will panic. Use
/// `dummy_stack` if you want a zero-sized stack.
pub fn new(size: uint) -> Stack {
// Map in a stack. Eventually we might be able to handle stack
// allocation failure, which would fail to spawn the task. But there's
// not many sensible things to do on OOM. Panic seems fine (and is
// what the old stack allocation did).
let stack = match MemoryMap::new(size, &[MapReadable, MapWritable,
MapNonStandardFlags(STACK_FLAGS)]) {
Ok(map) => map,
Err(e) => panic!("mmap for stack of size {} failed: {}", size, e)
};
// Change the last page to be inaccessible. This is to provide safety;
// when an FFI function overflows it will (hopefully) hit this guard
// page. It isn't guaranteed, but that's why FFI is unsafe. buf.data is
// guaranteed to be aligned properly.
if !protect_last_page(&stack) {
panic!("Could not memory-protect guard page. stack={}, errno={}",
stack.data(), errno());
}
let mut stk = Stack {
buf: Some(stack),
min_size: size,
valgrind_id: 0
};
// FIXME: Using the FFI to call a C macro. Slow
stk.valgrind_id = unsafe {
rust_valgrind_stack_register(stk.start() as *const libc::uintptr_t,
stk.end() as *const libc::uintptr_t)
};
return stk;
}
/// Create a 0-length stack which starts (and ends) at 0.
pub unsafe fn dummy_stack() -> Stack {
Stack {
buf: None,
min_size: 0,
valgrind_id: 0
}
}
/// Point to the last writable byte of the stack
pub fn guard(&self) -> *const uint {
(self.start() as uint + page_size()) as *const uint
}
/// Point to the low end of the allocated stack
pub fn start(&self) -> *const uint {
self.buf.as_ref().map(|m| m.data() as *const uint)
.unwrap_or(ptr::null())
}
/// Point one uint beyond the high end of the allocated stack
pub fn end(&self) -> *const uint {
self.buf.as_ref().map(|buf| unsafe {
buf.data().offset(buf.len() as int) as *const uint
}).unwrap_or(ptr::null())
}
}
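// Layout sketch of a freshly mapped stack (addresses increase to the right):
//
//     start()               guard()                                end()
//       | PROT_NONE page  |  usable stack, grows back toward guard() |
//
// so guard() == start() + page_size(), and context.rs starts new tasks with sp
// near end(), growing downward until (hopefully) the guard page is hit.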
#[cfg(unix)]
fn protect_last_page(stack: &MemoryMap) -> bool {
unsafe {
// This may seem backwards: the start of the segment is the last page?
// Yes! The stack grows from higher addresses (the end of the allocated
// block) to lower addresses (the start of the allocated block).
let last_page = stack.data() as *mut libc::c_void;
libc::mprotect(last_page, page_size() as libc::size_t,
libc::PROT_NONE) != -1
}
}
#[cfg(windows)]
fn protect_last_page(stack: &MemoryMap) -> bool {
unsafe {
// see above
let last_page = stack.data() as *mut libc::c_void;
let mut old_prot: libc::DWORD = 0;
libc::VirtualProtect(last_page, page_size() as libc::SIZE_T,
libc::PAGE_NOACCESS,
&mut old_prot as libc::LPDWORD) != 0
}
}
impl Drop for Stack {
fn drop(&mut self) {
unsafe {
// FIXME: Using the FFI to call a C macro. Slow
rust_valgrind_stack_deregister(self.valgrind_id);
}
}
}
pub struct StackPool {
// Ideally this would be some data structure that preserved ordering on
// Stack.min_size.
stacks: Vec<Stack>,
}
impl StackPool {
pub fn new() -> StackPool {
StackPool {
stacks: vec![],
}
}
pub fn take_stack(&mut self, min_size: uint) -> Stack {
// Ideally this would be a binary search
match self.stacks.iter().position(|s| min_size <= s.min_size) {
Some(idx) => self.stacks.swap_remove(idx).unwrap(),
None => Stack::new(min_size)
}
}
pub fn give_stack(&mut self, stack: Stack) {
if self.stacks.len() <= max_cached_stacks() {
self.stacks.push(stack)
}
}
}
fn max_cached_stacks() -> uint {
static AMT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
match AMT.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = getenv("RUST_MAX_CACHED_STACKS").and_then(|s| from_str(s.as_slice()));
// This default corresponds to 20M of cache per scheduler (at the
// default size).
let amt = amt.unwrap_or(10);
// 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
AMT.store(amt + 1, atomic::SeqCst);
return amt;
}
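// Example of the sentinel encoding above: with RUST_MAX_CACHED_STACKS=4, the
// first call stores 5 into AMT and returns 4; every later call loads n == 5 and
// returns n - 1 == 4, so the environment variable is only consulted once.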
extern {
fn rust_valgrind_stack_register(start: *const libc::uintptr_t,
end: *const libc::uintptr_t) -> libc::c_uint;
fn rust_valgrind_stack_deregister(id: libc::c_uint);
}
#[cfg(test)]
mod tests {
use super::StackPool;
#[test]
fn stack_pool_caches() {
let mut p = StackPool::new();
let s = p.take_stack(10);
p.give_stack(s);
let s = p.take_stack(4);
assert_eq!(s.min_size, 10);
p.give_stack(s);
let s = p.take_stack(14);
assert_eq!(s.min_size, 14);
p.give_stack(s);
}
#[test]
fn stack_pool_caches_exact() {
let mut p = StackPool::new();
let mut s = p.take_stack(10);
s.valgrind_id = 100;
p.give_stack(s);
let s = p.take_stack(10);
assert_eq!(s.min_size, 10);
assert_eq!(s.valgrind_id, 100);
}
}
This diff is collapsed.