diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs
index 184ed19da952477062ec3f1ca514139518e1b71c..59cc312bee5fcc4064751bca256ed75592fa31ea 100644
--- a/src/libcore/lib.rs
+++ b/src/libcore/lib.rs
@@ -117,6 +117,7 @@
 #![feature(powerpc_target_feature)]
 #![feature(mips_target_feature)]
 #![feature(aarch64_target_feature)]
+#![feature(wasm_target_feature)]
 #![feature(const_slice_len)]
 #![feature(const_str_as_bytes)]
 #![feature(const_str_len)]
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index 69c524925fc546e9d82f9f1a9aab80cba7e504bc..f130dbfb0e3dfea57259d2f89df93f6be11c00ab 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -2251,7 +2251,15 @@ unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
 pub fn fence(order: Ordering) {
+    // On wasm32 it looks like fences aren't implemented in LLVM yet in that
+    // they will cause LLVM to abort. The wasm instruction set doesn't have
+    // fences right now. There's discussion online about the best way for tools
+    // to conventionally implement fences at
+    // https://github.com/WebAssembly/tool-conventions/issues/59. We should
+    // follow that discussion and implement a solution when one comes about!
+    #[cfg(not(target_arch = "wasm32"))]
     unsafe {
         match order {
             Acquire => intrinsics::atomic_fence_acq(),
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index afe0b67e33020aeb3a743bb32997549def92c7dd..c58bd364cedaba7857e9f607aedc464bcff11bab 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -257,6 +257,7 @@
 #![feature(const_cstr_unchecked)]
 #![feature(core_intrinsics)]
 #![feature(dropck_eyepatch)]
+#![feature(duration_as_u128)]
 #![feature(exact_size_is_empty)]
 #![feature(external_doc)]
 #![feature(fixed_size_array)]
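
With this change `fence` compiles to nothing on wasm32 until the linked tool-conventions discussion settles on an encoding. Purely for illustration (this is not what the patch does, and it is not formally equivalent to a real `atomic_thread_fence` under the memory model), one common approximation on targets without a dedicated fence instruction is a SeqCst read-modify-write on a private atomic:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Process-private cell used only for its ordering side effects.
    static FENCE_CELL: AtomicUsize = AtomicUsize::new(0);

    // Hypothetical stand-in for `fence(Ordering::SeqCst)`: a SeqCst
    // read-modify-write participates in the single total order of SeqCst
    // operations, which gives fence-like ordering in practice, even though
    // it is weaker than a real fence under the formal rules.
    fn approximate_seqcst_fence() {
        FENCE_CELL.fetch_add(0, Ordering::SeqCst);
    }

    fn main() {
        approximate_seqcst_fence();
    }
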
diff --git a/src/libstd/sys/wasm/condvar_atomics.rs b/src/libstd/sys/wasm/condvar_atomics.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5c55fd0a61868523e982dd4d3b4a8ae700f6d92a
--- /dev/null
+++ b/src/libstd/sys/wasm/condvar_atomics.rs
@@ -0,0 +1,104 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use arch::wasm32::atomic;
+use cmp;
+use mem;
+use sync::atomic::{AtomicUsize, Ordering::SeqCst};
+use sys::mutex::Mutex;
+use time::Duration;
+
+pub struct Condvar {
+    cnt: AtomicUsize,
+}
+
+// Condition variables are implemented with a simple counter internally that is
+// likely to cause spurious wakeups. Blocking on a condition variable will first
+// read the value of the internal counter, unlock the given mutex, and then
+// block if and only if the counter's value is still the same. Notifying a
+// condition variable will modify the counter (add one for now) and then wake up
+// a thread waiting on the address of the counter.
+//
+// A thread waiting on the condition variable will as a result avoid going to
+// sleep if it's notified after the lock is unlocked but before it fully goes to
+// sleep. A sleeping thread is guaranteed to be woken up at some point as it can
+// only be woken up with a call to `wake`.
+//
+// Note that it's possible for 2 or more threads to be woken up by a call to
+// `notify_one` with this implementation. That can happen where the modification
+// of `cnt` causes any threads in the middle of `wait` to avoid going to sleep,
+// and the subsequent `wake` may wake up a thread that's actually blocking. We
+// consider this a spurious wakeup, though, which all users of condition
+// variables must already be prepared to handle. As a result, this source of
+// spurious wakeups is currently thought to be ok, although it may be problematic
+// later on if it causes too many spurious wakeups.
+
+impl Condvar {
+    pub const fn new() -> Condvar {
+        Condvar { cnt: AtomicUsize::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&mut self) {
+        // nothing to do
+    }
+
+    pub unsafe fn notify_one(&self) {
+        self.cnt.fetch_add(1, SeqCst);
+        atomic::wake(self.ptr(), 1);
+    }
+
+    #[inline]
+    pub unsafe fn notify_all(&self) {
+        self.cnt.fetch_add(1, SeqCst);
+        atomic::wake(self.ptr(), -1); // -1 == "wake everyone"
+    }
+
+    pub unsafe fn wait(&self, mutex: &Mutex) {
+        // "atomically block and unlock" implemented by loading our current
+        // counter's value, unlocking the mutex, and blocking if the counter
+        // still has the same value.
+        //
+        // Notifications happen by incrementing the counter and then waking a
+        // thread. Incrementing the counter after we unlock the mutex will
+        // prevent us from sleeping and otherwise the call to `wake` will
+        // wake us up once we're asleep.
+        let ticket = self.cnt.load(SeqCst) as i32;
+        mutex.unlock();
+        let val = atomic::wait_i32(self.ptr(), ticket, -1);
+        // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen)
+        debug_assert!(val == 0 || val == 1);
+        mutex.lock();
+    }
+
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+        let ticket = self.cnt.load(SeqCst) as i32;
+        mutex.unlock();
+        let nanos = dur.as_nanos();
+        let nanos = cmp::min(i64::max_value() as u128, nanos);
+
+        // If the return value is 2 then a timeout happened, so we return
+        // `false` as we weren't actually notified.
+        let ret = atomic::wait_i32(self.ptr(), ticket, nanos as i64) != 2;
+        mutex.lock();
+        return ret
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        // nothing to do
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i32 {
+        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
+        &self.cnt as *const AtomicUsize as *mut i32
+    }
+}
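
The comment above is the heart of the design. A minimal self-contained sketch of the same ticket protocol, with hypothetical `wait_i32`/`wake` stubs standing in for the wasm atomic wait and wake operations, shows why a notification that races with `wait` can never be lost, only turned into a spurious wakeup:

    use std::sync::atomic::{AtomicI32, Ordering::SeqCst};

    // Hypothetical stand-ins for the wasm atomic wait/wake operations used in
    // the file above. `wait_i32` blocks only while `*addr` still equals
    // `expected`; `wake` wakes up to `count` waiters parked on `addr`.
    fn wait_i32(_addr: &AtomicI32, _expected: i32, _timeout_ns: i64) -> i32 { 0 }
    fn wake(_addr: &AtomicI32, _count: i32) -> i32 { 0 }

    // The condvar protocol in miniature: a notifier bumps the counter before
    // waking, so a waiter that re-checks the counter after releasing its mutex
    // either sees a new value (and skips sleeping) or sleeps knowing a `wake`
    // is still on its way. The worst case is an extra spurious wakeup.
    fn wait(cnt: &AtomicI32, unlock: impl FnOnce(), lock: impl FnOnce()) {
        let ticket = cnt.load(SeqCst);
        unlock();
        wait_i32(cnt, ticket, -1); // returns immediately if `cnt` moved on
        lock();
    }

    fn notify_one(cnt: &AtomicI32) {
        cnt.fetch_add(1, SeqCst);
        wake(cnt, 1);
    }

    fn main() {
        let cnt = AtomicI32::new(0);
        notify_one(&cnt);
        wait(&cnt, || {}, || {}); // ticket is stale, so this does not block
    }
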
diff --git a/src/libstd/sys/wasm/mod.rs b/src/libstd/sys/wasm/mod.rs
index c02e5e809c8bb33cb81553eec35607420553fbd4..e11b4d71aaeea90cf01cee2b66eb99cc2423d90e 100644
--- a/src/libstd/sys/wasm/mod.rs
+++ b/src/libstd/sys/wasm/mod.rs
@@ -36,24 +36,38 @@
 #[cfg(feature = "backtrace")]
 pub mod backtrace;
 pub mod cmath;
-pub mod condvar;
 pub mod env;
 pub mod fs;
 pub mod memchr;
-pub mod mutex;
 pub mod net;
 pub mod os;
 pub mod os_str;
 pub mod path;
 pub mod pipe;
 pub mod process;
-pub mod rwlock;
 pub mod stack_overflow;
 pub mod thread;
-pub mod thread_local;
 pub mod time;
 pub mod stdio;
 
+cfg_if! {
+    if #[cfg(target_feature = "atomics")] {
+        #[path = "condvar_atomics.rs"]
+        pub mod condvar;
+        #[path = "mutex_atomics.rs"]
+        pub mod mutex;
+        #[path = "rwlock_atomics.rs"]
+        pub mod rwlock;
+        #[path = "thread_local_atomics.rs"]
+        pub mod thread_local;
+    } else {
+        pub mod condvar;
+        pub mod mutex;
+        pub mod rwlock;
+        pub mod thread_local;
+    }
+}
+
 #[cfg(not(test))]
 pub fn init() {
 }
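
For context on the selection mechanism: `cfg_if!` expands to ordinary `#[cfg]`-gated items, and the `#[path]` attribute is what points `sys::condvar` and friends at the `*_atomics.rs` files when the `atomics` target feature is enabled (e.g. via `-C target-feature=+atomics`). A stripped-down, self-contained sketch of the same compile-time switch, with hypothetical inline modules in place of the real files:

    // Only one of the two `condvar` modules exists in any given build; the
    // choice is made entirely at compile time by the target feature.
    #[cfg(target_feature = "atomics")]
    mod condvar {
        // the atomics-backed implementation would live here (reached through
        // #[path = "condvar_atomics.rs"] in the real tree)
    }

    #[cfg(not(target_feature = "atomics"))]
    mod condvar {
        // the original single-threaded stub
    }

    fn main() {
        println!("atomics enabled: {}", cfg!(target_feature = "atomics"));
    }
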
diff --git a/src/libstd/sys/wasm/mutex_atomics.rs b/src/libstd/sys/wasm/mutex_atomics.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ced6c17ef9605aba311f71bb9825202558c3a1f7
--- /dev/null
+++ b/src/libstd/sys/wasm/mutex_atomics.rs
@@ -0,0 +1,163 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use arch::wasm32::atomic;
+use cell::UnsafeCell;
+use mem;
+use sync::atomic::{AtomicUsize, AtomicU64, Ordering::SeqCst};
+
+pub struct Mutex {
+    locked: AtomicUsize,
+}
+
+// Mutexes have a pretty simple implementation where they contain an `i32`
+// internally that is 0 when unlocked and 1 when the mutex is locked.
+// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and
+// if it fails it then waits for a notification. Releasing a lock is then done
+// by swapping in 0 and then notifying any waiters, if present.
+
+impl Mutex {
+    pub const fn new() -> Mutex {
+        Mutex { locked: AtomicUsize::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&mut self) {
+        // nothing to do
+    }
+
+    pub unsafe fn lock(&self) {
+        while !self.try_lock() {
+            let val = atomic::wait_i32(
+                self.ptr(),
+                1,  // we expect our mutex is locked
+                -1, // wait infinitely
+            );
+            // we should have either woke up (0) or got a not-equal due to a
+            // race (1). We should never time out (2)
+            debug_assert!(val == 0 || val == 1);
+        }
+    }
+
+    pub unsafe fn unlock(&self) {
+        let prev = self.locked.swap(0, SeqCst);
+        debug_assert_eq!(prev, 1);
+        atomic::wake(self.ptr(), 1); // wake up one waiter, if any
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        // nothing to do
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i32 {
+        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
+        &self.locked as *const AtomicUsize as *mut isize as *mut i32
+    }
+}
+
+pub struct ReentrantMutex {
+    owner: AtomicU64,
+    recursions: UnsafeCell<u32>,
+}
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+// Reentrant mutexes are similarly implemented to mutexes above except that
+// instead of "1" meaning locked we use the id of a thread to represent
+// whether it has locked a mutex. That way we have an atomic counter which
+// always holds the id of the thread that currently holds the lock (or 0 if the
+// lock is unlocked).
+//
+// Once a thread acquires a lock recursively, which it detects by looking at
+// the value that's already there, it will update a local `recursions` counter
+// in a nonatomic fashion (as we hold the lock). The lock is then fully
+// released when this recursion counter reaches 0.
+
+impl ReentrantMutex {
+    pub unsafe fn uninitialized() -> ReentrantMutex {
+        ReentrantMutex {
+            owner: AtomicU64::new(0),
+            recursions: UnsafeCell::new(0),
+        }
+    }
+
+    pub unsafe fn init(&mut self) {
+        // nothing to do...
+    }
+
+    pub unsafe fn lock(&self) {
+        let me = thread_id();
+        while let Err(owner) = self._try_lock(me) {
+            let val = atomic::wait_i64(self.ptr(), owner as i64, -1);
+            debug_assert!(val == 0 || val == 1);
+        }
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        self._try_lock(thread_id()).is_ok()
+    }
+
+    #[inline]
+    unsafe fn _try_lock(&self, id: u64) -> Result<(), u64> {
+        let id = id.checked_add(1).unwrap(); // make sure `id` isn't 0
+        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
+            // we transitioned from unlocked to locked
+            Ok(_) => {
+                debug_assert_eq!(*self.recursions.get(), 0);
+                Ok(())
+            }
+
+            // we currently own this lock, so let's update our count and return
+            // true.
+            Err(n) if n == id => {
+                *self.recursions.get() += 1;
+                Ok(())
+            }
+
+            // Someone else owns the lock, let our caller take care of it
+            Err(other) => Err(other),
+        }
+    }
+
+    pub unsafe fn unlock(&self) {
+        // If we didn't ever recursively lock the lock then we fully unlock the
+        // mutex and wake up a waiter, if any. Otherwise we decrement our
+        // recursive counter and let someone else take care of the zero.
+        match *self.recursions.get() {
+            0 => {
+                self.owner.swap(0, SeqCst);
+                atomic::wake(self.ptr() as *mut i32, 1); // wake up one waiter, if any
+            }
+            ref mut n => *n -= 1,
+        }
+    }
+
+    pub unsafe fn destroy(&self) {
+        // nothing to do...
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i64 {
+        &self.owner as *const AtomicU64 as *mut i64
+    }
+}
+
+fn thread_id() -> u64 {
+    panic!("thread ids not implemented on wasm with atomics yet")
+}
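
A small worked example of the owner encoding used by `ReentrantMutex`: thread ids are offset by one before being stored so that 0 stays reserved for "unlocked", and a repeated `lock` by the owner only bumps the recursion counter. The sketch below (hypothetical `thread_id` values, no blocking or waking) replays just that state machine:

    use std::sync::atomic::{AtomicU64, Ordering::SeqCst};

    struct Owner {
        owner: AtomicU64, // 0 == unlocked, otherwise thread id + 1
        recursions: u64,
    }

    impl Owner {
        fn try_lock(&mut self, thread_id: u64) -> bool {
            let id = thread_id.checked_add(1).unwrap(); // keep 0 free for "unlocked"
            match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
                Ok(_) => true,                     // unlocked -> locked by us
                Err(current) if current == id => { // already ours: recurse
                    self.recursions += 1;
                    true
                }
                Err(_) => false,                   // someone else holds it
            }
        }

        fn unlock(&mut self) {
            if self.recursions == 0 {
                self.owner.store(0, SeqCst); // fully released; the real code also wakes a waiter
            } else {
                self.recursions -= 1;
            }
        }
    }

    fn main() {
        let mut m = Owner { owner: AtomicU64::new(0), recursions: 0 };
        assert!(m.try_lock(7));  // first acquisition
        assert!(m.try_lock(7));  // reentrant acquisition by the same thread
        assert!(!m.try_lock(9)); // a different thread is refused
        m.unlock();
        m.unlock();
        assert!(m.try_lock(9));  // now free again
    }
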
diff --git a/src/libstd/sys/wasm/rwlock_atomics.rs b/src/libstd/sys/wasm/rwlock_atomics.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3623333cc8610f59a9ab9ef4c71e6f2a228ca720
--- /dev/null
+++ b/src/libstd/sys/wasm/rwlock_atomics.rs
@@ -0,0 +1,161 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use cell::UnsafeCell;
+use sys::mutex::Mutex;
+use sys::condvar::Condvar;
+
+pub struct RWLock {
+    lock: Mutex,
+    cond: Condvar,
+    state: UnsafeCell<State>,
+}
+
+enum State {
+    Unlocked,
+    Reading(usize),
+    Writing,
+}
+
+unsafe impl Send for RWLock {}
+unsafe impl Sync for RWLock {}
+
+// This rwlock implementation is relatively simple: it has a condition
+// variable for readers/writers as well as a mutex protecting the internal
+// state of the lock. A current downside of the implementation is that
+// unlocking the lock will notify *all* waiters rather than just readers or just
+// writers. This can cause lots of "thundering herd" problems. While
+// hopefully correct, this implementation is very likely to want to be changed
+// in the future.
+
+impl RWLock {
+    pub const fn new() -> RWLock {
+        RWLock {
+            lock: Mutex::new(),
+            cond: Condvar::new(),
+            state: UnsafeCell::new(State::Unlocked),
+        }
+    }
+
+    #[inline]
+    pub unsafe fn read(&self) {
+        self.lock.lock();
+        while !(*self.state.get()).inc_readers() {
+            self.cond.wait(&self.lock);
+        }
+        self.lock.unlock();
+    }
+
+    #[inline]
+    pub unsafe fn try_read(&self) -> bool {
+        self.lock.lock();
+        let ok = (*self.state.get()).inc_readers();
+        self.lock.unlock();
+        return ok
+    }
+
+    #[inline]
+    pub unsafe fn write(&self) {
+        self.lock.lock();
+        while !(*self.state.get()).inc_writers() {
+            self.cond.wait(&self.lock);
+        }
+        self.lock.unlock();
+    }
+
+    #[inline]
+    pub unsafe fn try_write(&self) -> bool {
+        self.lock.lock();
+        let ok = (*self.state.get()).inc_writers();
+        self.lock.unlock();
+        return ok
+    }
+
+    #[inline]
+    pub unsafe fn read_unlock(&self) {
+        self.lock.lock();
+        let notify = (*self.state.get()).dec_readers();
+        self.lock.unlock();
+        if notify {
+            // FIXME: should only wake up one of these some of the time
+            self.cond.notify_all();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        self.lock.lock();
+        (*self.state.get()).dec_writers();
+        self.lock.unlock();
+        // FIXME: should only wake up one of these some of the time
+        self.cond.notify_all();
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) {
+        self.lock.destroy();
+        self.cond.destroy();
+    }
+}
+
+impl State {
+    fn inc_readers(&mut self) -> bool {
+        match *self {
+            State::Unlocked => {
+                *self = State::Reading(1);
+                true
+            }
+            State::Reading(ref mut cnt) => {
+                *cnt += 1;
+                true
+            }
+            State::Writing => false
+        }
+    }
+
+    fn inc_writers(&mut self) -> bool {
+        match *self {
+            State::Unlocked => {
+                *self = State::Writing;
+                true
+            }
+            State::Reading(_) |
+            State::Writing => false
+        }
+    }
+
+    fn dec_readers(&mut self) -> bool {
+        let zero = match *self {
+            State::Reading(ref mut cnt) => {
+                *cnt -= 1;
+                *cnt == 0
+            }
+            State::Unlocked |
+            State::Writing => invalid(),
+        };
+        if zero {
+            *self = State::Unlocked;
+        }
+        zero
+    }
+
+    fn dec_writers(&mut self) {
+        match *self {
+            State::Writing => {}
+            State::Unlocked |
+            State::Reading(_) => invalid(),
+        }
+        *self = State::Unlocked;
+    }
+}
+
+fn invalid() -> ! {
+    panic!("inconsistent rwlock");
+}
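
The `State` helpers encode the usual readers-writer rules: any number of readers, or exactly one writer, never both. A quick standalone check of those transitions (the relevant parts of the enum are reproduced here only so the example compiles on its own):

    #[derive(Debug, PartialEq)]
    enum State {
        Unlocked,
        Reading(usize),
        Writing,
    }

    impl State {
        fn inc_readers(&mut self) -> bool {
            match *self {
                State::Unlocked => { *self = State::Reading(1); true }
                State::Reading(ref mut cnt) => { *cnt += 1; true }
                State::Writing => false,
            }
        }

        fn inc_writers(&mut self) -> bool {
            match *self {
                State::Unlocked => { *self = State::Writing; true }
                State::Reading(_) | State::Writing => false,
            }
        }
    }

    fn main() {
        let mut s = State::Unlocked;
        assert!(s.inc_readers());         // first reader
        assert!(s.inc_readers());         // readers may pile up
        assert_eq!(s, State::Reading(2));
        assert!(!s.inc_writers());        // writers wait for readers to drain

        let mut s = State::Unlocked;
        assert!(s.inc_writers());         // a writer takes the lock exclusively
        assert!(!s.inc_readers());        // readers are refused while writing
    }
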
diff --git a/src/libstd/sys/wasm/thread.rs b/src/libstd/sys/wasm/thread.rs
index 8173a62421117d4bb11b034a1bcee3978458a84c..bef6c1f34905e8b84e56cd5d5778e84a8ca6e153 100644
--- a/src/libstd/sys/wasm/thread.rs
+++ b/src/libstd/sys/wasm/thread.rs
@@ -33,10 +33,31 @@ pub fn set_name(_name: &CStr) {
         // nope
     }
 
+    #[cfg(not(target_feature = "atomics"))]
     pub fn sleep(_dur: Duration) {
         panic!("can't sleep");
     }
 
+    #[cfg(target_feature = "atomics")]
+    pub fn sleep(dur: Duration) {
+        use arch::wasm32::atomic;
+        use cmp;
+
+        // Use an atomic wait to block the current thread artificially with a
+        // timeout listed. Note that we should never be notified (return value
+        // of 0) or our comparison should never fail (return value of 1) so we
+        // should always only resume execution through a timeout (return value
+        // 2).
+        let mut nanos = dur.as_nanos();
+        while nanos > 0 {
+            let amt = cmp::min(i64::max_value() as u128, nanos);
+            let mut x = 0;
+            let val = unsafe { atomic::wait_i32(&mut x, 0, amt as i64) };
+            debug_assert_eq!(val, 2);
+            nanos -= amt;
+        }
+    }
+
     pub fn join(self) {
         match self.0 {}
     }
diff --git a/src/libstd/sys/wasm/thread_local_atomics.rs b/src/libstd/sys/wasm/thread_local_atomics.rs
new file mode 100644
index 0000000000000000000000000000000000000000..1394013b4a314e782eabe56ceebce8f85308be76
--- /dev/null
+++ b/src/libstd/sys/wasm/thread_local_atomics.rs
@@ -0,0 +1,32 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub type Key = usize;
+
+pub unsafe fn create(_dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
+    panic!("TLS on wasm with atomics not implemented yet");
+}
+
+pub unsafe fn set(_key: Key, _value: *mut u8) {
+    panic!("TLS on wasm with atomics not implemented yet");
+}
+
+pub unsafe fn get(_key: Key) -> *mut u8 {
+    panic!("TLS on wasm with atomics not implemented yet");
+}
+
+pub unsafe fn destroy(_key: Key) {
+    panic!("TLS on wasm with atomics not implemented yet");
+}
+
+#[inline]
+pub fn requires_synchronized_create() -> bool {
+    false
+}
diff --git a/src/stdsimd b/src/stdsimd
index 05c2f61c384e2097a3a4c648344114fc4ac983be..fe825c93788c841ac1872e8351a62c37a5f78427 160000
--- a/src/stdsimd
+++ b/src/stdsimd
@@ -1 +1 @@
-Subproject commit 05c2f61c384e2097a3a4c648344114fc4ac983be
+Subproject commit fe825c93788c841ac1872e8351a62c37a5f78427
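
One closing note on the new `sleep`: `Duration::as_nanos` (the reason `duration_as_u128` is now enabled in libstd) returns a `u128`, while the atomic wait takes an `i64` timeout in nanoseconds, so the wait is issued in chunks of at most `i64::MAX`. A back-of-the-envelope check of that arithmetic, assuming nothing beyond the standard library:

    fn main() {
        // i64::MAX nanoseconds is roughly 292 years, so the chunking loop in
        // `sleep` only matters for absurdly long durations.
        let max_chunk_ns = i64::max_value() as u128;
        let ns_per_year = 1_000_000_000u128 * 60 * 60 * 24 * 365;
        println!("one wait chunk covers ~{} years", max_chunk_ns / ns_per_year);

        // The same chunking arithmetic as the patch, on a sample duration.
        let mut nanos = std::time::Duration::from_secs(3).as_nanos();
        let mut chunks = 0;
        while nanos > 0 {
            let amt = std::cmp::min(max_chunk_ns, nanos);
            nanos -= amt;
            chunks += 1;
        }
        assert_eq!(chunks, 1);
    }
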