Commit fb803a85 authored by Flavio Percoco

Require types to opt-in Sync

Parent: c43efee6
......@@ -129,6 +129,10 @@ pub struct Weak<T> {
_ptr: *mut ArcInner<T>,
}
impl<T: Sync + Send> Send for Arc<T> { }
impl<T: Sync + Send> Sync for Arc<T> { }
struct ArcInner<T> {
strong: atomic::AtomicUint,
weak: atomic::AtomicUint,
......
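The two `impl` lines for `Arc<T>` above are the heart of the opt-in change: `Arc<T>` is `Send`/`Sync` only when `T` is both. A hedged sketch, in the era's syntax (`proc`, `uint`), of the kind of program the bound rules out; this is illustrative only and should be rejected by the compiler:

use std::cell::Cell;
use std::sync::Arc;
use std::task::spawn;

fn main() {
    let shared = Arc::new(Cell::new(5u));
    let shared2 = shared.clone();
    spawn(proc() {
        // `Cell<uint>` has unsynchronized interior mutability, so it is
        // not `Sync`; `Arc<Cell<uint>>` therefore fails the
        // `T: Sync + Send` bound and the program is rejected.
        shared2.set(6u);
    });
}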
......@@ -19,6 +19,7 @@
use core::kinds::Sized;
use core::mem;
use core::option::Option;
use core::ptr::OwnedPtr;
use core::raw::TraitObject;
use core::result::Result;
use core::result::Result::{Ok, Err};
......@@ -44,7 +45,7 @@
/// A type that represents a uniquely-owned value.
#[lang = "owned_box"]
#[unstable = "custom allocators will add an additional type parameter (with default)"]
pub struct Box<T>(*mut T);
pub struct Box<T>(OwnedPtr<T>);
#[stable]
impl<T: Default> Default for Box<T> {
......
......@@ -58,7 +58,7 @@
use core::mem;
use core::num::{Int, UnsignedInt};
use core::ops;
use core::ptr;
use core::ptr::{mod, OwnedPtr};
use core::raw::Slice as RawSlice;
use core::uint;
......@@ -133,7 +133,7 @@
#[unsafe_no_drop_flag]
#[stable]
pub struct Vec<T> {
ptr: *mut T,
ptr: OwnedPtr<T>,
len: uint,
cap: uint,
}
......@@ -176,7 +176,7 @@ pub fn new() -> Vec<T> {
// non-null value which is fine since we never call deallocate on the ptr
// if cap is 0. The reason for this is because the pointer of a slice
// being NULL would break the null pointer optimization for enums.
Vec { ptr: EMPTY as *mut T, len: 0, cap: 0 }
Vec { ptr: OwnedPtr(EMPTY as *mut T), len: 0, cap: 0 }
}
/// Constructs a new, empty `Vec<T>` with the specified capacity.
......@@ -209,7 +209,7 @@ pub fn new() -> Vec<T> {
#[stable]
pub fn with_capacity(capacity: uint) -> Vec<T> {
if mem::size_of::<T>() == 0 {
Vec { ptr: EMPTY as *mut T, len: 0, cap: uint::MAX }
Vec { ptr: OwnedPtr(EMPTY as *mut T), len: 0, cap: uint::MAX }
} else if capacity == 0 {
Vec::new()
} else {
......@@ -217,7 +217,7 @@ pub fn with_capacity(capacity: uint) -> Vec<T> {
.expect("capacity overflow");
let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
if ptr.is_null() { ::alloc::oom() }
Vec { ptr: ptr as *mut T, len: 0, cap: capacity }
Vec { ptr: OwnedPtr(ptr as *mut T), len: 0, cap: capacity }
}
}
......@@ -284,7 +284,7 @@ pub fn from_fn<F>(length: uint, mut op: F) -> Vec<T> where F: FnMut(uint) -> T {
#[unstable = "needs finalization"]
pub unsafe fn from_raw_parts(ptr: *mut T, length: uint,
capacity: uint) -> Vec<T> {
Vec { ptr: ptr, len: length, cap: capacity }
Vec { ptr: OwnedPtr(ptr), len: length, cap: capacity }
}
/// Creates a vector by copying the elements from a raw pointer.
......@@ -795,7 +795,7 @@ pub fn shrink_to_fit(&mut self) {
if self.len == 0 {
if self.cap != 0 {
unsafe {
dealloc(self.ptr, self.cap)
dealloc(self.ptr.0, self.cap)
}
self.cap = 0;
}
......@@ -803,11 +803,11 @@ pub fn shrink_to_fit(&mut self) {
unsafe {
// Overflow check is unnecessary as the vector is already at
// least this large.
self.ptr = reallocate(self.ptr as *mut u8,
self.cap * mem::size_of::<T>(),
self.len * mem::size_of::<T>(),
mem::min_align_of::<T>()) as *mut T;
if self.ptr.is_null() { ::alloc::oom() }
self.ptr = OwnedPtr(reallocate(self.ptr.0 as *mut u8,
self.cap * mem::size_of::<T>(),
self.len * mem::size_of::<T>(),
mem::min_align_of::<T>()) as *mut T);
if self.ptr.0.is_null() { ::alloc::oom() }
}
self.cap = self.len;
}
......@@ -867,7 +867,7 @@ pub fn truncate(&mut self, len: uint) {
pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] {
unsafe {
mem::transmute(RawSlice {
data: self.ptr as *const T,
data: self.ptr.0 as *const T,
len: self.len,
})
}
......@@ -890,9 +890,9 @@ pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] {
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn into_iter(self) -> IntoIter<T> {
unsafe {
let ptr = self.ptr;
let ptr = self.ptr.0;
let cap = self.cap;
let begin = self.ptr as *const T;
let begin = self.ptr.0 as *const T;
let end = if mem::size_of::<T>() == 0 {
(ptr as uint + self.len()) as *const T
} else {
......@@ -1110,14 +1110,14 @@ pub fn push(&mut self, value: T) {
let size = max(old_size, 2 * mem::size_of::<T>()) * 2;
if old_size > size { panic!("capacity overflow") }
unsafe {
self.ptr = alloc_or_realloc(self.ptr, old_size, size);
if self.ptr.is_null() { ::alloc::oom() }
self.ptr = OwnedPtr(alloc_or_realloc(self.ptr.0, old_size, size));
if self.ptr.0.is_null() { ::alloc::oom() }
}
self.cap = max(self.cap, 2) * 2;
}
unsafe {
let end = (self.ptr as *const T).offset(self.len as int) as *mut T;
let end = self.ptr.0.offset(self.len as int);
ptr::write(&mut *end, value);
self.len += 1;
}
......@@ -1162,11 +1162,11 @@ pub fn pop(&mut self) -> Option<T> {
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn drain<'a>(&'a mut self) -> Drain<'a, T> {
unsafe {
let begin = self.ptr as *const T;
let begin = self.ptr.0 as *const T;
let end = if mem::size_of::<T>() == 0 {
(self.ptr as uint + self.len()) as *const T
(self.ptr.0 as uint + self.len()) as *const T
} else {
self.ptr.offset(self.len() as int) as *const T
self.ptr.0.offset(self.len() as int) as *const T
};
self.set_len(0);
Drain {
......@@ -1231,8 +1231,10 @@ fn grow_capacity(&mut self, capacity: uint) {
let size = capacity.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
unsafe {
self.ptr = alloc_or_realloc(self.ptr, self.cap * mem::size_of::<T>(), size);
if self.ptr.is_null() { ::alloc::oom() }
self.ptr = OwnedPtr(alloc_or_realloc(self.ptr.0,
self.cap * mem::size_of::<T>(),
size));
if self.ptr.0.is_null() { ::alloc::oom() }
}
self.cap = capacity;
}
......@@ -1355,7 +1357,7 @@ impl<T> AsSlice<T> for Vec<T> {
fn as_slice<'a>(&'a self) -> &'a [T] {
unsafe {
mem::transmute(RawSlice {
data: self.ptr as *const T,
data: self.ptr.0 as *const T,
len: self.len
})
}
......@@ -1380,7 +1382,7 @@ fn drop(&mut self) {
for x in self.iter() {
ptr::read(x);
}
dealloc(self.ptr, self.cap)
dealloc(self.ptr.0, self.cap)
}
}
}
......@@ -1418,7 +1420,7 @@ pub fn into_inner(mut self) -> Vec<T> {
for _x in self { }
let IntoIter { allocation, cap, ptr: _ptr, end: _end } = self;
mem::forget(self);
Vec { ptr: allocation, cap: cap, len: 0 }
Vec { ptr: OwnedPtr(allocation), cap: cap, len: 0 }
}
}
......
......@@ -15,30 +15,30 @@
pub use self::Ordering::*;
use intrinsics;
use cell::UnsafeCell;
use cell::{UnsafeCell, RacyCell};
/// A boolean type which can be safely shared between threads.
#[stable]
pub struct AtomicBool {
v: UnsafeCell<uint>,
v: RacyCell<uint>,
}
/// A signed integer type which can be safely shared between threads.
#[stable]
pub struct AtomicInt {
v: UnsafeCell<int>,
v: RacyCell<int>,
}
/// An unsigned integer type which can be safely shared between threads.
#[stable]
pub struct AtomicUint {
v: UnsafeCell<uint>,
v: RacyCell<uint>,
}
/// A raw pointer type which can be safely shared between threads.
#[stable]
pub struct AtomicPtr<T> {
p: UnsafeCell<uint>,
p: RacyCell<uint>,
}
/// Atomic memory orderings
......@@ -80,15 +80,15 @@ pub enum Ordering {
/// An `AtomicBool` initialized to `false`.
#[unstable = "may be renamed, pending conventions for static initalizers"]
pub const INIT_ATOMIC_BOOL: AtomicBool =
AtomicBool { v: UnsafeCell { value: 0 } };
AtomicBool { v: RacyCell(UnsafeCell { value: 0 }) };
/// An `AtomicInt` initialized to `0`.
#[unstable = "may be renamed, pending conventions for static initalizers"]
pub const INIT_ATOMIC_INT: AtomicInt =
AtomicInt { v: UnsafeCell { value: 0 } };
AtomicInt { v: RacyCell(UnsafeCell { value: 0 }) };
/// An `AtomicUint` initialized to `0`.
#[unstable = "may be renamed, pending conventions for static initalizers"]
pub const INIT_ATOMIC_UINT: AtomicUint =
AtomicUint { v: UnsafeCell { value: 0, } };
AtomicUint { v: RacyCell(UnsafeCell { value: 0 }) };
// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
const UINT_TRUE: uint = -1;
......@@ -108,7 +108,7 @@ impl AtomicBool {
#[stable]
pub fn new(v: bool) -> AtomicBool {
let val = if v { UINT_TRUE } else { 0 };
AtomicBool { v: UnsafeCell::new(val) }
AtomicBool { v: RacyCell::new(val) }
}
/// Loads a value from the bool.
......@@ -348,7 +348,7 @@ impl AtomicInt {
#[inline]
#[stable]
pub fn new(v: int) -> AtomicInt {
AtomicInt {v: UnsafeCell::new(v)}
AtomicInt {v: RacyCell::new(v)}
}
/// Loads a value from the int.
......@@ -534,7 +534,7 @@ impl AtomicUint {
#[inline]
#[stable]
pub fn new(v: uint) -> AtomicUint {
AtomicUint { v: UnsafeCell::new(v) }
AtomicUint { v: RacyCell::new(v) }
}
/// Loads a value from the uint.
......@@ -721,7 +721,7 @@ impl<T> AtomicPtr<T> {
#[inline]
#[stable]
pub fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p: UnsafeCell::new(p as uint) }
AtomicPtr { p: RacyCell::new(p as uint) }
}
/// Loads a value from the pointer.
......
......@@ -158,7 +158,7 @@
use clone::Clone;
use cmp::PartialEq;
use default::Default;
use kinds::{marker, Copy};
use kinds::{marker, Copy, Send, Sync};
use ops::{Deref, DerefMut, Drop};
use option::Option;
use option::Option::{None, Some};
......@@ -555,3 +555,28 @@ pub unsafe fn into_inner(self) -> T { self.value }
#[deprecated = "renamed to into_inner()"]
pub unsafe fn unwrap(self) -> T { self.into_inner() }
}
/// A version of `UnsafeCell` intended for use in concurrent data
/// structures (for example, you might put it in an `Arc`).
pub struct RacyCell<T>(pub UnsafeCell<T>);
impl<T> RacyCell<T> {
/// Creates a new `RacyCell` containing the given value.
pub fn new(value: T) -> RacyCell<T> {
RacyCell(UnsafeCell { value: value })
}
/// Returns a raw pointer to the underlying value.
pub unsafe fn get(&self) -> *mut T {
self.0.get()
}
/// Consumes the cell and returns the wrapped value.
pub unsafe fn into_inner(self) -> T {
self.0.into_inner()
}
}
impl<T:Send> Send for RacyCell<T> { }
impl<T> Sync for RacyCell<T> { } // Oh dear
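`RacyCell` lets a concurrent type keep `UnsafeCell` storage while asserting `Sync` itself; the `// Oh dear` acknowledges that the entire safety burden shifts to the embedding abstraction. A minimal hedged sketch of the intended pattern (the `Counter` type is invented for illustration):

pub struct Counter {
    value: RacyCell<uint>,
}

impl Counter {
    pub fn new() -> Counter {
        Counter { value: RacyCell::new(0) }
    }

    // Safety contract: the caller must provide external synchronization
    // (a lock, a single-writer protocol, ...) before touching the value.
    pub unsafe fn bump(&self) {
        let p = self.value.get();
        *p = *p + 1;
    }
}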
......@@ -92,6 +92,7 @@
use intrinsics;
use option::Option;
use option::Option::{Some, None};
use kinds::{Send, Sync};
use cmp::{PartialEq, Eq, Ord, PartialOrd, Equiv};
use cmp::Ordering;
......@@ -501,3 +502,35 @@ fn gt(&self, other: &*mut T) -> bool { *self > *other }
#[inline]
fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
/// A wrapper around a raw `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. This in turn implies that the
/// `OwnedPtr<T>` is `Send`/`Sync` if `T` is `Send`/`Sync`, unlike a
/// raw `*mut T` (which conveys no particular ownership semantics).
/// Useful for building abstractions like `Vec<T>` or `Box<T>`, which
/// internally use raw pointers to manage the memory that they own.
pub struct OwnedPtr<T>(pub *mut T);
/// `OwnedPtr` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `OwnedPtr` must enforce it.
impl<T:Send> Send for OwnedPtr<T> { }
/// `OwnedPtr` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `OwnedPtr` must enforce it.
impl<T:Sync> Sync for OwnedPtr<T> { }
impl<T> OwnedPtr<T> {
/// Returns a null OwnedPtr.
pub fn null() -> OwnedPtr<T> {
OwnedPtr(RawPtr::null())
}
/// Return an (unsafe) pointer into the memory owned by `self`.
pub unsafe fn offset(self, offset: int) -> *mut T {
(self.0 as *const T).offset(offset) as *mut T
}
}
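To see what the wrapper buys, compare a toy owned buffer with and without it; this is a sketch, not code from the commit:

// Storing a raw `*mut T` would make Buffer neither Send nor Sync under
// the new rules. Storing an OwnedPtr<T> instead means `Buffer<T>: Send`
// and `Buffer<T>: Sync` follow automatically from `T: Send` / `T: Sync`,
// which is exactly how `Vec<T>` and `Box<T>` regain their auto-traits.
struct Buffer<T> {
    ptr: OwnedPtr<T>,
    len: uint,
}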
......@@ -27,8 +27,9 @@
extern crate libc;
use std::c_vec::CVec;
use libc::{c_void, size_t, c_int};
use std::c_vec::CVec;
use std::ptr::OwnedPtr;
#[link(name = "miniz", kind = "static")]
extern {
......@@ -59,7 +60,8 @@ fn deflate_bytes_internal(bytes: &[u8], flags: c_int) -> Option<CVec<u8>> {
&mut outsz,
flags);
if !res.is_null() {
Some(CVec::new_with_dtor(res as *mut u8, outsz as uint, move|:| libc::free(res)))
let res = OwnedPtr(res);
Some(CVec::new_with_dtor(res.0 as *mut u8, outsz as uint, move|:| libc::free(res.0)))
} else {
None
}
......@@ -84,7 +86,8 @@ fn inflate_bytes_internal(bytes: &[u8], flags: c_int) -> Option<CVec<u8>> {
&mut outsz,
flags);
if !res.is_null() {
Some(CVec::new_with_dtor(res as *mut u8, outsz as uint, move|:| libc::free(res)))
let res = OwnedPtr(res);
Some(CVec::new_with_dtor(res.0 as *mut u8, outsz as uint, move|:| libc::free(res.0)))
} else {
None
}
......
......@@ -67,5 +67,6 @@
E0173,
E0174,
E0177,
E0178
E0178,
E0179
}
......@@ -31,6 +31,7 @@
use middle::traits;
use middle::mem_categorization as mc;
use middle::expr_use_visitor as euv;
use util::common::ErrorReported;
use util::nodemap::NodeSet;
use syntax::ast;
......@@ -119,12 +120,19 @@ fn check_static_type(&self, e: &ast::Expr) {
let ty = ty::node_id_to_type(self.tcx, e.id);
let infcx = infer::new_infer_ctxt(self.tcx);
let mut fulfill_cx = traits::FulfillmentContext::new();
fulfill_cx.register_builtin_bound(self.tcx, ty, ty::BoundSync,
traits::ObligationCause::dummy());
let env = ty::empty_parameter_environment();
if !fulfill_cx.select_all_or_error(&infcx, &env, self.tcx).is_ok() {
self.tcx.sess.span_err(e.span, "shared static items must have a \
type which implements Sync");
match traits::poly_trait_ref_for_builtin_bound(self.tcx, ty::BoundSync, ty) {
Ok(trait_ref) => {
let cause = traits::ObligationCause::new(e.span, e.id, traits::SharedStatic);
fulfill_cx.register_trait_ref(self.tcx, trait_ref, cause);
let env = ty::empty_parameter_environment();
match fulfill_cx.select_all_or_error(&infcx, &env, self.tcx) {
Ok(()) => { },
Err(ref errors) => {
traits::report_fulfillment_errors(&infcx, errors);
}
}
}
Err(ErrorReported) => { }
}
}
}
......
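With the rewritten `check_static_type`, a shared static is accepted only when its type implements `Sync`, and failures flow through the new `SharedStatic` cause. A hedged pair of examples (static names invented) of what the obligation accepts and rejects:

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUint, INIT_ATOMIC_UINT};

// ok: AtomicUint's RacyCell field is Sync, so AtomicUint is Sync
static COUNTER: AtomicUint = INIT_ATOMIC_UINT;

// error: shared static items must have a type which implements Sync
// static RAW: UnsafeCell<uint> = UnsafeCell { value: 0 };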
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::{FulfillmentError, FulfillmentErrorCode,
ObligationCauseCode, SelectionError,
PredicateObligation, OutputTypeParameterMismatch};
use middle::infer::InferCtxt;
use middle::ty::{mod};
use syntax::codemap::Span;
use util::ppaux::{Repr, UserString};
pub fn report_fulfillment_errors<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
errors: &Vec<FulfillmentError<'tcx>>) {
for error in errors.iter() {
report_fulfillment_error(infcx, error);
}
}
fn report_fulfillment_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
error: &FulfillmentError<'tcx>) {
match error.code {
FulfillmentErrorCode::CodeSelectionError(ref e) => {
report_selection_error(infcx, &error.obligation, e);
}
FulfillmentErrorCode::CodeAmbiguity => {
maybe_report_ambiguity(infcx, &error.obligation);
}
}
}
pub fn report_selection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
obligation: &PredicateObligation<'tcx>,
error: &SelectionError<'tcx>)
{
match *error {
SelectionError::Overflow => {
// We could track the stack here more precisely if we wanted, I imagine.
match obligation.trait_ref {
ty::Predicate::Trait(ref trait_ref) => {
let trait_ref =
infcx.resolve_type_vars_if_possible(&**trait_ref);
infcx.tcx.sess.span_err(
obligation.cause.span,
format!(
"overflow evaluating the trait `{}` for the type `{}`",
trait_ref.user_string(infcx.tcx),
trait_ref.self_ty().user_string(infcx.tcx))[]);
}
ty::Predicate::Equate(ref predicate) => {
let predicate = infcx.resolve_type_vars_if_possible(predicate);
let err = infcx.equality_predicate(obligation.cause.span,
&predicate).unwrap_err();
infcx.tcx.sess.span_err(
obligation.cause.span,
format!(
"the requirement `{}` is not satisfied (`{}`)",
predicate.user_string(infcx.tcx),
ty::type_err_to_str(infcx.tcx, &err)).as_slice());
}
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) => {
infcx.tcx.sess.span_err(
obligation.cause.span,
format!("overflow evaluating lifetime predicate").as_slice());
}
}
let current_limit = infcx.tcx.sess.recursion_limit.get();
let suggested_limit = current_limit * 2;
infcx.tcx.sess.span_note(
obligation.cause.span,
format!(
"consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
suggested_limit)[]);
note_obligation_cause(infcx, obligation);
}
SelectionError::Unimplemented => {
match obligation.trait_ref {
ty::Predicate::Trait(ref trait_ref) => {
let trait_ref =
infcx.resolve_type_vars_if_possible(
&**trait_ref);
if !ty::type_is_error(trait_ref.self_ty()) {
infcx.tcx.sess.span_err(
obligation.cause.span,
format!(
"the trait `{}` is not implemented for the type `{}`",
trait_ref.user_string(infcx.tcx),
trait_ref.self_ty().user_string(infcx.tcx)).as_slice());
note_obligation_cause(infcx, obligation);
}
}
ty::Predicate::Equate(ref predicate) => {
let predicate = infcx.resolve_type_vars_if_possible(predicate);
let err = infcx.equality_predicate(obligation.cause.span,
&predicate).unwrap_err();
infcx.tcx.sess.span_err(
obligation.cause.span,
format!(
"the requirement `{}` is not satisfied (`{}`)",
predicate.user_string(infcx.tcx),
ty::type_err_to_str(infcx.tcx, &err)).as_slice());
}
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) => {
let predicate = infcx.resolve_type_vars_if_possible(&obligation.trait_ref);
infcx.tcx.sess.span_err(
obligation.cause.span,
format!(
"the requirement `{}` is not satisfied",
predicate.user_string(infcx.tcx)).as_slice());
}
}
}
OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
let expected_trait_ref =
infcx.resolve_type_vars_if_possible(
&**expected_trait_ref);
let actual_trait_ref =
infcx.resolve_type_vars_if_possible(
&**actual_trait_ref);
if !ty::type_is_error(actual_trait_ref.self_ty()) {
infcx.tcx.sess.span_err(
obligation.cause.span,
format!(
"type mismatch: the type `{}` implements the trait `{}`, \
but the trait `{}` is required ({})",
expected_trait_ref.self_ty().user_string(infcx.tcx),
expected_trait_ref.user_string(infcx.tcx),
actual_trait_ref.user_string(infcx.tcx),
ty::type_err_to_str(infcx.tcx, e)).as_slice());
note_obligation_cause(infcx, obligation);
}
}
}
}
fn maybe_report_ambiguity<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
obligation: &PredicateObligation<'tcx>) {
// Unable to successfully determine, probably means
// insufficient type information, but could mean
// ambiguous impls. The latter *ought* to be a
// coherence violation, so we don't report it here.
let trait_ref = match obligation.trait_ref {
ty::Predicate::Trait(ref trait_ref) => {
infcx.resolve_type_vars_if_possible(&**trait_ref)
}
_ => {
infcx.tcx.sess.span_bug(
obligation.cause.span,
format!("ambiguity from something other than a trait: {}",
obligation.trait_ref.repr(infcx.tcx)).as_slice());
}
};
let self_ty = trait_ref.self_ty();
debug!("maybe_report_ambiguity(trait_ref={}, self_ty={}, obligation={})",
trait_ref.repr(infcx.tcx),
self_ty.repr(infcx.tcx),
obligation.repr(infcx.tcx));
let all_types = &trait_ref.substs().types;
if all_types.iter().any(|&t| ty::type_is_error(t)) {
} else if all_types.iter().any(|&t| ty::type_needs_infer(t)) {
// This is kind of a hack: it frequently happens that some earlier
// error prevents types from being fully inferred, and then we get
// a bunch of uninteresting errors saying something like "<generic
// #0> doesn't implement Sized". It may even be true that we
// could just skip over all checks where the self-ty is an
// inference variable, but I was afraid that there might be an
// inference variable created, registered as an obligation, and
// then never forced by writeback, and hence by skipping here we'd
// be ignoring the fact that we don't KNOW the type works
// out. Though even that would probably be harmless, given that
// we're only talking about builtin traits, which are known to be
// inhabited. But in any case I just threw in this check for
// has_errors() to be sure that compilation isn't happening
// anyway. In that case, why inundate the user.
if !infcx.tcx.sess.has_errors() {
if infcx.tcx.lang_items.sized_trait()
.map_or(false, |sized_id| sized_id == trait_ref.def_id()) {
infcx.tcx.sess.span_err(
obligation.cause.span,
format!(
"unable to infer enough type information about `{}`; type annotations \
required",
self_ty.user_string(infcx.tcx)).as_slice());
} else {
infcx.tcx.sess.span_err(
obligation.cause.span,
format!(
"unable to infer enough type information to \
locate the impl of the trait `{}` for \
the type `{}`; type annotations required",
trait_ref.user_string(infcx.tcx),
self_ty.user_string(infcx.tcx))[]);
note_obligation_cause(infcx, obligation);
}
}
} else if !infcx.tcx.sess.has_errors() {
// Ambiguity. Coherence should have reported an error.
infcx.tcx.sess.span_bug(
obligation.cause.span,
format!(
"coherence failed to report ambiguity: \
cannot locate the impl of the trait `{}` for \
the type `{}`",
trait_ref.user_string(infcx.tcx),
self_ty.user_string(infcx.tcx))[]);
}
}
fn note_obligation_cause<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
obligation: &PredicateObligation<'tcx>)
{
let trait_ref = match obligation.trait_ref {
ty::Predicate::Trait(ref trait_ref) => {
infcx.resolve_type_vars_if_possible(&**trait_ref)
}
_ => {
infcx.tcx.sess.span_bug(
obligation.cause.span,
format!("ambiguity from something other than a trait: {}",
obligation.trait_ref.repr(infcx.tcx)).as_slice());
}
};
note_obligation_cause_code(infcx,
&trait_ref,
obligation.cause.span,
&obligation.cause.code)
}
fn note_obligation_cause_code<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
trait_ref: &ty::PolyTraitRef<'tcx>,
cause_span: Span,
cause_code: &ObligationCauseCode<'tcx>)
{
let tcx = infcx.tcx;
let trait_name = ty::item_path_str(tcx, trait_ref.def_id());
match *cause_code {
ObligationCauseCode::MiscObligation => { }
ObligationCauseCode::ItemObligation(item_def_id) => {
let item_name = ty::item_path_str(tcx, item_def_id);
tcx.sess.span_note(
cause_span,
format!(
"the trait `{}` must be implemented because it is required by `{}`",
trait_name,
item_name).as_slice());
}
ObligationCauseCode::ObjectCastObligation(object_ty) => {
tcx.sess.span_note(
cause_span,
format!(
"the trait `{}` must be implemented for the cast \
to the object type `{}`",
trait_name,
infcx.ty_to_string(object_ty)).as_slice());
}
ObligationCauseCode::RepeatVec => {
tcx.sess.span_note(
cause_span,
"the `Copy` trait is required because the \
repeated element will be copied");
}
ObligationCauseCode::VariableType(_) => {
tcx.sess.span_note(
cause_span,
"all local variables must have a statically known size");
}
ObligationCauseCode::ReturnType => {
tcx.sess.span_note(
cause_span,
"the return type of a function must have a \
statically known size");
}
ObligationCauseCode::AssignmentLhsSized => {
tcx.sess.span_note(
cause_span,
"the left-hand-side of an assignment must have a statically known size");
}
ObligationCauseCode::StructInitializerSized => {
tcx.sess.span_note(
cause_span,
"structs must have a statically known size to be initialized");
}
ObligationCauseCode::ClosureCapture(var_id, closure_span, builtin_bound) => {
let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
let trait_name = ty::item_path_str(tcx, def_id);
let name = ty::local_var_name_str(tcx, var_id);
span_note!(tcx.sess, closure_span,
"the closure that captures `{}` requires that all captured variables \
implement the trait `{}`",
name,
trait_name);
}
ObligationCauseCode::FieldSized => {
span_note!(tcx.sess, cause_span,
"only the last field of a struct or enum variant \
may have a dynamically sized type")
}
ObligationCauseCode::ObjectSized => {
span_note!(tcx.sess, cause_span,
"only sized types can be made into objects");
}
ObligationCauseCode::SharedStatic => {
span_note!(tcx.sess, cause_span,
"shared static variables must have a type that implements `Sync`");
}
ObligationCauseCode::BuiltinDerivedObligation(ref root_trait_ref, ref root_cause_code) => {
let root_trait_ref =
infcx.resolve_type_vars_if_possible(&**root_trait_ref);
span_note!(tcx.sess, cause_span,
"the type `{}` must implement `{}` because it appears within the type `{}`",
trait_ref.self_ty().user_string(infcx.tcx),
trait_ref.user_string(infcx.tcx),
root_trait_ref.self_ty().user_string(infcx.tcx));
note_obligation_cause_code(infcx, &root_trait_ref, cause_span, &**root_cause_code);
}
ObligationCauseCode::ImplDerivedObligation(ref root_trait_ref, ref root_cause_code) => {
let root_trait_ref =
infcx.resolve_type_vars_if_possible(&**root_trait_ref);
span_note!(tcx.sess, cause_span,
"the type `{}` must implement `{}` due to the requirements \
on the impl of `{}` for the type `{}`",
trait_ref.self_ty().user_string(infcx.tcx),
trait_ref.user_string(infcx.tcx),
root_trait_ref.user_string(infcx.tcx),
root_trait_ref.self_ty().user_string(infcx.tcx));
note_obligation_cause_code(infcx, &root_trait_ref, cause_span, &**root_cause_code);
}
}
}
......@@ -305,7 +305,7 @@ fn process_predicate<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
let tcx = selcx.tcx();
match predicate.trait_ref {
ty::Predicate::Trait(ref trait_ref) => {
let trait_obligation = Obligation { cause: predicate.cause,
let trait_obligation = Obligation { cause: predicate.cause.clone(),
recursion_depth: predicate.recursion_depth,
trait_ref: trait_ref.clone() };
match selcx.select(&trait_obligation) {
......@@ -368,7 +368,9 @@ fn process_predicate<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
CodeSelectionError(Unimplemented)));
} else {
let ty::OutlivesPredicate(t_a, r_b) = binder.0;
register_region_obligation(tcx, t_a, r_b, predicate.cause, region_obligations);
register_region_obligation(tcx, t_a, r_b,
predicate.cause.clone(),
region_obligations);
}
true
}
......
......@@ -23,6 +23,7 @@
use syntax::ast;
use syntax::codemap::{Span, DUMMY_SP};
pub use self::error_reporting::report_fulfillment_errors;
pub use self::fulfill::{FulfillmentContext, RegionObligation};
pub use self::select::SelectionContext;
pub use self::select::SelectionCache;
......@@ -36,6 +37,7 @@
pub use self::util::poly_trait_ref_for_builtin_bound;
mod coherence;
mod error_reporting;
mod fulfill;
mod select;
mod util;
......@@ -57,7 +59,7 @@ pub struct Obligation<'tcx, T> {
pub type TraitObligation<'tcx> = Obligation<'tcx, Rc<ty::PolyTraitRef<'tcx>>>;
/// Why did we incur this obligation? Used for error reporting.
#[deriving(Copy, Clone)]
#[deriving(Clone)]
pub struct ObligationCause<'tcx> {
pub span: Span,
......@@ -72,7 +74,7 @@ pub struct ObligationCause<'tcx> {
pub code: ObligationCauseCode<'tcx>
}
#[deriving(Copy, Clone)]
#[deriving(Clone)]
pub enum ObligationCauseCode<'tcx> {
/// Not well classified or should be obvious from span.
MiscObligation,
......@@ -84,9 +86,6 @@ pub enum ObligationCauseCode<'tcx> {
/// Obligation incurred due to an object cast.
ObjectCastObligation(/* Object type */ Ty<'tcx>),
/// To implement drop, type must be sendable.
DropTrait,
/// Various cases where expressions must be sized/copy/etc:
AssignmentLhsSized, // L = X implies that L is Sized
StructInitializerSized, // S { ... } must be Sized
......@@ -103,6 +102,13 @@ pub enum ObligationCauseCode<'tcx> {
// Only Sized types can be made into objects
ObjectSized,
// static items must have `Sync` type
SharedStatic,
BuiltinDerivedObligation(Rc<ty::PolyTraitRef<'tcx>>, Rc<ObligationCauseCode<'tcx>>),
ImplDerivedObligation(Rc<ty::PolyTraitRef<'tcx>>, Rc<ObligationCauseCode<'tcx>>),
}
pub type Obligations<'tcx, O> = subst::VecPerParamSpace<Obligation<'tcx, O>>;
......
......@@ -18,6 +18,7 @@
use self::EvaluationResult::*;
use super::{PredicateObligation, Obligation, TraitObligation, ObligationCause};
use super::{ObligationCauseCode, BuiltinDerivedObligation};
use super::{SelectionError, Unimplemented, Overflow, OutputTypeParameterMismatch};
use super::{Selection};
use super::{SelectionResult};
......@@ -256,7 +257,7 @@ fn evaluate_builtin_bound_recursively<'o>(&mut self,
let obligation =
util::predicate_for_builtin_bound(
self.tcx(),
previous_stack.obligation.cause,
previous_stack.obligation.cause.clone(),
bound,
previous_stack.obligation.recursion_depth + 1,
ty);
......@@ -416,7 +417,7 @@ pub fn evaluate_impl(&mut self,
Ok(substs) => {
let vtable_impl = self.vtable_impl(impl_def_id,
substs,
obligation.cause,
obligation.cause.clone(),
obligation.recursion_depth + 1,
skol_map,
snapshot);
......@@ -663,13 +664,27 @@ fn assemble_candidates<'o>(&mut self,
// behavior, ignore user-defined impls here. This will
// go away by the time 1.0 is released.
if !self.tcx().sess.features.borrow().opt_out_copy {
try!(self.assemble_candidates_from_impls(obligation, &mut candidates));
try!(self.assemble_candidates_from_impls(obligation, &mut candidates.vec));
}
try!(self.assemble_builtin_bound_candidates(ty::BoundCopy,
stack,
&mut candidates));
}
Some(bound @ ty::BoundSend) |
Some(bound @ ty::BoundSync) => {
try!(self.assemble_candidates_from_impls(obligation, &mut candidates.vec));
// No explicit impls were declared for this type, consider the fallback rules.
if candidates.vec.is_empty() {
try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates));
}
}
Some(bound @ ty::BoundSized) => {
// Sized and Copy are always automatically computed.
try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates));
}
None => {
// For the time being, we ignore user-defined impls for builtin-bounds, other than
......@@ -677,11 +692,7 @@ fn assemble_candidates<'o>(&mut self,
// (And unboxed candidates only apply to the Fn/FnMut/etc traits.)
try!(self.assemble_unboxed_closure_candidates(obligation, &mut candidates));
try!(self.assemble_fn_pointer_candidates(obligation, &mut candidates));
try!(self.assemble_candidates_from_impls(obligation, &mut candidates));
}
Some(bound) => {
try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates));
try!(self.assemble_candidates_from_impls(obligation, &mut candidates.vec));
}
}
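The restructured arms above give explicit impls priority for `Send` and `Sync`: `assemble_candidates_from_impls` runs first, and the builtin fallback rules apply only when no user impl matched. That is what lets a type vouch for thread-safety it maintains internally, as the queue types later in this commit do; a sketch of the shape:

struct Node<T> {
    value: Option<T>,
    next: *mut Node<T>,
}

struct Queue<T> {
    head: *mut Node<T>,   // raw pointers are no longer Send/Sync by default
    tail: *mut Node<T>,
}

// Explicit opt-in: candidate selection finds these impls and never
// reaches the builtin rules that would reject the raw-pointer fields.
impl<T: Send> Send for Queue<T> { }
impl<T: Send> Sync for Queue<T> { }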
......@@ -816,7 +827,7 @@ fn assemble_fn_pointer_candidates(&mut self,
/// Search for impls that might apply to `obligation`.
fn assemble_candidates_from_impls(&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut CandidateSet<'tcx>)
candidate_vec: &mut Vec<Candidate<'tcx>>)
-> Result<(), SelectionError<'tcx>>
{
let all_impls = self.all_impls(obligation.trait_ref.def_id());
......@@ -827,7 +838,7 @@ fn assemble_candidates_from_impls(&mut self,
match self.match_impl(impl_def_id, obligation, snapshot,
&skol_map, Rc::new(skol_obligation_trait_ref)) {
Ok(_) => {
candidates.vec.push(ImplCandidate(impl_def_id));
candidate_vec.push(ImplCandidate(impl_def_id));
}
Err(()) => { }
}
......@@ -1007,7 +1018,7 @@ fn builtin_bound(&mut self,
}
}
ty::ty_ptr(ty::mt { ty: referent_ty, .. }) => { // *const T, *mut T
ty::ty_ptr(..) => { // *const T, *mut T
match bound {
ty::BoundCopy |
ty::BoundSized => {
......@@ -1016,7 +1027,8 @@ fn builtin_bound(&mut self,
ty::BoundSync |
ty::BoundSend => {
Ok(If(vec![referent_ty]))
// sync and send are not implemented for *const, *mut
Err(Unimplemented)
}
}
}
......@@ -1324,16 +1336,10 @@ fn nominal<'cx, 'tcx>(this: &mut SelectionContext<'cx, 'tcx>,
ty::BoundSync => {
if
Some(def_id) == tcx.lang_items.no_sync_bound() ||
Some(def_id) == tcx.lang_items.managed_bound()
{
return Err(Unimplemented)
} else if
Some(def_id) == tcx.lang_items.managed_bound() ||
Some(def_id) == tcx.lang_items.unsafe_type()
{
// FIXME(#13231) -- we currently consider `UnsafeCell<T>`
// to always be sync. This is to allow for types like `Queue`
// and `Mutex`, where `Queue<T> : Sync` is `T : Send`.
return Ok(If(Vec::new()));
return Err(Unimplemented)
}
}
......@@ -1408,7 +1414,7 @@ fn confirm_param_candidate(&mut self,
// where-clause trait-ref could be unified with the obligation
// trait-ref. Repeat that unification now without any
// transactional boundary; it should not fail.
match self.confirm_poly_trait_refs(obligation.cause,
match self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.trait_ref.clone(),
param.bound.clone()) {
Ok(()) => Ok(param),
......@@ -1447,10 +1453,11 @@ fn vtable_builtin_data(&mut self,
nested: Vec<Ty<'tcx>>)
-> VtableBuiltinData<PredicateObligation<'tcx>>
{
let derived_cause = self.derived_cause(obligation, BuiltinDerivedObligation);
let obligations = nested.iter().map(|&t| {
util::predicate_for_builtin_bound(
self.tcx(),
obligation.cause,
derived_cause.clone(),
bound,
obligation.recursion_depth + 1,
t)
......@@ -1463,7 +1470,7 @@ fn vtable_builtin_data(&mut self,
// as a special case, `Send` requires `'static`
if bound == ty::BoundSend {
obligations.push(Obligation {
cause: obligation.cause,
cause: obligation.cause.clone(),
recursion_depth: obligation.recursion_depth+1,
trait_ref: ty::Binder(ty::OutlivesPredicate(obligation.self_ty(),
ty::ReStatic)).as_predicate(),
......@@ -1497,7 +1504,7 @@ fn confirm_impl_candidate(&mut self,
let substs = self.rematch_impl(impl_def_id, obligation,
snapshot, &skol_map, Rc::new(skol_obligation_trait_ref));
debug!("confirm_impl_candidate substs={}", substs);
Ok(self.vtable_impl(impl_def_id, substs, obligation.cause,
Ok(self.vtable_impl(impl_def_id, substs, obligation.cause.clone(),
obligation.recursion_depth + 1, skol_map, snapshot))
})
}
......@@ -1571,10 +1578,9 @@ fn confirm_fn_pointer_candidate(&mut self,
substs: substs,
}));
try!(self.confirm_poly_trait_refs(obligation.cause,
try!(self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.trait_ref.clone(),
trait_ref));
Ok(self_ty)
}
......@@ -1617,7 +1623,7 @@ fn confirm_unboxed_closure_candidate(&mut self,
closure_def_id.repr(self.tcx()),
trait_ref.repr(self.tcx()));
self.confirm_poly_trait_refs(obligation.cause,
self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.trait_ref.clone(),
trait_ref)
}
......@@ -1809,7 +1815,7 @@ fn match_where_clause(&mut self,
/// back `Ok(T=int)`.
fn match_inherent_impl(&mut self,
impl_def_id: ast::DefId,
obligation_cause: ObligationCause,
obligation_cause: &ObligationCause,
obligation_self_ty: Ty<'tcx>)
-> Result<Substs<'tcx>,()>
{
......@@ -1842,7 +1848,7 @@ fn match_inherent_impl(&mut self,
}
fn match_self_types(&mut self,
cause: ObligationCause,
cause: &ObligationCause,
// The self type provided by the impl/caller-obligation:
provided_self_ty: Ty<'tcx>,
......@@ -1921,6 +1927,33 @@ fn fn_family_trait_kind(&self,
None
}
}
fn derived_cause(&self,
obligation: &TraitObligation<'tcx>,
variant: fn(Rc<ty::Binder<ty::TraitRef<'tcx>>>,
Rc<ObligationCauseCode<'tcx>>)
-> ObligationCauseCode<'tcx>)
-> ObligationCause<'tcx>
{
/*!
* Creates a cause for obligations that are derived from
* `obligation` by a recursive search (e.g., for a builtin
* bound, or eventually an `impl Foo for ..`). If `obligation`
* is itself a derived obligation, this is just a clone, but
* otherwise we create a "derived obligation" cause so as to
* keep track of the original root obligation for error
* reporting.
*/
if obligation.recursion_depth == 0 {
ObligationCause::new(obligation.cause.span,
obligation.trait_ref.def_id().node,
variant(obligation.trait_ref.clone(),
Rc::new(obligation.cause.code.clone())))
} else {
obligation.cause.clone()
}
}
}
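`derived_cause` is what keeps error messages anchored at the user's code when checking recurses: probing `Sync` for a struct descends into its fields, and each step wraps the original cause rather than replacing it. Schematically (types invented for illustration):

struct Inner { p: *mut int }
struct Outer { inner: Inner }

static S: Outer = Outer { inner: Inner { p: 0 as *mut int } };
// Checking `Outer: Sync` for the static recurses into `Inner` and then
// into `*mut int`; the failure there carries a BuiltinDerivedObligation
// whose root is still the SharedStatic obligation on `S`, so the chain of
// "must implement `Sync` because it appears within" notes points back to
// the static item itself.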
impl<'tcx> Repr<'tcx> for Candidate<'tcx> {
......
......@@ -260,7 +260,7 @@ pub fn predicates_for_generics<'tcx>(tcx: &ty::ctxt<'tcx>,
generic_bounds.repr(tcx));
generic_bounds.predicates.map(|predicate| {
Obligation { cause: cause,
Obligation { cause: cause.clone(),
recursion_depth: recursion_depth,
trait_ref: predicate.clone() }
})
......
......@@ -439,7 +439,7 @@ impl<'tcx,O> TypeFoldable<'tcx> for traits::Obligation<'tcx,O>
{
fn fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> traits::Obligation<'tcx, O> {
traits::Obligation {
cause: self.cause,
cause: self.cause.clone(),
recursion_depth: self.recursion_depth,
trait_ref: self.trait_ref.fold_with(folder),
}
......
......@@ -281,6 +281,8 @@ struct ModuleConfig {
time_passes: bool,
}
impl Send for ModuleConfig { }
impl ModuleConfig {
fn new(tm: TargetMachineRef, passes: Vec<String>) -> ModuleConfig {
ModuleConfig {
......
......@@ -216,32 +216,32 @@
use syntax::ast_util::PostExpansionMethod;
use syntax::parse::token::{mod, special_idents};
static DW_LANG_RUST: c_uint = 0x9000;
const DW_LANG_RUST: c_uint = 0x9000;
#[allow(non_upper_case_globals)]
static DW_TAG_auto_variable: c_uint = 0x100;
const DW_TAG_auto_variable: c_uint = 0x100;
#[allow(non_upper_case_globals)]
static DW_TAG_arg_variable: c_uint = 0x101;
const DW_TAG_arg_variable: c_uint = 0x101;
#[allow(non_upper_case_globals)]
static DW_ATE_boolean: c_uint = 0x02;
const DW_ATE_boolean: c_uint = 0x02;
#[allow(non_upper_case_globals)]
static DW_ATE_float: c_uint = 0x04;
const DW_ATE_float: c_uint = 0x04;
#[allow(non_upper_case_globals)]
static DW_ATE_signed: c_uint = 0x05;
const DW_ATE_signed: c_uint = 0x05;
#[allow(non_upper_case_globals)]
static DW_ATE_unsigned: c_uint = 0x07;
const DW_ATE_unsigned: c_uint = 0x07;
#[allow(non_upper_case_globals)]
static DW_ATE_unsigned_char: c_uint = 0x08;
const DW_ATE_unsigned_char: c_uint = 0x08;
static UNKNOWN_LINE_NUMBER: c_uint = 0;
static UNKNOWN_COLUMN_NUMBER: c_uint = 0;
const UNKNOWN_LINE_NUMBER: c_uint = 0;
const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
// ptr::null() doesn't work :(
static UNKNOWN_FILE_METADATA: DIFile = (0 as DIFile);
static UNKNOWN_SCOPE_METADATA: DIScope = (0 as DIScope);
const UNKNOWN_FILE_METADATA: DIFile = (0 as DIFile);
const UNKNOWN_SCOPE_METADATA: DIScope = (0 as DIScope);
static FLAGS_NONE: c_uint = 0;
const FLAGS_NONE: c_uint = 0;
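The `static` → `const` conversion here is a direct consequence of the new rules: a `static` is a real memory location whose type must now implement `Sync`, while a `const` is inlined at each use and incurs no such obligation; plain numeric table entries like these have no reason to occupy memory. Schematically:

const INLINED: u32 = 0x42;   // value substituted at each use; no Sync check
static SHARED: u32 = 0;      // memory location; its type must implement Sync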
//=-----------------------------------------------------------------------------
// Public Interface of debuginfo module
......
......@@ -60,6 +60,9 @@ pub struct ModuleTranslation {
pub llmod: ModuleRef,
}
impl Send for ModuleTranslation { }
impl Sync for ModuleTranslation { }
pub struct CrateTranslation {
pub modules: Vec<ModuleTranslation>,
pub metadata_module: ModuleTranslation,
......
......@@ -11,16 +11,14 @@
use check::{FnCtxt, structurally_resolved_type};
use middle::subst::{FnSpace};
use middle::traits;
use middle::traits::{SelectionError, OutputTypeParameterMismatch, Overflow, Unimplemented};
use middle::traits::{Obligation, ObligationCause};
use middle::traits::{FulfillmentError, CodeSelectionError, CodeAmbiguity};
use middle::traits::{PredicateObligation};
use middle::traits::report_fulfillment_errors;
use middle::ty::{mod, Ty};
use middle::infer;
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::Span;
use util::ppaux::{UserString, Repr, ty_to_string};
use util::ppaux::{Repr, ty_to_string};
pub fn check_object_cast<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
cast_expr: &ast::Expr,
......@@ -285,199 +283,7 @@ pub fn select_all_fcx_obligations_or_error(fcx: &FnCtxt) {
fcx);
match r {
Ok(()) => { }
Err(errors) => { report_fulfillment_errors(fcx, &errors); }
}
}
pub fn report_fulfillment_errors<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
errors: &Vec<FulfillmentError<'tcx>>) {
for error in errors.iter() {
report_fulfillment_error(fcx, error);
}
}
pub fn report_fulfillment_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
error: &FulfillmentError<'tcx>) {
match error.code {
CodeSelectionError(ref e) => {
report_selection_error(fcx, &error.obligation, e);
}
CodeAmbiguity => {
maybe_report_ambiguity(fcx, &error.obligation);
}
}
}
pub fn report_selection_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
obligation: &PredicateObligation<'tcx>,
error: &SelectionError<'tcx>)
{
match *error {
Overflow => {
// We could track the stack here more precisely if we wanted, I imagine.
let predicate =
fcx.infcx().resolve_type_vars_if_possible(&obligation.trait_ref);
fcx.tcx().sess.span_err(
obligation.cause.span,
format!(
"overflow evaluating the requirement `{}`",
predicate.user_string(fcx.tcx())).as_slice());
let current_limit = fcx.tcx().sess.recursion_limit.get();
let suggested_limit = current_limit * 2;
fcx.tcx().sess.span_note(
obligation.cause.span,
format!(
"consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
suggested_limit)[]);
note_obligation_cause(fcx, obligation);
}
Unimplemented => {
match obligation.trait_ref {
ty::Predicate::Trait(ref trait_ref) => {
let trait_ref = fcx.infcx().resolve_type_vars_if_possible(&**trait_ref);
if !ty::type_is_error(trait_ref.self_ty()) {
fcx.tcx().sess.span_err(
obligation.cause.span,
format!(
"the trait `{}` is not implemented for the type `{}`",
trait_ref.user_string(fcx.tcx()),
trait_ref.self_ty().user_string(fcx.tcx())).as_slice());
}
}
ty::Predicate::Equate(ref predicate) => {
let predicate = fcx.infcx().resolve_type_vars_if_possible(predicate);
let err = fcx.infcx().equality_predicate(obligation.cause.span,
&predicate).unwrap_err();
fcx.tcx().sess.span_err(
obligation.cause.span,
format!(
"the requirement `{}` is not satisfied (`{}`)",
predicate.user_string(fcx.tcx()),
ty::type_err_to_str(fcx.tcx(), &err)).as_slice());
}
ty::Predicate::RegionOutlives(ref predicate) => {
let predicate = fcx.infcx().resolve_type_vars_if_possible(predicate);
let err = fcx.infcx().region_outlives_predicate(obligation.cause.span,
&predicate).unwrap_err();
fcx.tcx().sess.span_err(
obligation.cause.span,
format!(
"the requirement `{}` is not satisfied (`{}`)",
predicate.user_string(fcx.tcx()),
ty::type_err_to_str(fcx.tcx(), &err)).as_slice());
}
ty::Predicate::TypeOutlives(ref predicate) => {
let predicate = fcx.infcx().resolve_type_vars_if_possible(predicate);
fcx.tcx().sess.span_err(
obligation.cause.span,
format!(
"the requirement `{}` is not satisfied",
predicate.user_string(fcx.tcx())).as_slice());
}
}
note_obligation_cause(fcx, obligation);
}
OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
let expected_trait_ref =
fcx.infcx().resolve_type_vars_if_possible(
&**expected_trait_ref);
let actual_trait_ref =
fcx.infcx().resolve_type_vars_if_possible(
&**actual_trait_ref);
if !ty::type_is_error(actual_trait_ref.self_ty()) {
fcx.tcx().sess.span_err(
obligation.cause.span,
format!(
"type mismatch: the type `{}` implements the trait `{}`, \
but the trait `{}` is required ({})",
expected_trait_ref.self_ty().user_string(fcx.tcx()),
expected_trait_ref.user_string(fcx.tcx()),
actual_trait_ref.user_string(fcx.tcx()),
ty::type_err_to_str(fcx.tcx(), e)).as_slice());
note_obligation_cause(fcx, obligation);
}
}
}
}
pub fn maybe_report_ambiguity<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
obligation: &PredicateObligation<'tcx>) {
// Unable to successfully determine, probably means
// insufficient type information, but could mean
// ambiguous impls. The latter *ought* to be a
// coherence violation, so we don't report it here.
let trait_ref = match obligation.trait_ref {
ty::Predicate::Trait(ref trait_ref) => {
fcx.infcx().resolve_type_vars_if_possible(&**trait_ref)
}
_ => {
fcx.tcx().sess.span_bug(
obligation.cause.span,
format!("ambiguity from something other than a trait: {}",
obligation.trait_ref.repr(fcx.tcx())).as_slice());
}
};
let self_ty = trait_ref.self_ty();
debug!("maybe_report_ambiguity(trait_ref={}, self_ty={}, obligation={})",
trait_ref.repr(fcx.tcx()),
self_ty.repr(fcx.tcx()),
obligation.repr(fcx.tcx()));
let all_types = &trait_ref.substs().types;
if all_types.iter().any(|&t| ty::type_is_error(t)) {
} else if all_types.iter().any(|&t| ty::type_needs_infer(t)) {
// This is kind of a hack: it frequently happens that some earlier
// error prevents types from being fully inferred, and then we get
// a bunch of uninteresting errors saying something like "<generic
// #0> doesn't implement Sized". It may even be true that we
// could just skip over all checks where the self-ty is an
// inference variable, but I was afraid that there might be an
// inference variable created, registered as an obligation, and
// then never forced by writeback, and hence by skipping here we'd
// be ignoring the fact that we don't KNOW the type works
// out. Though even that would probably be harmless, given that
// we're only talking about builtin traits, which are known to be
// inhabited. But in any case I just threw in this check for
// has_errors() to be sure that compilation isn't happening
// anyway. In that case, why inundate the user.
if !fcx.tcx().sess.has_errors() {
if fcx.ccx.tcx.lang_items.sized_trait()
.map_or(false, |sized_id| sized_id == trait_ref.def_id()) {
fcx.tcx().sess.span_err(
obligation.cause.span,
format!(
"unable to infer enough type information about `{}`; type annotations \
required",
self_ty.user_string(fcx.tcx()))[]);
} else {
fcx.tcx().sess.span_err(
obligation.cause.span,
format!(
"unable to infer enough type information to \
locate the impl of the trait `{}` for \
the type `{}`; type annotations required",
trait_ref.user_string(fcx.tcx()),
self_ty.user_string(fcx.tcx()))[]);
note_obligation_cause(fcx, obligation);
}
}
} else if !fcx.tcx().sess.has_errors() {
// Ambiguity. Coherence should have reported an error.
fcx.tcx().sess.span_bug(
obligation.cause.span,
format!(
"coherence failed to report ambiguity: \
cannot locate the impl of the trait `{}` for \
the type `{}`",
trait_ref.user_string(fcx.tcx()),
self_ty.user_string(fcx.tcx()))[]);
Err(errors) => { report_fulfillment_errors(fcx.infcx(), &errors); }
}
}
......@@ -490,7 +296,7 @@ pub fn select_fcx_obligations_where_possible(fcx: &FnCtxt)
.select_where_possible(fcx.infcx(), &fcx.inh.param_env, fcx)
{
Ok(()) => { }
Err(errors) => { report_fulfillment_errors(fcx, &errors); }
Err(errors) => { report_fulfillment_errors(fcx.infcx(), &errors); }
}
}
......@@ -504,83 +310,6 @@ pub fn select_new_fcx_obligations(fcx: &FnCtxt) {
.select_new_obligations(fcx.infcx(), &fcx.inh.param_env, fcx)
{
Ok(()) => { }
Err(errors) => { report_fulfillment_errors(fcx, &errors); }
}
}
fn note_obligation_cause<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
obligation: &PredicateObligation<'tcx>) {
let tcx = fcx.tcx();
match obligation.cause.code {
traits::MiscObligation => { }
traits::ItemObligation(item_def_id) => {
let item_name = ty::item_path_str(tcx, item_def_id);
tcx.sess.span_note(
obligation.cause.span,
format!(
"required by `{}`",
item_name).as_slice());
}
traits::ObjectCastObligation(object_ty) => {
tcx.sess.span_note(
obligation.cause.span,
format!(
"required for the cast to the object type `{}`",
fcx.infcx().ty_to_string(object_ty)).as_slice());
}
traits::RepeatVec => {
tcx.sess.span_note(
obligation.cause.span,
"the `Copy` trait is required because the \
repeated element will be copied");
}
traits::VariableType(_) => {
tcx.sess.span_note(
obligation.cause.span,
"all local variables must have a statically known size");
}
traits::ReturnType => {
tcx.sess.span_note(
obligation.cause.span,
"the return type of a function must have a \
statically known size");
}
traits::AssignmentLhsSized => {
tcx.sess.span_note(
obligation.cause.span,
"the left-hand-side of an assignment must have a statically known size");
}
traits::StructInitializerSized => {
tcx.sess.span_note(
obligation.cause.span,
"structs must have a statically known size to be initialized");
}
traits::DropTrait => {
span_note!(tcx.sess, obligation.cause.span,
"cannot implement a destructor on a \
structure or enumeration that does not satisfy Send");
span_help!(tcx.sess, obligation.cause.span,
"use \"#[unsafe_destructor]\" on the implementation \
to force the compiler to allow this");
}
traits::ClosureCapture(var_id, closure_span, builtin_bound) => {
let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
let trait_name = ty::item_path_str(tcx, def_id);
let name = ty::local_var_name_str(tcx, var_id);
span_note!(tcx.sess, closure_span,
"the closure that captures `{}` requires that all captured variables \"
implement the trait `{}`",
name,
trait_name);
}
traits::FieldSized => {
span_note!(tcx.sess, obligation.cause.span,
"only the last field of a struct or enum variant \
may have a dynamically sized type")
}
traits::ObjectSized => {
span_note!(tcx.sess, obligation.cause.span,
"only sized types can be made into objects");
}
Err(errors) => { report_fulfillment_errors(fcx.infcx(), &errors); }
}
}
......@@ -188,7 +188,7 @@ fn check_impl(&mut self,
match self_ty.sty {
ty::ty_struct(def_id, _) |
ty::ty_enum(def_id, _) => {
check_struct_safe_for_destructor(fcx, item.span, self_ty, def_id);
check_struct_safe_for_destructor(fcx, item.span, def_id);
}
_ => {
// Coherence already reports an error in this case.
......@@ -221,7 +221,7 @@ fn check_impl(&mut self,
let poly_trait_ref = ty::Binder(trait_ref);
let predicates = ty::predicates_for_trait_ref(fcx.tcx(), &poly_trait_ref);
for predicate in predicates.into_iter() {
fcx.register_predicate(traits::Obligation::new(cause, predicate));
fcx.register_predicate(traits::Obligation::new(cause.clone(), predicate));
}
});
}
......@@ -460,20 +460,16 @@ fn filter_to_trait_obligations<'tcx>(bounds: ty::GenericBounds<'tcx>)
fn check_struct_safe_for_destructor<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
span: Span,
self_ty: Ty<'tcx>,
struct_did: ast::DefId) {
let struct_tpt = ty::lookup_item_type(fcx.tcx(), struct_did);
if !struct_tpt.generics.has_type_params(subst::TypeSpace)
&& !struct_tpt.generics.has_region_params(subst::TypeSpace)
if struct_tpt.generics.has_type_params(subst::TypeSpace)
|| struct_tpt.generics.has_region_params(subst::TypeSpace)
{
let cause = traits::ObligationCause::new(span, fcx.body_id, traits::DropTrait);
fcx.register_builtin_bound(self_ty, ty::BoundSend, cause);
} else {
span_err!(fcx.tcx().sess, span, E0141,
"cannot implement a destructor on a structure \
with type parameters");
span_note!(fcx.tcx().sess, span,
"use \"#[unsafe_destructor]\" on the implementation \
to force the compiler to allow this");
with type parameters");
span_note!(fcx.tcx().sess, span,
"use \"#[unsafe_destructor]\" on the implementation \
to force the compiler to allow this");
}
}
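With the `DropTrait` cause removed, the destructor check no longer registers a `Send` bound for non-generic types; a generic struct simply may not have a destructor unless forced. A sketch of the escape hatch named in the message above:

struct Guard<T> { value: T }

// Without the attribute: error E0141, "cannot implement a destructor on
// a structure with type parameters".
#[unsafe_destructor]
impl<T> Drop for Guard<T> {
    fn drop(&mut self) {
        // release `self.value`'s resources here
    }
}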
......@@ -72,13 +72,12 @@
use fmt;
use hash;
use kinds::marker;
use mem;
use ptr;
use slice::{mod, ImmutableIntSlice};
use str;
use string::String;
use core::kinds::marker;
/// The representation of a C String.
///
......@@ -90,6 +89,9 @@ pub struct CString {
owns_buffer_: bool,
}
impl Send for CString { }
impl Sync for CString { }
impl Clone for CString {
/// Clone this CString into a new, uniquely owned CString. For safety
/// reasons, this is always a deep clone with the memory allocated
......
......@@ -23,7 +23,7 @@
use ops::{Deref, DerefMut, Drop};
use option::Option;
use option::Option::{Some, None};
use ptr::{RawPtr, copy_nonoverlapping_memory, zero_memory};
use ptr::{OwnedPtr, RawPtr, copy_nonoverlapping_memory, zero_memory};
use ptr;
use rt::heap::{allocate, deallocate};
......@@ -69,7 +69,7 @@
pub struct RawTable<K, V> {
capacity: uint,
size: uint,
hashes: *mut u64,
hashes: OwnedPtr<u64>,
// Because K/V do not appear directly in any of the types in the struct,
// inform rustc that in fact instances of K and V are reachable from here.
marker: marker::CovariantType<(K,V)>,
......@@ -563,7 +563,7 @@ unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
return RawTable {
size: 0,
capacity: 0,
hashes: 0 as *mut u64,
hashes: OwnedPtr::null(),
marker: marker::CovariantType,
};
}
......@@ -602,7 +602,7 @@ unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
RawTable {
capacity: capacity,
size: 0,
hashes: hashes,
hashes: OwnedPtr(hashes),
marker: marker::CovariantType,
}
}
......@@ -611,14 +611,14 @@ fn first_bucket_raw(&self) -> RawBucket<K, V> {
let hashes_size = self.capacity * size_of::<u64>();
let keys_size = self.capacity * size_of::<K>();
let buffer = self.hashes as *mut u8;
let buffer = self.hashes.0 as *mut u8;
let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
keys_size, min_align_of::<K>(),
min_align_of::<V>());
unsafe {
RawBucket {
hash: self.hashes,
hash: self.hashes.0,
key: buffer.offset(keys_offset as int) as *mut K,
val: buffer.offset(vals_offset as int) as *mut V
}
......@@ -631,7 +631,7 @@ fn first_bucket_raw(&self) -> RawBucket<K, V> {
pub fn new(capacity: uint) -> RawTable<K, V> {
unsafe {
let ret = RawTable::new_uninitialized(capacity);
zero_memory(ret.hashes, capacity);
zero_memory(ret.hashes.0, capacity);
ret
}
}
......@@ -651,7 +651,7 @@ fn raw_buckets(&self) -> RawBuckets<K, V> {
RawBuckets {
raw: self.first_bucket_raw(),
hashes_end: unsafe {
self.hashes.offset(self.capacity as int)
self.hashes.0.offset(self.capacity as int)
},
marker: marker::ContravariantLifetime,
}
......@@ -916,7 +916,7 @@ fn clone(&self) -> RawTable<K, V> {
#[unsafe_destructor]
impl<K, V> Drop for RawTable<K, V> {
fn drop(&mut self) {
if self.hashes.is_null() {
if self.hashes.0.is_null() {
return;
}
// This is done in reverse because we've likely partially taken
......@@ -936,7 +936,7 @@ fn drop(&mut self) {
vals_size, min_align_of::<V>());
unsafe {
deallocate(self.hashes as *mut u8, size, align);
deallocate(self.hashes.0 as *mut u8, size, align);
// Remember how everything was allocated out of one buffer
// during initialization? We only need one call to free here.
}
......
......@@ -17,6 +17,7 @@
use mem;
use clone::Clone;
#[deriving(Send, Sync)]
struct Inner {
thread: Thread,
woken: AtomicBool,
......
......@@ -321,7 +321,7 @@
use alloc::arc::Arc;
use core::kinds::marker;
use core::mem;
use core::cell::UnsafeCell;
use core::cell::{UnsafeCell, RacyCell};
pub use self::select::{Select, Handle};
use self::select::StartResult;
......@@ -359,10 +359,12 @@ mod $name {
#[unstable]
pub struct Receiver<T> {
inner: UnsafeCell<Flavor<T>>,
// can't share in an arc
_marker: marker::NoSync,
}
// The receiver port can be sent from place to place, so long as it
// is not used to receive non-sendable things.
impl<T:Send> Send for Receiver<T> { }
/// An iterator over messages on a receiver, this iterator will block
/// whenever `next` is called, waiting for a new message, and `None` will be
/// returned when the corresponding channel has hung up.
......@@ -376,15 +378,17 @@ pub struct Messages<'a, T:'a> {
#[unstable]
pub struct Sender<T> {
inner: UnsafeCell<Flavor<T>>,
// can't share in an arc
_marker: marker::NoSync,
}
// The send port can be sent from place to place, so long as it
// is not used to send non-sendable things.
impl<T:Send> Send for Sender<T> { }
/// The sending-half of Rust's synchronous channel type. This half can only be
/// owned by one task, but it can be cloned to send to other tasks.
#[unstable = "this type may be renamed, but it will always exist"]
pub struct SyncSender<T> {
inner: Arc<UnsafeCell<sync::Packet<T>>>,
inner: Arc<RacyCell<sync::Packet<T>>>,
// can't share in an arc
_marker: marker::NoSync,
}
......@@ -420,10 +424,10 @@ pub enum TrySendError<T> {
}
enum Flavor<T> {
Oneshot(Arc<UnsafeCell<oneshot::Packet<T>>>),
Stream(Arc<UnsafeCell<stream::Packet<T>>>),
Shared(Arc<UnsafeCell<shared::Packet<T>>>),
Sync(Arc<UnsafeCell<sync::Packet<T>>>),
Oneshot(Arc<RacyCell<oneshot::Packet<T>>>),
Stream(Arc<RacyCell<stream::Packet<T>>>),
Shared(Arc<RacyCell<shared::Packet<T>>>),
Sync(Arc<RacyCell<sync::Packet<T>>>),
}
#[doc(hidden)]
......@@ -474,7 +478,7 @@ fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>> {
/// ```
#[unstable]
pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
let a = Arc::new(UnsafeCell::new(oneshot::Packet::new()));
let a = Arc::new(RacyCell::new(oneshot::Packet::new()));
(Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
}
......@@ -514,7 +518,7 @@ pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
#[unstable = "this function may be renamed to more accurately reflect the type \
of channel that it is creating"]
pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
let a = Arc::new(UnsafeCell::new(sync::Packet::new(bound)));
let a = Arc::new(RacyCell::new(sync::Packet::new(bound)));
(SyncSender::new(a.clone()), Receiver::new(Sync(a)))
}
......@@ -526,7 +530,6 @@ impl<T: Send> Sender<T> {
fn new(inner: Flavor<T>) -> Sender<T> {
Sender {
inner: UnsafeCell::new(inner),
_marker: marker::NoSync,
}
}
......@@ -596,7 +599,8 @@ pub fn send_opt(&self, t: T) -> Result<(), T> {
if !(*p).sent() {
return (*p).send(t);
} else {
let a = Arc::new(UnsafeCell::new(stream::Packet::new()));
let a =
Arc::new(RacyCell::new(stream::Packet::new()));
match (*p).upgrade(Receiver::new(Stream(a.clone()))) {
oneshot::UpSuccess => {
let ret = (*a.get()).send(t);
......@@ -633,7 +637,7 @@ impl<T: Send> Clone for Sender<T> {
fn clone(&self) -> Sender<T> {
let (packet, sleeper, guard) = match *unsafe { self.inner() } {
Oneshot(ref p) => {
let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
let a = Arc::new(RacyCell::new(shared::Packet::new()));
unsafe {
let guard = (*a.get()).postinit_lock();
match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
......@@ -644,7 +648,7 @@ fn clone(&self) -> Sender<T> {
}
}
Stream(ref p) => {
let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
let a = Arc::new(RacyCell::new(shared::Packet::new()));
unsafe {
let guard = (*a.get()).postinit_lock();
match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
......@@ -688,7 +692,7 @@ fn drop(&mut self) {
////////////////////////////////////////////////////////////////////////////////
impl<T: Send> SyncSender<T> {
fn new(inner: Arc<UnsafeCell<sync::Packet<T>>>) -> SyncSender<T> {
fn new(inner: Arc<RacyCell<sync::Packet<T>>>) -> SyncSender<T> {
SyncSender { inner: inner, _marker: marker::NoSync }
}
......@@ -777,7 +781,7 @@ fn drop(&mut self) {
impl<T: Send> Receiver<T> {
fn new(inner: Flavor<T>) -> Receiver<T> {
Receiver { inner: UnsafeCell::new(inner), _marker: marker::NoSync }
Receiver { inner: UnsafeCell::new(inner) }
}
/// Blocks waiting for a value on this receiver
......
......@@ -76,6 +76,9 @@ pub struct Queue<T> {
tail: UnsafeCell<*mut Node<T>>,
}
impl<T:Send> Send for Queue<T> { }
impl<T:Send> Sync for Queue<T> { }
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
mem::transmute(box Node {
......
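These two impls are the pattern the commit repeats across every lock-free structure: a raw pointer field makes a type neither `Send` nor `Sync` by default, so each queue opts back in with the bound it can actually justify. The rule in miniature (hypothetical type, era syntax without `unsafe impl`):

```rust
struct RawQueue<T> {
    head: *mut T, // a raw pointer suppresses both Send and Sync
}

// Without these opt-ins, Arc<RawQueue<T>> no longer compiles once Sync
// is opt-in; with them, the author vouches for thread-safety by hand.
impl<T: Send> Send for RawQueue<T> { }
impl<T: Send> Sync for RawQueue<T> { }

fn is_sync<T: Sync>() {}

fn main() {
    is_sync::<RawQueue<uint>>(); // type-checks only via the opt-in
}
```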
......@@ -73,6 +73,10 @@ pub struct Queue<T> {
cache_subtractions: AtomicUint,
}
impl<T: Send> Send for Queue<T> { }
impl<T: Send> Sync for Queue<T> { }
impl<T: Send> Node<T> {
fn new() -> *mut Node<T> {
unsafe {
......
......@@ -53,6 +53,11 @@ pub struct Packet<T> {
lock: Mutex<State<T>>,
}
impl<T:Send> Send for Packet<T> { }
impl<T:Send> Sync for Packet<T> { }
#[deriving(Send)]
struct State<T> {
disconnected: bool, // Is the channel disconnected yet?
queue: Queue, // queue of senders waiting to send data
......@@ -88,6 +93,8 @@ struct Node {
next: *mut Node,
}
impl Send for Node {}
/// A simple ring-buffer
struct Buffer<T> {
buf: Vec<Option<T>>,
......
......@@ -26,6 +26,10 @@ pub struct Exclusive<T> {
data: UnsafeCell<T>,
}
impl<T:Send> Send for Exclusive<T> { }
impl<T:Send> Sync for Exclusive<T> { }
/// An RAII guard returned via `lock`
pub struct ExclusiveGuard<'a, T:'a> {
// FIXME #12808: strange name to try to avoid interfering with
......
......@@ -10,7 +10,7 @@
use prelude::*;
use cell::UnsafeCell;
use cell::{UnsafeCell, RacyCell};
use kinds::marker;
use sync::{poison, AsMutexGuard};
use sys_common::mutex as sys;
......@@ -70,9 +70,13 @@ pub struct Mutex<T> {
// time, so to ensure that the native mutex is used correctly we box the
// inner lock to give it a constant address.
inner: Box<StaticMutex>,
data: UnsafeCell<T>,
data: RacyCell<T>,
}
impl<T:Send> Send for Mutex<T> { }
impl<T:Send> Sync for Mutex<T> { }
/// The static mutex type is provided to allow for static allocation of mutexes.
///
/// Note that this is a separate type because using a Mutex correctly means that
......@@ -94,9 +98,10 @@ pub struct Mutex<T> {
/// }
/// // lock is unlocked here.
/// ```
#[deriving(Sync)]
pub struct StaticMutex {
lock: sys::Mutex,
poison: UnsafeCell<poison::Flag>,
poison: RacyCell<poison::Flag>,
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
......@@ -125,7 +130,7 @@ pub struct StaticMutexGuard {
/// other mutex constants.
pub const MUTEX_INIT: StaticMutex = StaticMutex {
lock: sys::MUTEX_INIT,
poison: UnsafeCell { value: poison::Flag { failed: false } },
poison: RacyCell(UnsafeCell { value: poison::Flag { failed: false } }),
};
impl<T: Send> Mutex<T> {
......@@ -133,7 +138,7 @@ impl<T: Send> Mutex<T> {
pub fn new(t: T) -> Mutex<T> {
Mutex {
inner: box MUTEX_INIT,
data: UnsafeCell::new(t),
data: RacyCell::new(t),
}
}
......
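A wrinkle visible in `MUTEX_INIT`: a static initializer in this era cannot call a function, so `RacyCell::new` is unavailable there and the wrapper's `UnsafeCell` field has to stay public for literal construction. Both forms side by side (a sketch under that assumption):

```rust
use std::cell::{UnsafeCell, RacyCell};

// Static initializer: literal construction only.
static FLAG: RacyCell<bool> = RacyCell(UnsafeCell { value: false });

// Runtime code can go through the constructor instead.
fn make_flag() -> RacyCell<bool> {
    RacyCell::new(false)
}

fn main() {
    unsafe { *FLAG.get() = true; }
}
```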
......@@ -35,6 +35,7 @@
/// // run initialization here
/// });
/// ```
#[deriving(Sync)]
pub struct Once {
mutex: StaticMutex,
cnt: atomic::AtomicInt,
......
......@@ -59,6 +59,10 @@ pub struct Helper<M> {
pub shutdown: UnsafeCell<bool>,
}
impl<M:Send> Send for Helper<M> { }
impl<M:Send> Sync for Helper<M> { }
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
......
......@@ -15,6 +15,7 @@
/// This is the thinnest cross-platform wrapper around OS mutexes. All usage of
/// this mutex is unsafe and it is recommended to instead use the safe wrapper
/// at the top level of the crate instead of this type.
#[deriving(Sync)]
pub struct Mutex(imp::Mutex);
/// Constant initializer for statically allocated mutexes.
......
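`#[deriving(Sync)]` shows up here and on several other non-generic wrappers; it is the transitional shorthand for writing the unconditional impl out by hand. Presumably equivalent forms (hypothetical type):

```rust
// Attribute form, as used on sys::Mutex above:
#[deriving(Sync)]
struct Handle { raw: *mut u8 }

// Spelled-out form, as used elsewhere in the commit:
struct Handle2 { raw: *mut u8 }
impl Sync for Handle2 { }

fn main() {}
```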
......@@ -162,6 +162,9 @@ pub struct sigaction {
sa_restorer: *mut libc::c_void,
}
impl ::kinds::Send for sigaction { }
impl ::kinds::Sync for sigaction { }
#[repr(C)]
#[cfg(target_word_size = "32")]
pub struct sigset_t {
......@@ -211,6 +214,9 @@ pub struct sigaction {
sa_resv: [libc::c_int, ..1],
}
impl ::kinds::Send for sigaction { }
impl ::kinds::Sync for sigaction { }
#[repr(C)]
pub struct sigset_t {
__val: [libc::c_ulong, ..32],
......
......@@ -8,11 +8,12 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use cell::UnsafeCell;
use cell::{UnsafeCell, RacyCell};
use sys::sync as ffi;
use sys_common::mutex;
pub struct Mutex { inner: UnsafeCell<ffi::pthread_mutex_t> }
#[deriving(Sync)]
pub struct Mutex { inner: RacyCell<ffi::pthread_mutex_t> }
#[inline]
pub unsafe fn raw(m: &Mutex) -> *mut ffi::pthread_mutex_t {
......@@ -20,7 +21,7 @@ pub unsafe fn raw(m: &Mutex) -> *mut ffi::pthread_mutex_t {
}
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER },
inner: RacyCell(UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER }),
};
impl Mutex {
......
......@@ -210,6 +210,7 @@ fn clone(&self) -> UnixStream {
// Unix Listener
////////////////////////////////////////////////////////////////////////////////
#[deriving(Sync)]
pub struct UnixListener {
inner: Inner,
path: CString,
......@@ -252,6 +253,7 @@ pub struct UnixAcceptor {
deadline: u64,
}
#[deriving(Sync)]
struct AcceptorInner {
listener: UnixListener,
reader: FileDesc,
......
......@@ -160,7 +160,7 @@ mod signal {
pub static SIGSTKSZ: libc::size_t = 8192;
pub static SIG_DFL: sighandler_t = 0i as sighandler_t;
pub const SIG_DFL: sighandler_t = 0i as sighandler_t;
// This definition is not as accurate as it could be, {si_addr} is
// actually a giant union. Currently we're only interested in that field,
......
......@@ -29,6 +29,7 @@
// TCP listeners
////////////////////////////////////////////////////////////////////////////////
#[deriving(Sync)]
pub struct TcpListener {
pub inner: FileDesc,
}
......@@ -89,6 +90,7 @@ pub struct TcpAcceptor {
deadline: u64,
}
#[deriving(Sync)]
struct AcceptorInner {
listener: TcpListener,
reader: FileDesc,
......
......@@ -127,9 +127,9 @@
use any::Any;
use borrow::IntoCow;
use boxed::Box;
use cell::UnsafeCell;
use cell::RacyCell;
use clone::Clone;
use kinds::Send;
use kinds::{Send, Sync};
use ops::{Drop, FnOnce};
use option::Option::{mod, Some, None};
use result::Result::{Err, Ok};
......@@ -211,7 +211,7 @@ pub fn spawn<T, F>(self, f: F) -> JoinGuard<T> where
}
fn spawn_inner<T: Send>(self, f: Thunk<(), T>) -> JoinGuard<T> {
let my_packet = Arc::new(UnsafeCell::new(None));
let my_packet = Arc::new(RacyCell::new(None));
let their_packet = my_packet.clone();
let Builder { name, stack_size, stdout, stderr } = self;
......@@ -283,13 +283,14 @@ fn spawn_inner<T: Send>(self, f: Thunk<(), T>) -> JoinGuard<T> {
}
}
#[deriving(Sync)]
struct Inner {
name: Option<String>,
lock: Mutex<bool>, // true when there is a buffered unpark
cvar: Condvar,
}
#[deriving(Clone)]
#[deriving(Clone, Sync)]
/// A handle to a thread.
pub struct Thread {
inner: Arc<Inner>,
......@@ -387,7 +388,7 @@ pub struct JoinGuard<T> {
native: imp::rust_thread,
thread: Thread,
joined: bool,
packet: Arc<UnsafeCell<Option<Result<T>>>>,
packet: Arc<RacyCell<Option<Result<T>>>>,
}
impl<T: Send> JoinGuard<T> {
......
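The `packet` field above is the motivating case for `RacyCell`: parent and child share one unsynchronized cell, and the race is ruled out externally because the child writes its result before exiting and the parent reads only after joining. A stripped-down sketch of that handshake (thread API shape assumed from the surrounding hunks):

```rust
use std::cell::RacyCell;
use std::sync::Arc;
use std::thread::Thread;

fn main() {
    let my_packet = Arc::new(RacyCell::new(None::<int>));
    let their_packet = my_packet.clone();

    let guard = Thread::spawn(move || {
        // Child thread: the only writer, and it writes exactly once.
        unsafe { *their_packet.get() = Some(42i); }
    });
    guard.join(); // join orders the write before the read below

    assert_eq!(unsafe { (*my_packet.get()).take() }, Some(42));
}
```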
......@@ -280,6 +280,8 @@ pub struct Key<T> {
pub dtor_running: UnsafeCell<bool>, // should be Cell
}
impl<T> ::kinds::Sync for Key<T> { }
#[doc(hidden)]
impl<T> Key<T> {
pub unsafe fn get(&'static self) -> Option<&'static T> {
......@@ -410,6 +412,8 @@ pub struct Key<T> {
pub os: OsStaticKey,
}
impl<T> ::kinds::Sync for Key<T> { }
struct Value<T: 'static> {
key: &'static Key<T>,
value: T,
......
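Both platform variants of `Key` get the same manual impl because a thread-local key is stored in an ordinary `static`, and under opt-in `Sync` a `static` only type-checks when its type is `Sync`; the test files later in this commit exercise exactly this rule. In miniature (hypothetical type):

```rust
struct KeyLike { ptr: *const u8 }

// Delete this impl and the static below stops compiling: a shared
// static must have a type that implements Sync.
impl Sync for KeyLike { }

static KEY: KeyLike = KeyLike { ptr: 0 as *const u8 };

fn main() {}
```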
......@@ -198,10 +198,12 @@ pub fn is_set(&'static self) -> bool {
mod imp {
use std::cell::UnsafeCell;
// FIXME: Should be a `Cell`, but that's not `Sync`
// SNAP c9f6d69 switch to `Cell`
#[doc(hidden)]
pub struct KeyInner<T> { pub inner: UnsafeCell<*mut T> }
#[cfg(not(stage0))] impl<T> ::kinds::Sync for KeyInner<T> { }
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
......@@ -222,6 +224,8 @@ pub struct KeyInner<T> {
pub marker: marker::InvariantType<T>,
}
#[cfg(not(stage0))] impl<T> ::kinds::Sync for KeyInner<T> { }
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
......
......@@ -976,6 +976,8 @@ enum TestEvent {
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
impl Send for MonitorMsg {}
fn run_tests<F>(opts: &TestOpts,
tests: Vec<TestDescAndFn> ,
mut callback: F) -> io::IoResult<()> where
......
......@@ -11,11 +11,15 @@
#![allow(dead_code)]
#![allow(unused_unsafe)]
use std::kinds::Sync;
struct Foo {
a: uint,
b: *const ()
}
impl Sync for Foo {}
fn foo<T>(a: T) -> T {
a
}
......
......@@ -10,8 +10,14 @@
use std::ptr;
static a: *const u8 = 0 as *const u8;
struct TestStruct {
x: *const u8
}
impl Sync for TestStruct {}
static a: TestStruct = TestStruct{x: 0 as *const u8};
pub fn main() {
assert_eq!(a, ptr::null());
assert_eq!(a.x, ptr::null());
}
......@@ -10,14 +10,16 @@
extern crate libc;
extern fn foo() {}
struct TestStruct {
x: *const libc::c_void
}
impl Sync for TestStruct {}
extern fn foo() {}
const x: extern "C" fn() = foo;
static y: *const libc::c_void = x as *const libc::c_void;
const a: &'static int = &10;
static b: *const int = a as *const int;
static y: TestStruct = TestStruct { x: x as *const libc::c_void };
pub fn main() {
assert_eq!(x as *const libc::c_void, y);
assert_eq!(a as *const int, b);
assert_eq!(x as *const libc::c_void, y.x);
}
......@@ -8,6 +8,12 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
static TEST_VALUE : *const [int; 2] = 0x1234 as *const [int; 2];
struct TestStruct {
x: *const [int; 2]
}
impl Sync for TestStruct {}
static TEST_VALUE : TestStruct = TestStruct{x: 0x1234 as *const [int; 2]};
fn main() {}
......@@ -9,10 +9,10 @@
// except according to those terms.
use std::kinds::marker;
use std::cell::UnsafeCell;
use std::cell::{UnsafeCell, RacyCell};
struct MyUnsafe<T> {
value: UnsafeCell<T>
value: RacyCell<T>
}
impl<T> MyUnsafe<T> {
......@@ -21,23 +21,25 @@ fn forbidden(&self) {}
enum UnsafeEnum<T> {
VariantSafe,
VariantUnsafe(UnsafeCell<T>)
VariantUnsafe(RacyCell<T>)
}
static STATIC1: UnsafeEnum<int> = UnsafeEnum::VariantSafe;
static STATIC2: UnsafeCell<int> = UnsafeCell { value: 1 };
const CONST: UnsafeCell<int> = UnsafeCell { value: 1 };
static STATIC2: RacyCell<int> = RacyCell(UnsafeCell { value: 1 });
const CONST: RacyCell<int> = RacyCell(UnsafeCell { value: 1 });
static STATIC3: MyUnsafe<int> = MyUnsafe{value: CONST};
static STATIC4: &'static UnsafeCell<int> = &STATIC2;
static STATIC4: &'static RacyCell<int> = &STATIC2;
struct Wrap<T> {
value: T
}
static UNSAFE: UnsafeCell<int> = UnsafeCell{value: 1};
static WRAPPED_UNSAFE: Wrap<&'static UnsafeCell<int>> = Wrap { value: &UNSAFE };
impl<T: Send> Sync for Wrap<T> {}
static UNSAFE: RacyCell<int> = RacyCell(UnsafeCell{value: 1});
static WRAPPED_UNSAFE: Wrap<&'static RacyCell<int>> = Wrap { value: &UNSAFE };
fn main() {
let a = &STATIC1;
......
......@@ -23,6 +23,7 @@ pub mod pipes {
use std::mem::{replace, swap};
use std::mem;
use std::task;
use std::kinds::Send;
pub struct Stuff<T> {
state: state,
......@@ -45,6 +46,8 @@ pub struct packet<T> {
payload: Option<T>
}
impl<T:Send> Send for packet<T> {}
pub fn packet<T:Send>() -> *const packet<T> {
unsafe {
let p: *const packet<T> = mem::transmute(box Stuff{
......@@ -230,8 +233,13 @@ pub mod pingpong {
use std::mem;
pub struct ping(::pipes::send_packet<pong>);
unsafe impl Send for ping {}
pub struct pong(::pipes::send_packet<ping>);
unsafe impl Send for pong {}
pub fn liberate_ping(p: ping) -> ::pipes::send_packet<pong> {
unsafe {
let _addr : *const ::pipes::send_packet<pong> = match &p {
......
......@@ -11,7 +11,14 @@
// This test checks that the `_` type placeholder works
// correctly for enabling type inference.
static CONSTEXPR: *const int = &413 as *const _;
struct TestStruct {
x: *const int
}
impl Sync for TestStruct {}
static CONSTEXPR: TestStruct = TestStruct{x: &413 as *const _};
pub fn main() {
let x: Vec<_> = range(0u, 5).collect();
......