提交 dde35e75 编写于 作者: B bors

Auto merge of #32800 - Manishearth:rollup, r=Manishearth

Rollup of 7 pull requests

- Successful merges: #32687, #32729, #32731, #32732, #32734, #32737, #32741
- Failed merges:
......@@ -717,18 +717,6 @@ if [ -n "$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; f
if [ -n "$CFG_ENABLE_ORBIT" ]; then putvar CFG_ENABLE_ORBIT; fi
# A magic value that allows the compiler to use unstable features
# during the bootstrap even when doing so would normally be an error
# because of feature staging or because the build turns on
# warnings-as-errors and unstable features default to warnings. The
# build has to match this key in an env var. Meant to be a mild
# deterrent from users just turning on unstable features on the stable
# channel.
# Basing CFG_BOOTSTRAP_KEY on CFG_BOOTSTRAP_KEY lets it get picked up
# during a Makefile reconfig.
CFG_BOOTSTRAP_KEY="${CFG_BOOTSTRAP_KEY-`date +%H:%M:%S`}"
putvar CFG_BOOTSTRAP_KEY
step_msg "looking for build programs"
probe_need CFG_CURLORWGET curl wget
......
......@@ -24,6 +24,17 @@ CFG_PRERELEASE_VERSION=.1
# versions in the same place
CFG_FILENAME_EXTRA=$(shell printf '%s' $(CFG_RELEASE)$(CFG_EXTRA_FILENAME) | $(CFG_HASH_COMMAND))
# A magic value that allows the compiler to use unstable features during the
# bootstrap even when doing so would normally be an error because of feature
# staging or because the build turns on warnings-as-errors and unstable features
# default to warnings. The build has to match this key in an env var.
#
# This value is keyed off the release to ensure that all compilers for one
# particular release have the same bootstrap key. Note that this is
# intentionally not "secure" by any definition, this is largely just a deterrent
# from users enabling unstable features on the stable compiler.
CFG_BOOTSTRAP_KEY=$(CFG_FILENAME_EXTRA)
ifeq ($(CFG_RELEASE_CHANNEL),stable)
# This is the normal semver version string, e.g. "0.12.0", "0.12.0-nightly"
CFG_RELEASE=$(CFG_RELEASE_NUM)
......
......@@ -43,10 +43,16 @@ pub fn cc2ar(cc: &Path, target: &str) -> PathBuf {
if target.contains("musl") || target.contains("msvc") {
PathBuf::from("ar")
} else {
let parent = cc.parent().unwrap();
let file = cc.file_name().unwrap().to_str().unwrap();
cc.parent().unwrap().join(file.replace("gcc", "ar")
.replace("cc", "ar")
.replace("clang", "ar"))
for suffix in &["gcc", "cc", "clang"] {
if let Some(idx) = file.rfind(suffix) {
let mut file = file[..idx].to_owned();
file.push_str("ar");
return parent.join(&file);
}
}
parent.join(file)
}
}
......
......@@ -124,9 +124,7 @@
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: Shared<ArcInner<T>>,
ptr: Shared<ArcInner<T>>,
}
#[stable(feature = "rust1", since = "1.0.0")]
......@@ -144,9 +142,7 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
#[unsafe_no_drop_flag]
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: Shared<ArcInner<T>>,
ptr: Shared<ArcInner<T>>,
}
#[stable(feature = "arc_weak", since = "1.4.0")]
......@@ -198,7 +194,7 @@ pub fn new(data: T) -> Arc<T> {
weak: atomic::AtomicUsize::new(1),
data: data,
};
Arc { _ptr: unsafe { Shared::new(Box::into_raw(x)) } }
Arc { ptr: unsafe { Shared::new(Box::into_raw(x)) } }
}
/// Unwraps the contained value if the `Arc<T>` has exactly one strong reference.
......@@ -230,11 +226,11 @@ pub fn try_unwrap(this: Self) -> Result<T, Self> {
atomic::fence(Acquire);
unsafe {
let ptr = *this._ptr;
let ptr = *this.ptr;
let elem = ptr::read(&(*ptr).data);
// Make a weak pointer to clean up the implicit strong-weak reference
let _weak = Weak { _ptr: this._ptr };
let _weak = Weak { ptr: this.ptr };
mem::forget(this);
Ok(elem)
......@@ -275,7 +271,7 @@ pub fn downgrade(this: &Self) -> Weak<T> {
// synchronize with the write coming from `is_unique`, so that the
// events prior to that write happen before this read.
match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
Ok(_) => return Weak { _ptr: this._ptr },
Ok(_) => return Weak { ptr: this.ptr },
Err(old) => cur = old,
}
}
......@@ -304,13 +300,13 @@ fn inner(&self) -> &ArcInner<T> {
// `ArcInner` structure itself is `Sync` because the inner data is
// `Sync` as well, so we're ok loaning out an immutable pointer to these
// contents.
unsafe { &**self._ptr }
unsafe { &**self.ptr }
}
// Non-inlined part of `drop`.
#[inline(never)]
unsafe fn drop_slow(&mut self) {
let ptr = *self._ptr;
let ptr = *self.ptr;
// Destroy the data at this time, even though we may not free the box
// allocation itself (there may still be weak pointers lying around).
......@@ -368,7 +364,7 @@ fn clone(&self) -> Arc<T> {
}
}
Arc { _ptr: self._ptr }
Arc { ptr: self.ptr }
}
}
......@@ -436,7 +432,7 @@ pub fn make_mut(this: &mut Self) -> &mut T {
// Materialize our own implicit weak pointer, so that it can clean
// up the ArcInner as needed.
let weak = Weak { _ptr: this._ptr };
let weak = Weak { ptr: this.ptr };
// mark the data itself as already deallocated
unsafe {
......@@ -444,7 +440,7 @@ pub fn make_mut(this: &mut Self) -> &mut T {
// here (due to zeroing) because data is no longer accessed by
// other threads (due to there being no more strong refs at this
// point).
let mut swap = Arc::new(ptr::read(&(**weak._ptr).data));
let mut swap = Arc::new(ptr::read(&(**weak.ptr).data));
mem::swap(this, &mut swap);
mem::forget(swap);
}
......@@ -457,7 +453,7 @@ pub fn make_mut(this: &mut Self) -> &mut T {
// As with `get_mut()`, the unsafety is ok because our reference was
// either unique to begin with, or became one upon cloning the contents.
unsafe {
let inner = &mut **this._ptr;
let inner = &mut **this.ptr;
&mut inner.data
}
}
......@@ -489,7 +485,7 @@ pub fn get_mut(this: &mut Self) -> Option<&mut T> {
// the Arc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
unsafe {
let inner = &mut **this._ptr;
let inner = &mut **this.ptr;
Some(&mut inner.data)
}
} else {
......@@ -558,7 +554,7 @@ fn drop(&mut self) {
// This structure has #[unsafe_no_drop_flag], so this drop glue may run
// more than once (but it is guaranteed to be zeroed after the first if
// it's run more than once)
let thin = *self._ptr as *const ();
let thin = *self.ptr as *const ();
if thin as usize == mem::POST_DROP_USIZE {
return;
......@@ -639,7 +635,7 @@ pub fn upgrade(&self) -> Option<Arc<T>> {
// Relaxed is valid for the same reason it is on Arc's Clone impl
match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
Ok(_) => return Some(Arc { _ptr: self._ptr }),
Ok(_) => return Some(Arc { ptr: self.ptr }),
Err(old) => n = old,
}
}
......@@ -648,7 +644,7 @@ pub fn upgrade(&self) -> Option<Arc<T>> {
#[inline]
fn inner(&self) -> &ArcInner<T> {
// See comments above for why this is "safe"
unsafe { &**self._ptr }
unsafe { &**self.ptr }
}
}
......@@ -682,7 +678,7 @@ fn clone(&self) -> Weak<T> {
}
}
return Weak { _ptr: self._ptr };
return Weak { ptr: self.ptr };
}
}
......@@ -714,7 +710,7 @@ impl<T: ?Sized> Drop for Weak<T> {
/// } // implicit drop
/// ```
fn drop(&mut self) {
let ptr = *self._ptr;
let ptr = *self.ptr;
let thin = ptr as *const ();
// see comments above for why this check is here
......@@ -886,7 +882,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&*self._ptr, f)
fmt::Pointer::fmt(&*self.ptr, f)
}
}
......@@ -931,7 +927,7 @@ impl<T> Weak<T> {
issue = "30425")]
pub fn new() -> Weak<T> {
unsafe {
Weak { _ptr: Shared::new(Box::into_raw(box ArcInner {
Weak { ptr: Shared::new(Box::into_raw(box ArcInner {
strong: atomic::AtomicUsize::new(0),
weak: atomic::AtomicUsize::new(1),
data: uninitialized(),
......
......@@ -184,9 +184,7 @@ struct RcBox<T: ?Sized> {
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rc<T: ?Sized> {
// FIXME #12808: strange names to try to avoid interfering with field
// accesses of the contained type via Deref
_ptr: Shared<RcBox<T>>,
ptr: Shared<RcBox<T>>,
}
#[stable(feature = "rust1", since = "1.0.0")]
......@@ -215,7 +213,7 @@ pub fn new(value: T) -> Rc<T> {
// pointers, which ensures that the weak destructor never frees
// the allocation while the strong destructor is running, even
// if the weak pointer is stored inside the strong one.
_ptr: Shared::new(Box::into_raw(box RcBox {
ptr: Shared::new(Box::into_raw(box RcBox {
strong: Cell::new(1),
weak: Cell::new(1),
value: value,
......@@ -254,7 +252,7 @@ pub fn try_unwrap(this: Self) -> Result<T, Self> {
// pointer while also handling drop logic by just crafting a
// fake Weak.
this.dec_strong();
let _weak = Weak { _ptr: this._ptr };
let _weak = Weak { ptr: this.ptr };
forget(this);
Ok(val)
}
......@@ -287,7 +285,7 @@ impl<T: ?Sized> Rc<T> {
#[stable(feature = "rc_weak", since = "1.4.0")]
pub fn downgrade(this: &Self) -> Weak<T> {
this.inc_weak();
Weak { _ptr: this._ptr }
Weak { ptr: this.ptr }
}
/// Get the number of weak references to this value.
......@@ -348,7 +346,7 @@ pub fn is_unique(this: &Self) -> bool {
#[stable(feature = "rc_unique", since = "1.4.0")]
pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if Rc::is_unique(this) {
let inner = unsafe { &mut **this._ptr };
let inner = unsafe { &mut **this.ptr };
Some(&mut inner.value)
} else {
None
......@@ -390,7 +388,7 @@ pub fn make_mut(this: &mut Self) -> &mut T {
} else if Rc::weak_count(this) != 0 {
// Can just steal the data, all that's left is Weaks
unsafe {
let mut swap = Rc::new(ptr::read(&(**this._ptr).value));
let mut swap = Rc::new(ptr::read(&(**this.ptr).value));
mem::swap(this, &mut swap);
swap.dec_strong();
// Remove implicit strong-weak ref (no need to craft a fake
......@@ -404,7 +402,7 @@ pub fn make_mut(this: &mut Self) -> &mut T {
// reference count is guaranteed to be 1 at this point, and we required
// the `Rc<T>` itself to be `mut`, so we're returning the only possible
// reference to the inner value.
let inner = unsafe { &mut **this._ptr };
let inner = unsafe { &mut **this.ptr };
&mut inner.value
}
}
......@@ -449,7 +447,7 @@ impl<T: ?Sized> Drop for Rc<T> {
#[unsafe_destructor_blind_to_params]
fn drop(&mut self) {
unsafe {
let ptr = *self._ptr;
let ptr = *self.ptr;
let thin = ptr as *const ();
if thin as usize != mem::POST_DROP_USIZE {
......@@ -490,7 +488,7 @@ impl<T: ?Sized> Clone for Rc<T> {
#[inline]
fn clone(&self) -> Rc<T> {
self.inc_strong();
Rc { _ptr: self._ptr }
Rc { ptr: self.ptr }
}
}
......@@ -691,7 +689,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&*self._ptr, f)
fmt::Pointer::fmt(&*self.ptr, f)
}
}
......@@ -711,9 +709,7 @@ fn from(t: T) -> Self {
#[unsafe_no_drop_flag]
#[stable(feature = "rc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: Shared<RcBox<T>>,
ptr: Shared<RcBox<T>>,
}
#[stable(feature = "rc_weak", since = "1.4.0")]
......@@ -749,7 +745,7 @@ pub fn upgrade(&self) -> Option<Rc<T>> {
None
} else {
self.inc_strong();
Some(Rc { _ptr: self._ptr })
Some(Rc { ptr: self.ptr })
}
}
}
......@@ -783,7 +779,7 @@ impl<T: ?Sized> Drop for Weak<T> {
/// ```
fn drop(&mut self) {
unsafe {
let ptr = *self._ptr;
let ptr = *self.ptr;
let thin = ptr as *const ();
if thin as usize != mem::POST_DROP_USIZE {
......@@ -816,7 +812,7 @@ impl<T: ?Sized> Clone for Weak<T> {
#[inline]
fn clone(&self) -> Weak<T> {
self.inc_weak();
Weak { _ptr: self._ptr }
Weak { ptr: self.ptr }
}
}
......@@ -848,7 +844,7 @@ impl<T> Weak<T> {
pub fn new() -> Weak<T> {
unsafe {
Weak {
_ptr: Shared::new(Box::into_raw(box RcBox {
ptr: Shared::new(Box::into_raw(box RcBox {
strong: Cell::new(0),
weak: Cell::new(1),
value: uninitialized(),
......@@ -910,8 +906,8 @@ fn inner(&self) -> &RcBox<T> {
// the contract anyway.
// This allows the null check to be elided in the destructor if we
// manipulated the reference count in the same function.
assume(!(*(&self._ptr as *const _ as *const *const ())).is_null());
&(**self._ptr)
assume(!(*(&self.ptr as *const _ as *const *const ())).is_null());
&(**self.ptr)
}
}
}
......@@ -924,8 +920,8 @@ fn inner(&self) -> &RcBox<T> {
// the contract anyway.
// This allows the null check to be elided in the destructor if we
// manipulated the reference count in the same function.
assume(!(*(&self._ptr as *const _ as *const *const ())).is_null());
&(**self._ptr)
assume(!(*(&self.ptr as *const _ as *const *const ())).is_null());
&(**self.ptr)
}
}
}
......
......@@ -390,8 +390,8 @@ pub fn borrow_state(&self) -> BorrowState {
pub fn borrow(&self) -> Ref<T> {
match BorrowRef::new(&self.borrow) {
Some(b) => Ref {
_value: unsafe { &*self.value.get() },
_borrow: b,
value: unsafe { &*self.value.get() },
borrow: b,
},
None => panic!("RefCell<T> already mutably borrowed"),
}
......@@ -438,8 +438,8 @@ pub fn borrow(&self) -> Ref<T> {
pub fn borrow_mut(&self) -> RefMut<T> {
match BorrowRefMut::new(&self.borrow) {
Some(b) => RefMut {
_value: unsafe { &mut *self.value.get() },
_borrow: b,
value: unsafe { &mut *self.value.get() },
borrow: b,
},
None => panic!("RefCell<T> already borrowed"),
}
......@@ -491,7 +491,7 @@ fn eq(&self, other: &RefCell<T>) -> bool {
impl<T: ?Sized + Eq> Eq for RefCell<T> {}
struct BorrowRef<'b> {
_borrow: &'b Cell<BorrowFlag>,
borrow: &'b Cell<BorrowFlag>,
}
impl<'b> BorrowRef<'b> {
......@@ -501,7 +501,7 @@ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRef<'b>> {
WRITING => None,
b => {
borrow.set(b + 1);
Some(BorrowRef { _borrow: borrow })
Some(BorrowRef { borrow: borrow })
},
}
}
......@@ -510,9 +510,9 @@ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRef<'b>> {
impl<'b> Drop for BorrowRef<'b> {
#[inline]
fn drop(&mut self) {
let borrow = self._borrow.get();
let borrow = self.borrow.get();
debug_assert!(borrow != WRITING && borrow != UNUSED);
self._borrow.set(borrow - 1);
self.borrow.set(borrow - 1);
}
}
......@@ -521,10 +521,10 @@ impl<'b> Clone for BorrowRef<'b> {
fn clone(&self) -> BorrowRef<'b> {
// Since this Ref exists, we know the borrow flag
// is not set to WRITING.
let borrow = self._borrow.get();
let borrow = self.borrow.get();
debug_assert!(borrow != WRITING && borrow != UNUSED);
self._borrow.set(borrow + 1);
BorrowRef { _borrow: self._borrow }
self.borrow.set(borrow + 1);
BorrowRef { borrow: self.borrow }
}
}
......@@ -534,10 +534,8 @@ fn clone(&self) -> BorrowRef<'b> {
/// See the [module-level documentation](index.html) for more.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Ref<'b, T: ?Sized + 'b> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_value: &'b T,
_borrow: BorrowRef<'b>,
value: &'b T,
borrow: BorrowRef<'b>,
}
#[stable(feature = "rust1", since = "1.0.0")]
......@@ -546,7 +544,7 @@ impl<'b, T: ?Sized> Deref for Ref<'b, T> {
#[inline]
fn deref(&self) -> &T {
self._value
self.value
}
}
......@@ -565,8 +563,8 @@ impl<'b, T: ?Sized> Ref<'b, T> {
#[inline]
pub fn clone(orig: &Ref<'b, T>) -> Ref<'b, T> {
Ref {
_value: orig._value,
_borrow: orig._borrow.clone(),
value: orig.value,
borrow: orig.borrow.clone(),
}
}
......@@ -594,8 +592,8 @@ pub fn map<U: ?Sized, F>(orig: Ref<'b, T>, f: F) -> Ref<'b, U>
where F: FnOnce(&T) -> &U
{
Ref {
_value: f(orig._value),
_borrow: orig._borrow,
value: f(orig.value),
borrow: orig.borrow,
}
}
......@@ -627,9 +625,9 @@ pub fn map<U: ?Sized, F>(orig: Ref<'b, T>, f: F) -> Ref<'b, U>
pub fn filter_map<U: ?Sized, F>(orig: Ref<'b, T>, f: F) -> Option<Ref<'b, U>>
where F: FnOnce(&T) -> Option<&U>
{
f(orig._value).map(move |new| Ref {
_value: new,
_borrow: orig._borrow,
f(orig.value).map(move |new| Ref {
value: new,
borrow: orig.borrow,
})
}
}
......@@ -667,8 +665,8 @@ pub fn map<U: ?Sized, F>(orig: RefMut<'b, T>, f: F) -> RefMut<'b, U>
where F: FnOnce(&mut T) -> &mut U
{
RefMut {
_value: f(orig._value),
_borrow: orig._borrow,
value: f(orig.value),
borrow: orig.borrow,
}
}
......@@ -706,24 +704,24 @@ pub fn map<U: ?Sized, F>(orig: RefMut<'b, T>, f: F) -> RefMut<'b, U>
pub fn filter_map<U: ?Sized, F>(orig: RefMut<'b, T>, f: F) -> Option<RefMut<'b, U>>
where F: FnOnce(&mut T) -> Option<&mut U>
{
let RefMut { _value, _borrow } = orig;
f(_value).map(move |new| RefMut {
_value: new,
_borrow: _borrow,
let RefMut { value, borrow } = orig;
f(value).map(move |new| RefMut {
value: new,
borrow: borrow,
})
}
}
struct BorrowRefMut<'b> {
_borrow: &'b Cell<BorrowFlag>,
borrow: &'b Cell<BorrowFlag>,
}
impl<'b> Drop for BorrowRefMut<'b> {
#[inline]
fn drop(&mut self) {
let borrow = self._borrow.get();
let borrow = self.borrow.get();
debug_assert!(borrow == WRITING);
self._borrow.set(UNUSED);
self.borrow.set(UNUSED);
}
}
......@@ -733,7 +731,7 @@ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRefMut<'b>> {
match borrow.get() {
UNUSED => {
borrow.set(WRITING);
Some(BorrowRefMut { _borrow: borrow })
Some(BorrowRefMut { borrow: borrow })
},
_ => None,
}
......@@ -745,10 +743,8 @@ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRefMut<'b>> {
/// See the [module-level documentation](index.html) for more.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RefMut<'b, T: ?Sized + 'b> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_value: &'b mut T,
_borrow: BorrowRefMut<'b>,
value: &'b mut T,
borrow: BorrowRefMut<'b>,
}
#[stable(feature = "rust1", since = "1.0.0")]
......@@ -757,7 +753,7 @@ impl<'b, T: ?Sized> Deref for RefMut<'b, T> {
#[inline]
fn deref(&self) -> &T {
self._value
self.value
}
}
......@@ -765,7 +761,7 @@ fn deref(&self) -> &T {
impl<'b, T: ?Sized> DerefMut for RefMut<'b, T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
self._value
self.value
}
}
......
......@@ -80,6 +80,8 @@ pub struct ArgType {
/// Only later will `original_ty` aka `%Foo` be used in the LLVM function
/// pointer type, without ever having introspected it.
pub ty: Type,
/// Signedness for integer types, None for other types
pub signedness: Option<bool>,
/// Coerced LLVM Type
pub cast: Option<Type>,
/// Dummy argument, which is emitted before the real argument
......@@ -94,6 +96,7 @@ fn new(original_ty: Type, ty: Type) -> ArgType {
kind: ArgKind::Direct,
original_ty: original_ty,
ty: ty,
signedness: None,
cast: None,
pad: None,
attrs: llvm::Attributes::default()
......@@ -123,6 +126,19 @@ pub fn ignore(&mut self) {
self.kind = ArgKind::Ignore;
}
/// Mark this argument for sign- or zero-extension up to `bits` wide,
/// matching the signedness recorded for integer types. Non-integer
/// arguments (signedness is `None`) are left untouched.
pub fn extend_integer_width_to(&mut self, bits: u64) {
    // Only integers have signedness; bail out early for anything else.
    let signed = match self.signedness {
        Some(s) => s,
        None => return,
    };
    // Extension is only needed when the type is narrower than requested.
    if self.ty.int_width() < bits {
        let ext = if signed {
            llvm::Attribute::SExt
        } else {
            llvm::Attribute::ZExt
        };
        self.attrs.set(ext);
    }
}
/// Whether this argument is passed indirectly (behind a pointer)
/// rather than by value in registers.
pub fn is_indirect(&self) -> bool {
    match self.kind {
        ArgKind::Indirect => true,
        _ => false,
    }
}
......@@ -268,6 +284,9 @@ pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
} else {
let mut arg = ArgType::new(type_of::type_of(ccx, ty),
type_of::sizing_type_of(ccx, ty));
if ty.is_integral() {
arg.signedness = Some(ty.is_signed());
}
if llsize_of_real(ccx, arg.ty) == 0 {
// For some forsaken reason, x86_64-pc-windows-gnu
// doesn't ignore zero-sized struct arguments.
......
......@@ -163,6 +163,7 @@ fn check_struct(ty: Type) -> Option<(Type, u64)> {
fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
if is_reg_ty(ret.ty) {
ret.extend_integer_width_to(32);
return;
}
if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) {
......@@ -190,6 +191,7 @@ fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
if is_reg_ty(arg.ty) {
arg.extend_integer_width_to(32);
return;
}
if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) {
......
......@@ -131,6 +131,7 @@ fn ty_size(ty: Type, align_fn: TyAlignFn) -> usize {
fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType, align_fn: TyAlignFn) {
if is_reg_ty(ret.ty) {
ret.extend_integer_width_to(32);
return;
}
let size = ty_size(ret.ty, align_fn);
......@@ -150,6 +151,7 @@ fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType, align_fn: TyAlignFn) {
fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, align_fn: TyAlignFn) {
if is_reg_ty(arg.ty) {
arg.extend_integer_width_to(32);
return;
}
let align = align_fn(arg.ty);
......
......@@ -86,6 +86,14 @@ fn ty_size(ty: Type) -> usize {
}
}
// Classify the return value: register-sized scalars stay in registers
// (with small integers widened to 32 bits); everything else is returned
// through a hidden out-pointer.
fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
    if !is_reg_ty(ret.ty) {
        ret.make_indirect(ccx);
    } else {
        ret.extend_integer_width_to(32);
    }
}
fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
let orig_offset = *offset;
let size = ty_size(arg.ty) * 8;
......@@ -98,6 +106,8 @@ fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
if !is_reg_ty(arg.ty) {
arg.cast = Some(struct_ty(ccx, arg.ty));
arg.pad = padding_ty(ccx, align, orig_offset);
} else {
arg.extend_integer_width_to(32);
}
}
......@@ -146,8 +156,8 @@ fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
}
pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
if !fty.ret.is_ignore() && !is_reg_ty(fty.ret.ty) {
fty.ret.make_indirect(ccx);
if !fty.ret.is_ignore() {
classify_ret_ty(ccx, &mut fty.ret);
}
let mut offset = if fty.ret.is_indirect() { 4 } else { 0 };
......
......@@ -82,6 +82,14 @@ fn ty_size(ty: Type) -> usize {
}
}
// Decide how the return value travels back to the caller: scalar types
// go in registers (integers widened to 32 bits per the ABI), while
// aggregates use an sret-style hidden pointer.
fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
    match is_reg_ty(ret.ty) {
        true => ret.extend_integer_width_to(32),
        false => ret.make_indirect(ccx),
    }
}
fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
let orig_offset = *offset;
let size = ty_size(arg.ty) * 8;
......@@ -94,6 +102,8 @@ fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
if !is_reg_ty(arg.ty) {
arg.cast = Some(struct_ty(ccx, arg.ty));
arg.pad = padding_ty(ccx, align, orig_offset);
} else {
arg.extend_integer_width_to(32);
}
}
......@@ -141,8 +151,8 @@ fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
}
pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
if !fty.ret.is_ignore() && !is_reg_ty(fty.ret.ty) {
fty.ret.make_indirect(ccx);
if !fty.ret.is_ignore() {
classify_ret_ty(ccx, &mut fty.ret);
}
let mut offset = if fty.ret.is_indirect() { 4 } else { 0 };
......
......@@ -153,6 +153,7 @@ fn check_struct(ty: Type) -> Option<(Type, u64)> {
fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
if is_reg_ty(ret.ty) {
ret.extend_integer_width_to(64);
return;
}
......@@ -187,6 +188,7 @@ fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
if is_reg_ty(arg.ty) {
arg.extend_integer_width_to(64);
return;
}
......
......@@ -15,25 +15,29 @@
use super::machine::*;
pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
if !fty.ret.is_ignore() && fty.ret.ty.kind() == Struct {
// Returning a structure. Most often, this will use
// a hidden first argument. On some platforms, though,
// small structs are returned as integers.
//
// Some links:
// http://www.angelcode.com/dev/callconv/callconv.html
// Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
let t = &ccx.sess().target.target;
if t.options.is_like_osx || t.options.is_like_windows {
match llsize_of_alloc(ccx, fty.ret.ty) {
1 => fty.ret.cast = Some(Type::i8(ccx)),
2 => fty.ret.cast = Some(Type::i16(ccx)),
4 => fty.ret.cast = Some(Type::i32(ccx)),
8 => fty.ret.cast = Some(Type::i64(ccx)),
_ => fty.ret.make_indirect(ccx)
if !fty.ret.is_ignore() {
if fty.ret.ty.kind() == Struct {
// Returning a structure. Most often, this will use
// a hidden first argument. On some platforms, though,
// small structs are returned as integers.
//
// Some links:
// http://www.angelcode.com/dev/callconv/callconv.html
// Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
let t = &ccx.sess().target.target;
if t.options.is_like_osx || t.options.is_like_windows {
match llsize_of_alloc(ccx, fty.ret.ty) {
1 => fty.ret.cast = Some(Type::i8(ccx)),
2 => fty.ret.cast = Some(Type::i16(ccx)),
4 => fty.ret.cast = Some(Type::i32(ccx)),
8 => fty.ret.cast = Some(Type::i64(ccx)),
_ => fty.ret.make_indirect(ccx)
}
} else {
fty.ret.make_indirect(ccx);
}
} else {
fty.ret.make_indirect(ccx);
fty.ret.extend_integer_width_to(32);
}
}
......@@ -42,6 +46,8 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
if arg.ty.kind() == Struct {
arg.make_indirect(ccx);
arg.attrs.set(Attribute::ByVal);
} else {
arg.extend_integer_width_to(32);
}
}
}
......@@ -400,6 +400,8 @@ fn x86_64_ty<F>(ccx: &CrateContext,
} else {
arg.cast = Some(llreg_ty(ccx, &cls));
}
} else {
arg.extend_integer_width_to(32);
}
}
......
......@@ -26,6 +26,8 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
8 => a.cast = Some(Type::i64(ccx)),
_ => a.make_indirect(ccx)
}
} else {
a.extend_integer_width_to(32);
}
};
......
......@@ -50,7 +50,9 @@
use syntax::parse::token;
const DW_LANG_RUST: c_uint = 0x9000;
// From DWARF 5.
// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1
const DW_LANG_RUST: c_uint = 0x1c;
#[allow(non_upper_case_globals)]
const DW_ATE_boolean: c_uint = 0x02;
#[allow(non_upper_case_globals)]
......
......@@ -63,10 +63,6 @@ pub trait MetadataExt {
#[stable(feature = "metadata_ext2", since = "1.8.0")]
fn st_ctime_nsec(&self) -> i64;
#[stable(feature = "metadata_ext2", since = "1.8.0")]
fn st_birthtime(&self) -> i64;
#[stable(feature = "metadata_ext2", since = "1.8.0")]
fn st_birthtime_nsec(&self) -> i64;
#[stable(feature = "metadata_ext2", since = "1.8.0")]
fn st_blksize(&self) -> u64;
#[stable(feature = "metadata_ext2", since = "1.8.0")]
fn st_blocks(&self) -> u64;
......@@ -129,12 +125,6 @@ fn st_ctime(&self) -> i64 {
fn st_ctime_nsec(&self) -> i64 {
self.as_inner().as_inner().st_ctime_nsec as i64
}
fn st_birthtime(&self) -> i64 {
self.as_inner().as_inner().st_birthtime as i64
}
fn st_birthtime_nsec(&self) -> i64 {
self.as_inner().as_inner().st_birthtime_nsec as i64
}
fn st_blksize(&self) -> u64 {
self.as_inner().as_inner().st_blksize as u64
}
......
......@@ -224,8 +224,13 @@ fn __gcc_personality_v0(state: uw::_Unwind_State,
context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
// Backtraces on ARM will call the personality routine with
// state == _US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND. In those cases
// we want to continue unwinding the stack, otherwise all our backtraces
// would end at __rust_try.
if (state as c_int & uw::_US_ACTION_MASK as c_int)
== uw::_US_VIRTUAL_UNWIND_FRAME as c_int { // search phase
== uw::_US_VIRTUAL_UNWIND_FRAME as c_int
&& (state as c_int & uw::_US_FORCE_UNWIND as c_int) == 0 { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
......
......@@ -36,6 +36,7 @@
static ENV_LOCK: StaticMutex = StaticMutex::new();
/// Returns the platform-specific value of errno
#[cfg(not(target_os = "dragonfly"))]
pub fn errno() -> i32 {
extern {
#[cfg_attr(any(target_os = "linux", target_os = "emscripten"),
......@@ -47,7 +48,6 @@ pub fn errno() -> i32 {
target_env = "newlib"),
link_name = "__errno")]
#[cfg_attr(target_os = "solaris", link_name = "___errno")]
#[cfg_attr(target_os = "dragonfly", link_name = "__dfly_error")]
#[cfg_attr(any(target_os = "macos",
target_os = "ios",
target_os = "freebsd"),
......@@ -60,6 +60,16 @@ pub fn errno() -> i32 {
}
}
// DragonFly BSD variant: errno is exposed directly as a thread-local
// static rather than via an accessor function like __errno_location.
#[cfg(target_os = "dragonfly")]
pub fn errno() -> i32 {
    extern {
        #[thread_local]
        static errno: c_int;
    }
    // Read the thread-local value and widen to the portable i32 return type.
    errno as i32
}
/// Gets a detailed string description for the given error number.
pub fn error_string(errno: i32) -> String {
extern {
......
......@@ -303,8 +303,13 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
}
}
#[cfg(not(target_os = "dragonfly"))]
pub type clock_t = libc::c_int;
#[cfg(target_os = "dragonfly")]
pub type clock_t = libc::c_ulong;
impl Timespec {
pub fn now(clock: libc::c_int) -> Timespec {
pub fn now(clock: clock_t) -> Timespec {
let mut t = Timespec {
t: libc::timespec {
tv_sec: 0,
......
......@@ -243,3 +243,7 @@ double rust_interesting_average(uint64_t n, ...) {
va_end(pairs);
return sum / n;
}
/* Widen a signed 8-bit value to 32 bits. The conversion sign-extends
 * (C integer promotion rules), so e.g. -1 stays -1. Used from Rust to
 * exercise signext handling at the FFI boundary. */
int32_t rust_int8_to_int32(int8_t x) {
    int32_t widened = x;
    return widened;
}
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FFI declaration for the C helper in rust_test_helpers; takes an i8
// and returns it widened to i32, exercising sign extension across the
// C ABI boundary.
#[link(name = "rust_test_helpers")]
extern {
    fn rust_int8_to_int32(_: i8) -> i32;
}
fn main() {
    // -1i8 is an all-ones byte; a correct ABI must sign-extend it so the
    // C helper hands back -1i32, not 255.
    let widened = unsafe { rust_int8_to_int32(-1) };
    assert!(widened == -1);
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册