Commit 4f99f37b authored by bors

Auto merge of #50880 - glandium:oom, r=SimonSapin

OOM handling changes

As discussed in https://github.com/rust-lang/rust/issues/49668#issuecomment-384893456 and the comments that follow it.

This does have codegen implications: even without the hooks, and with a handler that ignores its argument, the compiler doesn't eliminate the call to `rust_oom` with the `Layout`. Even if it managed to eliminate that, once the hooks are in place I don't know whether the compiler could figure out that the call can be skipped when no hook is ever set.

A couple of implementation notes:
- I went with explicit enums rather than bools because they make it clearer at call sites what is being requested.
- I didn't know what `feature` to put the hook-setting functions behind. (Surprisingly, compilation went through without any annotation on the functions.)
- There's probably some bikeshedding to do on the naming.

Cc: @Simonsapin, @sfackler
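To make the first note concrete, here is a minimal standalone sketch of the explicit-enum style; the enum names mirror the ones introduced in the diff below, and `reserve_demo` is a hypothetical stand-in for `RawVec::reserve_internal`:

```rust
// Explicit enums instead of a pair of bools: the caller's intent is readable.
#[derive(Debug)]
enum Fallibility { Fallible, Infallible }

#[derive(Debug)]
enum ReserveStrategy { Exact, Amortized }

use self::Fallibility::*;
use self::ReserveStrategy::*;

// Hypothetical stand-in for `RawVec::reserve_internal`.
fn reserve_demo(fallibility: Fallibility, strategy: ReserveStrategy) {
    println!("reserving: {:?}, {:?}", fallibility, strategy);
}

fn main() {
    reserve_demo(Infallible, Amortized); // what an infallible `reserve` requests
    reserve_demo(Fallible, Exact);       // what `try_reserve_exact` requests
}
```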
......@@ -115,7 +115,7 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
if !ptr.is_null() {
ptr as *mut u8
} else {
oom()
oom(layout)
}
}
}
......@@ -134,12 +134,13 @@ pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
}
#[rustc_allocator_nounwind]
pub fn oom() -> ! {
extern {
pub fn oom(layout: Layout) -> ! {
#[allow(improper_ctypes)]
extern "Rust" {
#[lang = "oom"]
fn oom_impl() -> !;
fn oom_impl(layout: Layout) -> !;
}
unsafe { oom_impl() }
unsafe { oom_impl(layout) }
}
#[cfg(test)]
......@@ -154,7 +155,7 @@ fn allocate_zeroed() {
unsafe {
let layout = Layout::from_size_align(1024, 1).unwrap();
let ptr = Global.alloc_zeroed(layout.clone())
.unwrap_or_else(|_| oom());
.unwrap_or_else(|_| oom(layout));
let mut i = ptr.cast::<u8>().as_ptr();
let end = i.offset(layout.size() as isize);
......
......@@ -553,7 +553,7 @@ unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
let layout = Layout::for_value(&*fake_ptr);
let mem = Global.alloc(layout)
.unwrap_or_else(|_| oom());
.unwrap_or_else(|_| oom(layout));
// Initialize the real ArcInner
let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner<T>;
......
......@@ -96,14 +96,15 @@ fn allocate_in(cap: usize, zeroed: bool, mut a: A) -> Self {
NonNull::<T>::dangling().as_opaque()
} else {
let align = mem::align_of::<T>();
let layout = Layout::from_size_align(alloc_size, align).unwrap();
let result = if zeroed {
a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap())
a.alloc_zeroed(layout)
} else {
a.alloc(Layout::from_size_align(alloc_size, align).unwrap())
a.alloc(layout)
};
match result {
Ok(ptr) => ptr,
Err(_) => oom(),
Err(_) => oom(layout),
}
};
......@@ -318,7 +319,7 @@ pub fn double(&mut self) {
new_size);
match ptr_res {
Ok(ptr) => (new_cap, ptr.cast().into()),
Err(_) => oom(),
Err(_) => oom(Layout::from_size_align_unchecked(new_size, cur.align())),
}
}
None => {
......@@ -327,7 +328,7 @@ pub fn double(&mut self) {
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
match self.a.alloc_array::<T>(new_cap) {
Ok(ptr) => (new_cap, ptr.into()),
Err(_) => oom(),
Err(_) => oom(Layout::array::<T>(new_cap).unwrap()),
}
}
};
......@@ -389,37 +390,7 @@ pub fn double_in_place(&mut self) -> bool {
pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize)
-> Result<(), CollectionAllocErr> {
unsafe {
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Don't actually need any more capacity.
// Wrapping in case they gave a bad `used_cap`.
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
return Ok(());
}
// Nothing we can really do about these checks :(
let new_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?;
let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
alloc_guard(new_layout.size())?;
let res = match self.current_layout() {
Some(layout) => {
debug_assert!(new_layout.align() == layout.align());
self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
}
None => self.a.alloc(new_layout),
};
self.ptr = res?.cast().into();
self.cap = new_cap;
Ok(())
}
self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact)
}
/// Ensures that the buffer contains at least enough space to hold
......@@ -443,9 +414,9 @@ pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize)
///
/// Aborts on OOM
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
match self.try_reserve_exact(used_cap, needed_extra_cap) {
match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocErr) => oom(),
Err(AllocErr) => unreachable!(),
Ok(()) => { /* yay */ }
}
}
......@@ -467,37 +438,7 @@ fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize)
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize)
-> Result<(), CollectionAllocErr> {
unsafe {
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Don't actually need any more capacity.
// Wrapping in case they give a bad `used_cap`
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
return Ok(());
}
let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)?;
let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
// FIXME: may crash and burn on over-reserve
alloc_guard(new_layout.size())?;
let res = match self.current_layout() {
Some(layout) => {
debug_assert!(new_layout.align() == layout.align());
self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
}
None => self.a.alloc(new_layout),
};
self.ptr = res?.cast().into();
self.cap = new_cap;
Ok(())
}
self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized)
}
/// Ensures that the buffer contains at least enough space to hold
......@@ -553,12 +494,12 @@ pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize)
/// # }
/// ```
pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
match self.try_reserve(used_cap, needed_extra_cap) {
match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocErr) => oom(),
Err(AllocErr) => unreachable!(),
Ok(()) => { /* yay */ }
}
}
}
}
/// Attempts to ensure that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
/// enough capacity, will reallocate in place enough space plus comfortable slack
......@@ -670,7 +611,7 @@ pub fn shrink_to_fit(&mut self, amount: usize) {
old_layout,
new_size) {
Ok(p) => self.ptr = p.cast().into(),
Err(_) => oom(),
Err(_) => oom(Layout::from_size_align_unchecked(new_size, align)),
}
}
self.cap = amount;
......@@ -678,6 +619,73 @@ pub fn shrink_to_fit(&mut self, amount: usize) {
}
}
enum Fallibility {
Fallible,
Infallible,
}
use self::Fallibility::*;
enum ReserveStrategy {
Exact,
Amortized,
}
use self::ReserveStrategy::*;
impl<T, A: Alloc> RawVec<T, A> {
fn reserve_internal(
&mut self,
used_cap: usize,
needed_extra_cap: usize,
fallibility: Fallibility,
strategy: ReserveStrategy,
) -> Result<(), CollectionAllocErr> {
unsafe {
use alloc::AllocErr;
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Don't actually need any more capacity.
// Wrapping in case they gave a bad `used_cap`.
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
return Ok(());
}
// Nothing we can really do about these checks :(
let new_cap = match strategy {
Exact => used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?,
Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?,
};
let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
alloc_guard(new_layout.size())?;
let res = match self.current_layout() {
Some(layout) => {
debug_assert!(new_layout.align() == layout.align());
self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
}
None => self.a.alloc(new_layout),
};
match (&res, fallibility) {
(Err(AllocErr), Infallible) => oom(new_layout),
_ => {}
}
self.ptr = res?.cast().into();
self.cap = new_cap;
Ok(())
}
}
}
impl<T> RawVec<T, Global> {
/// Converts the entire buffer into `Box<[T]>`.
///
......
......@@ -668,7 +668,7 @@ unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
let layout = Layout::for_value(&*fake_ptr);
let mem = Global.alloc(layout)
.unwrap_or_else(|_| oom());
.unwrap_or_else(|_| oom(layout));
// Initialize the real RcBox
let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut RcBox<T>;
......
......@@ -13,15 +13,59 @@
#![unstable(issue = "32838", feature = "allocator_api")]
#[doc(inline)] #[allow(deprecated)] pub use alloc_crate::alloc::Heap;
#[doc(inline)] pub use alloc_crate::alloc::{Global, oom};
#[doc(inline)] pub use alloc_crate::alloc::{Global, Layout, oom};
#[doc(inline)] pub use alloc_system::System;
#[doc(inline)] pub use core::alloc::*;
use core::sync::atomic::{AtomicPtr, Ordering};
use core::{mem, ptr};
static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
/// Registers a custom OOM hook, replacing any that was previously registered.
///
/// The OOM hook is invoked when an infallible memory allocation fails.
/// The default hook prints a message to standard error and aborts the
/// execution, but this behavior can be customized with the [`set_oom_hook`]
/// and [`take_oom_hook`] functions.
///
/// The hook is provided with a `Layout` struct which contains information
/// about the allocation that failed.
///
/// The OOM hook is a global resource.
pub fn set_oom_hook(hook: fn(Layout) -> !) {
HOOK.store(hook as *mut (), Ordering::SeqCst);
}
/// Unregisters the current OOM hook, returning it.
///
/// *See also the function [`set_oom_hook`].*
///
/// If no custom hook is registered, the default hook will be returned.
pub fn take_oom_hook() -> fn(Layout) -> ! {
let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst);
if hook.is_null() {
default_oom_hook
} else {
unsafe { mem::transmute(hook) }
}
}
fn default_oom_hook(layout: Layout) -> ! {
rtabort!("memory allocation of {} bytes failed", layout.size())
}
#[cfg(not(test))]
#[doc(hidden)]
#[lang = "oom"]
pub extern fn rust_oom() -> ! {
rtabort!("memory allocation failed");
pub extern fn rust_oom(layout: Layout) -> ! {
let hook = HOOK.load(Ordering::SeqCst);
let hook: fn(Layout) -> ! = if hook.is_null() {
default_oom_hook
} else {
unsafe { mem::transmute(hook) }
};
hook(layout)
}
#[cfg(not(test))]
......
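For illustration, a hedged usage sketch of the hook API added above. It assumes the new functions are reachable under the `allocator_api` feature gate (the PR description notes the exact gate is still undecided), and `my_oom_hook` is a hypothetical handler:

```rust
#![feature(allocator_api)]

use std::alloc::{set_oom_hook, take_oom_hook, Layout};
use std::process;

// Hypothetical hook matching the `fn(Layout) -> !` signature: report the
// failed layout on stderr, then abort.
fn my_oom_hook(layout: Layout) -> ! {
    eprintln!("allocation of {} bytes (align {}) failed",
              layout.size(), layout.align());
    process::abort()
}

fn main() {
    set_oom_hook(my_oom_hook);
    // `take_oom_hook` unregisters the current hook and returns it; when no
    // custom hook is registered, the default (abort with a message) comes back.
    let _previous = take_oom_hook();
}
```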
......@@ -11,7 +11,7 @@
use self::Entry::*;
use self::VacantEntryState::*;
use alloc::{CollectionAllocErr, oom};
use alloc::CollectionAllocErr;
use cell::Cell;
use borrow::Borrow;
use cmp::max;
......@@ -23,8 +23,10 @@
use ops::{Deref, Index};
use sys;
use super::table::{self, Bucket, EmptyBucket, FullBucket, FullBucketMut, RawTable, SafeHash};
use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucketMut, RawTable,
SafeHash};
use super::table::BucketState::{Empty, Full};
use super::table::Fallibility::{Fallible, Infallible};
const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two
......@@ -783,11 +785,11 @@ fn raw_capacity(&self) -> usize {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
match self.try_reserve(additional) {
match self.reserve_internal(additional, Infallible) {
Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
Err(CollectionAllocErr::AllocErr) => oom(),
Err(CollectionAllocErr::AllocErr) => unreachable!(),
Ok(()) => { /* yay */ }
}
}
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
......@@ -809,17 +811,24 @@ pub fn reserve(&mut self, additional: usize) {
/// ```
#[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
self.reserve_internal(additional, Fallible)
}
fn reserve_internal(&mut self, additional: usize, fallibility: Fallibility)
-> Result<(), CollectionAllocErr> {
let remaining = self.capacity() - self.len(); // this can't overflow
if remaining < additional {
let min_cap = self.len().checked_add(additional)
let min_cap = self.len()
.checked_add(additional)
.ok_or(CollectionAllocErr::CapacityOverflow)?;
let raw_cap = self.resize_policy.try_raw_capacity(min_cap)?;
self.try_resize(raw_cap)?;
self.try_resize(raw_cap, fallibility)?;
} else if self.table.tag() && remaining <= self.len() {
// Probe sequence is too long and table is half full,
// resize early to reduce probing length.
let new_capacity = self.table.capacity() * 2;
self.try_resize(new_capacity)?;
self.try_resize(new_capacity, fallibility)?;
}
Ok(())
}
......@@ -831,11 +840,21 @@ pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocEr
/// 2) Ensure `new_raw_cap` is a power of two or zero.
#[inline(never)]
#[cold]
fn try_resize(&mut self, new_raw_cap: usize) -> Result<(), CollectionAllocErr> {
fn try_resize(
&mut self,
new_raw_cap: usize,
fallibility: Fallibility,
) -> Result<(), CollectionAllocErr> {
assert!(self.table.size() <= new_raw_cap);
assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0);
let mut old_table = replace(&mut self.table, RawTable::try_new(new_raw_cap)?);
let mut old_table = replace(
&mut self.table,
match fallibility {
Infallible => RawTable::new(new_raw_cap),
Fallible => RawTable::try_new(new_raw_cap)?,
}
);
let old_size = old_table.size();
if old_table.size() == 0 {
......
......@@ -711,11 +711,21 @@ fn test_offset_calculation() {
assert_eq!(calculate_offsets(6, 12, 4), (8, 20, false));
}
pub(crate) enum Fallibility {
Fallible,
Infallible,
}
use self::Fallibility::*;
impl<K, V> RawTable<K, V> {
/// Does not initialize the buckets. The caller should ensure they,
/// at the very least, set every hash to EMPTY_BUCKET.
/// Returns an error if it cannot allocate or capacity overflows.
unsafe fn try_new_uninitialized(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
unsafe fn new_uninitialized_internal(
capacity: usize,
fallibility: Fallibility,
) -> Result<RawTable<K, V>, CollectionAllocErr> {
if capacity == 0 {
return Ok(RawTable {
size: 0,
......@@ -754,8 +764,12 @@ unsafe fn try_new_uninitialized(capacity: usize) -> Result<RawTable<K, V>, Colle
return Err(CollectionAllocErr::CapacityOverflow);
}
let buffer = Global.alloc(Layout::from_size_align(size, alignment)
.map_err(|_| CollectionAllocErr::CapacityOverflow)?)?;
let layout = Layout::from_size_align(size, alignment)
.map_err(|_| CollectionAllocErr::CapacityOverflow)?;
let buffer = Global.alloc(layout).map_err(|e| match fallibility {
Infallible => oom(layout),
Fallible => e,
})?;
Ok(RawTable {
capacity_mask: capacity.wrapping_sub(1),
......@@ -768,9 +782,9 @@ unsafe fn try_new_uninitialized(capacity: usize) -> Result<RawTable<K, V>, Colle
/// Does not initialize the buckets. The caller should ensure they,
/// at the very least, set every hash to EMPTY_BUCKET.
unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
match Self::try_new_uninitialized(capacity) {
match Self::new_uninitialized_internal(capacity, Infallible) {
Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
Err(CollectionAllocErr::AllocErr) => oom(),
Err(CollectionAllocErr::AllocErr) => unreachable!(),
Ok(table) => { table }
}
}
......@@ -794,22 +808,29 @@ fn raw_bucket_at(&self, index: usize) -> RawBucket<K, V> {
}
}
/// Tries to create a new raw table from a given capacity. If it cannot allocate,
/// it returns with AllocErr.
pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
fn new_internal(
capacity: usize,
fallibility: Fallibility,
) -> Result<RawTable<K, V>, CollectionAllocErr> {
unsafe {
let ret = RawTable::try_new_uninitialized(capacity)?;
let ret = RawTable::new_uninitialized_internal(capacity, fallibility)?;
ptr::write_bytes(ret.hashes.ptr(), 0, capacity);
Ok(ret)
}
}
/// Tries to create a new raw table from a given capacity. If it cannot allocate,
/// it returns with AllocErr.
pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
Self::new_internal(capacity, Fallible)
}
/// Creates a new raw table from a given capacity. All buckets are
/// initially empty.
pub fn new(capacity: usize) -> RawTable<K, V> {
match Self::try_new(capacity) {
match Self::new_internal(capacity, Infallible) {
Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
Err(CollectionAllocErr::AllocErr) => oom(),
Err(CollectionAllocErr::AllocErr) => unreachable!(),
Ok(table) => { table }
}
}
......
......@@ -10,11 +10,11 @@
#![feature(allocator_api, nonnull)]
use std::alloc::{Alloc, Global, oom};
use std::alloc::{Alloc, Global, Layout, oom};
fn main() {
unsafe {
let ptr = Global.alloc_one::<i32>().unwrap_or_else(|_| oom());
let ptr = Global.alloc_one::<i32>().unwrap_or_else(|_| oom(Layout::new::<i32>()));
*ptr.as_ptr() = 4;
assert_eq!(*ptr.as_ptr(), 4);
Global.dealloc_one(ptr);
......
......@@ -50,7 +50,7 @@ unsafe fn allocate(layout: Layout) -> *mut u8 {
println!("allocate({:?})", layout);
}
let ret = Global.alloc(layout.clone()).unwrap_or_else(|_| oom());
let ret = Global.alloc(layout).unwrap_or_else(|_| oom(layout));
if PRINT {
println!("allocate({:?}) = {:?}", layout, ret);
......@@ -72,8 +72,8 @@ unsafe fn reallocate(ptr: *mut u8, old: Layout, new: Layout) -> *mut u8 {
println!("reallocate({:?}, old={:?}, new={:?})", ptr, old, new);
}
let ret = Global.realloc(NonNull::new_unchecked(ptr).as_opaque(), old.clone(), new.size())
.unwrap_or_else(|_| oom());
let ret = Global.realloc(NonNull::new_unchecked(ptr).as_opaque(), old, new.size())
.unwrap_or_else(|_| oom(Layout::from_size_align_unchecked(new.size(), old.align())));
if PRINT {
println!("reallocate({:?}, old={:?}, new={:?}) = {:?}",
......
......@@ -32,8 +32,8 @@ struct Ccx {
fn alloc<'a>(_bcx : &'a arena) -> &'a Bcx<'a> {
unsafe {
let ptr = Global.alloc(Layout::new::<Bcx>())
.unwrap_or_else(|_| oom());
let layout = Layout::new::<Bcx>();
let ptr = Global.alloc(layout).unwrap_or_else(|_| oom(layout));
&*(ptr.as_ptr() as *const _)
}
}
......