Unverified commit 26bf0ef0, authored by Klim Tsoutsman and committed by GitHub

Merge branch 'rust-lang:master' into master

......@@ -4795,9 +4795,9 @@ checksum = "da73c8f77aebc0e40c300b93f0a5f1bece7a248a36eee287d4e095f35c7b7d6e"
[[package]]
name = "socket2"
version = "0.4.0"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2"
checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad"
dependencies = [
"libc",
"winapi",
......
......@@ -353,7 +353,11 @@ pub trait FnAbiLlvmExt<'tcx> {
impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
let args_capacity: usize = self.args.iter().map(|arg|
// Ignore "extra" args from the call site for C variadic functions.
// Only the "fixed" args are part of the LLVM function signature.
let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };
let args_capacity: usize = args.iter().map(|arg|
if arg.pad.is_some() { 1 } else { 0 } +
if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
).sum();
......@@ -371,7 +375,7 @@ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
}
};
for arg in &self.args {
for arg in args {
// add padding
if let Some(ty) = arg.pad {
llargument_tys.push(ty.llvm_type(cx));
......
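The two hunks above implement the rule stated in the comment: for a C variadic function, only the `fixed_count` leading arguments belong to the LLVM function signature, while call-site extras ride the variadic tail. A minimal self-contained sketch of that slicing, using an illustrative struct rather than the real `FnAbi` (`FnAbiSketch` and `signature_args` are hypothetical names):

```rust
struct FnAbiSketch {
    c_variadic: bool,
    fixed_count: usize,
    args: Vec<&'static str>,
}

fn signature_args(abi: &FnAbiSketch) -> &[&'static str] {
    // Mirrors the diff: ignore "extra" call-site args for C variadic fns.
    if abi.c_variadic { &abi.args[..abi.fixed_count] } else { &abi.args }
}

fn main() {
    // printf("%d\n", 42): `fmt` is fixed; `42` arrives via the variadic tail,
    // so the LLVM signature is `i32 (i8*, ...)` with one declared argument.
    let printf_call =
        FnAbiSketch { c_variadic: true, fixed_count: 1, args: vec!["fmt", "42"] };
    assert_eq!(signature_args(&printf_call), &["fmt"][..]);
}
```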
......@@ -78,8 +78,14 @@ pub(crate) unsafe fn codegen(
.enumerate()
.map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
.collect::<Vec<_>>();
let ret =
llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
let ret = llvm::LLVMRustBuildCall(
llbuilder,
ty,
callee,
args.as_ptr(),
args.len() as c_uint,
None,
);
llvm::LLVMSetTailCall(ret, True);
if output.is_some() {
llvm::LLVMBuildRet(llbuilder, ret);
......@@ -121,7 +127,8 @@ pub(crate) unsafe fn codegen(
.enumerate()
.map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
.collect::<Vec<_>>();
let ret = llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
let ret =
llvm::LLVMRustBuildCall(llbuilder, ty, callee, args.as_ptr(), args.len() as c_uint, None);
llvm::LLVMSetTailCall(ret, True);
llvm::LLVMBuildRetVoid(llbuilder);
llvm::LLVMDisposeBuilder(llbuilder);
......
......@@ -464,7 +464,7 @@ fn inline_asm_call(
alignstack,
llvm::AsmDialect::from_generic(dia),
);
let call = bx.call(v, inputs, None);
let call = bx.call(fty, v, inputs, None);
// Store mark in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
......
......@@ -200,6 +200,7 @@ fn switch(
fn invoke(
&mut self,
llty: &'ll Type,
llfn: &'ll Value,
args: &[&'ll Value],
then: &'ll BasicBlock,
......@@ -208,13 +209,14 @@ fn invoke(
) -> &'ll Value {
debug!("invoke {:?} with args ({:?})", llfn, args);
let args = self.check_call("invoke", llfn, args);
let args = self.check_call("invoke", llty, llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe {
llvm::LLVMRustBuildInvoke(
self.llbuilder,
llty,
llfn,
args.as_ptr(),
args.len() as c_uint,
......@@ -369,8 +371,7 @@ fn checked_binop(
},
};
let intrinsic = self.get_intrinsic(&name);
let res = self.call(intrinsic, &[lhs, rhs], None);
let res = self.call_intrinsic(name, &[lhs, rhs]);
(self.extract_value(res, 0), self.extract_value(res, 1))
}
......@@ -695,8 +696,7 @@ fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Val
let float_width = self.cx.float_width(src_ty);
let int_width = self.cx.int_width(dest_ty);
let name = format!("llvm.fptoui.sat.i{}.f{}", int_width, float_width);
let intrinsic = self.get_intrinsic(&name);
return Some(self.call(intrinsic, &[val], None));
return Some(self.call_intrinsic(&name, &[val]));
}
None
......@@ -708,8 +708,7 @@ fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Val
let float_width = self.cx.float_width(src_ty);
let int_width = self.cx.int_width(dest_ty);
let name = format!("llvm.fptosi.sat.i{}.f{}", int_width, float_width);
let intrinsic = self.get_intrinsic(&name);
return Some(self.call(intrinsic, &[val], None));
return Some(self.call_intrinsic(&name, &[val]));
}
None
......@@ -743,8 +742,7 @@ fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
_ => None,
};
if let Some(name) = name {
let intrinsic = self.get_intrinsic(name);
return self.call(intrinsic, &[val], None);
return self.call_intrinsic(name, &[val]);
}
}
}
......@@ -766,8 +764,7 @@ fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
_ => None,
};
if let Some(name) = name {
let intrinsic = self.get_intrinsic(name);
return self.call(intrinsic, &[val], None);
return self.call_intrinsic(name, &[val]);
}
}
}
......@@ -1115,12 +1112,17 @@ fn instrprof_increment(
);
let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
let llty = self.cx.type_func(
&[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
self.cx.type_void(),
);
let args = &[fn_name, hash, num_counters, index];
let args = self.check_call("call", llfn, args);
let args = self.check_call("call", llty, llfn, args);
unsafe {
let _ = llvm::LLVMRustBuildCall(
self.llbuilder,
llty,
llfn,
args.as_ptr() as *const &llvm::Value,
args.len() as c_uint,
......@@ -1131,19 +1133,21 @@ fn instrprof_increment(
fn call(
&mut self,
llty: &'ll Type,
llfn: &'ll Value,
args: &[&'ll Value],
funclet: Option<&Funclet<'ll>>,
) -> &'ll Value {
debug!("call {:?} with args ({:?})", llfn, args);
let args = self.check_call("call", llfn, args);
let args = self.check_call("call", llty, llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe {
llvm::LLVMRustBuildCall(
self.llbuilder,
llty,
llfn,
args.as_ptr() as *const &llvm::Value,
args.len() as c_uint,
......@@ -1313,15 +1317,10 @@ fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
fn check_call<'b>(
&mut self,
typ: &str,
fn_ty: &'ll Type,
llfn: &'ll Value,
args: &'b [&'ll Value],
) -> Cow<'b, [&'ll Value]> {
let mut fn_ty = self.cx.val_ty(llfn);
// Strip off pointers
while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
fn_ty = self.cx.element_type(fn_ty);
}
assert!(
self.cx.type_kind(fn_ty) == TypeKind::Function,
"builder::{} not passed a function, but {:?}",
......@@ -1362,6 +1361,11 @@ pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
}
crate fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
let (ty, f) = self.cx.get_intrinsic(intrinsic);
self.call(ty, f, args, None)
}
fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
let size = size.bytes();
if size == 0 {
......@@ -1372,10 +1376,8 @@ fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Si
return;
}
let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
let ptr = self.pointercast(ptr, self.cx.type_i8p());
self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
}
pub(crate) fn phi(
......
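The new `call_intrinsic` helper exists because `get_intrinsic` now returns the cached `(fn_type, fn_value)` pair, so the function type can be handed straight to `call` instead of being stripped off the callee's pointer type inside `check_call`. A rough mock of that shape, with plain strings standing in for `&'ll Type` and `&'ll Value` (the struct and aliases here are illustrative, not the real bindings):

```rust
use std::collections::HashMap;

type Ty = &'static str;  // stand-in for &'ll Type
type Val = &'static str; // stand-in for &'ll Value

struct Cx {
    intrinsics: HashMap<&'static str, (Ty, Val)>,
}

impl Cx {
    fn get_intrinsic(&mut self, name: &'static str) -> (Ty, Val) {
        // Declare on first use, then serve from the cache, as CodegenCx does.
        *self.intrinsics.entry(name).or_insert(("i1 (i1, i1)", name))
    }

    fn call(&mut self, ty: Ty, f: Val, args: &[Val]) -> Val {
        // The real builder emits an LLVM call here; the type is now explicit.
        println!("call {f} : {ty} with {args:?}");
        "%ret"
    }

    fn call_intrinsic(&mut self, name: &'static str, args: &[Val]) -> Val {
        let (ty, f) = self.get_intrinsic(name);
        self.call(ty, f, args)
    }
}

fn main() {
    let mut cx = Cx { intrinsics: HashMap::new() };
    cx.call_intrinsic("llvm.expect.i1", &["%cond", "true"]);
}
```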
......@@ -84,9 +84,9 @@ pub struct CodegenCx<'ll, 'tcx> {
eh_personality: Cell<Option<&'ll Value>>,
eh_catch_typeinfo: Cell<Option<&'ll Value>>,
pub rust_try_fn: Cell<Option<&'ll Value>>,
pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,
intrinsics: RefCell<FxHashMap<&'static str, &'ll Value>>,
intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
......@@ -452,7 +452,7 @@ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
}
impl CodegenCx<'b, 'tcx> {
crate fn get_intrinsic(&self, key: &str) -> &'b Value {
crate fn get_intrinsic(&self, key: &str) -> (&'b Type, &'b Value) {
if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
return v;
}
......@@ -465,18 +465,18 @@ fn insert_intrinsic(
name: &'static str,
args: Option<&[&'b llvm::Type]>,
ret: &'b llvm::Type,
) -> &'b llvm::Value {
) -> (&'b llvm::Type, &'b llvm::Value) {
let fn_ty = if let Some(args) = args {
self.type_func(args, ret)
} else {
self.type_variadic_func(&[], ret)
};
let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
self.intrinsics.borrow_mut().insert(name, f);
f
self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
(fn_ty, f)
}
fn declare_intrinsic(&self, key: &str) -> Option<&'b Value> {
fn declare_intrinsic(&self, key: &str) -> Option<(&'b Type, &'b Value)> {
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {
......
use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
......@@ -24,7 +24,7 @@
use std::cmp::Ordering;
use std::iter;
fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Value> {
fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<(&'ll Type, &'ll Value)> {
let llvm_name = match name {
sym::sqrtf32 => "llvm.sqrt.f32",
sym::sqrtf64 => "llvm.sqrt.f64",
......@@ -102,19 +102,20 @@ fn codegen_intrinsic_call(
let simple = get_simple_intrinsic(self, name);
let llval = match name {
_ if simple.is_some() => self.call(
simple.unwrap(),
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
),
sym::likely => {
let expect = self.get_intrinsic(&("llvm.expect.i1"));
self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
_ if simple.is_some() => {
let (simple_ty, simple_fn) = simple.unwrap();
self.call(
simple_ty,
simple_fn,
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
)
}
sym::unlikely => {
let expect = self.get_intrinsic(&("llvm.expect.i1"));
self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
sym::likely => {
self.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(true)])
}
sym::unlikely => self
.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(false)]),
kw::Try => {
try_intrinsic(
self,
......@@ -125,13 +126,9 @@ fn codegen_intrinsic_call(
);
return;
}
sym::breakpoint => {
let llfn = self.get_intrinsic(&("llvm.debugtrap"));
self.call(llfn, &[], None)
}
sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
sym::va_copy => {
let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
}
sym::va_arg => {
match fn_abi.ret.layout.abi {
......@@ -194,7 +191,6 @@ fn codegen_intrinsic_call(
| sym::prefetch_write_data
| sym::prefetch_read_instruction
| sym::prefetch_write_instruction => {
let expect = self.get_intrinsic(&("llvm.prefetch"));
let (rw, cache_type) = match name {
sym::prefetch_read_data => (0, 1),
sym::prefetch_write_data => (1, 1),
......@@ -202,15 +198,14 @@ fn codegen_intrinsic_call(
sym::prefetch_write_instruction => (1, 0),
_ => bug!(),
};
self.call(
expect,
self.call_intrinsic(
"llvm.prefetch",
&[
args[0].immediate(),
self.const_i32(rw),
args[1].immediate(),
self.const_i32(cache_type),
],
None,
)
}
sym::ctlz
......@@ -229,35 +224,33 @@ fn codegen_intrinsic_call(
Some((width, signed)) => match name {
sym::ctlz | sym::cttz => {
let y = self.const_bool(false);
let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
self.call(llfn, &[args[0].immediate(), y], None)
self.call_intrinsic(
&format!("llvm.{}.i{}", name, width),
&[args[0].immediate(), y],
)
}
sym::ctlz_nonzero | sym::cttz_nonzero => {
let y = self.const_bool(true);
let llvm_name = &format!("llvm.{}.i{}", &name_str[..4], width);
let llfn = self.get_intrinsic(llvm_name);
self.call(llfn, &[args[0].immediate(), y], None)
self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
}
sym::ctpop => self.call(
self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
sym::ctpop => self.call_intrinsic(
&format!("llvm.ctpop.i{}", width),
&[args[0].immediate()],
None,
),
sym::bswap => {
if width == 8 {
args[0].immediate() // byte swap a u8/i8 is just a no-op
} else {
self.call(
self.get_intrinsic(&format!("llvm.bswap.i{}", width)),
self.call_intrinsic(
&format!("llvm.bswap.i{}", width),
&[args[0].immediate()],
None,
)
}
}
sym::bitreverse => self.call(
self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
sym::bitreverse => self.call_intrinsic(
&format!("llvm.bitreverse.i{}", width),
&[args[0].immediate()],
None,
),
sym::rotate_left | sym::rotate_right => {
let is_left = name == sym::rotate_left;
......@@ -266,8 +259,7 @@ fn codegen_intrinsic_call(
// rotate = funnel shift with first two args the same
let llvm_name =
&format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
let llfn = self.get_intrinsic(llvm_name);
self.call(llfn, &[val, val, raw_shift], None)
self.call_intrinsic(llvm_name, &[val, val, raw_shift])
}
sym::saturating_add | sym::saturating_sub => {
let is_add = name == sym::saturating_add;
......@@ -279,8 +271,7 @@ fn codegen_intrinsic_call(
if is_add { "add" } else { "sub" },
width
);
let llfn = self.get_intrinsic(llvm_name);
self.call(llfn, &[lhs, rhs], None)
self.call_intrinsic(llvm_name, &[lhs, rhs])
}
_ => bug!(),
},
......@@ -331,8 +322,7 @@ fn codegen_intrinsic_call(
let a_ptr = self.bitcast(a, i8p_ty);
let b_ptr = self.bitcast(b, i8p_ty);
let n = self.const_usize(layout.size.bytes());
let llfn = self.get_intrinsic("memcmp");
let cmp = self.call(llfn, &[a_ptr, b_ptr, n], None);
let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
}
}
......@@ -361,18 +351,15 @@ fn codegen_intrinsic_call(
}
fn abort(&mut self) {
let fnname = self.get_intrinsic(&("llvm.trap"));
self.call(fnname, &[], None);
self.call_intrinsic("llvm.trap", &[]);
}
fn assume(&mut self, val: Self::Value) {
let assume_intrinsic = self.get_intrinsic("llvm.assume");
self.call(assume_intrinsic, &[val], None);
self.call_intrinsic("llvm.assume", &[val]);
}
fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
let expect = self.get_intrinsic(&"llvm.expect.i1");
self.call(expect, &[cond, self.const_bool(expected)], None)
self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)])
}
fn sideeffect(&mut self) {
......@@ -380,19 +367,16 @@ fn sideeffect(&mut self) {
// caller of this function is in `rustc_codegen_ssa`, which is agnostic to whether the LLVM
// codegen backend is being used, and so is unable to check the LLVM version.
if unsafe { llvm::LLVMRustVersionMajor() } < 12 {
let fnname = self.get_intrinsic(&("llvm.sideeffect"));
self.call(fnname, &[], None);
self.call_intrinsic("llvm.sideeffect", &[]);
}
}
fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
let intrinsic = self.cx().get_intrinsic("llvm.va_start");
self.call(intrinsic, &[va_list], None)
self.call_intrinsic("llvm.va_start", &[va_list])
}
fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
let intrinsic = self.cx().get_intrinsic("llvm.va_end");
self.call(intrinsic, &[va_list], None)
self.call_intrinsic("llvm.va_end", &[va_list])
}
}
......@@ -404,7 +388,8 @@ fn try_intrinsic(
dest: &'ll Value,
) {
if bx.sess().panic_strategy() == PanicStrategy::Abort {
bx.call(try_func, &[data], None);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.call(try_func_ty, try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call;
// we can never unwind.
let ret_align = bx.tcx().data_layout.i32_align.abi;
......@@ -432,7 +417,7 @@ fn codegen_msvc_try(
catch_func: &'ll Value,
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(bx, &mut |mut bx| {
let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
bx.set_personality_fn(bx.eh_personality());
let mut normal = bx.build_sibling_block("normal");
......@@ -502,7 +487,8 @@ fn codegen_msvc_try(
// More information can be found in libstd's seh.rs implementation.
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_i8p(), ptr_align);
bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
normal.ret(bx.const_i32(0));
......@@ -544,14 +530,15 @@ fn codegen_msvc_try(
let flags = bx.const_i32(8);
let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align);
catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
catchpad_rust.call(catch_ty, catch_func, &[data, ptr], Some(&funclet));
catchpad_rust.catch_ret(&funclet, caught.llbb());
// The flag value of 64 indicates a "catch-all".
let flags = bx.const_i32(64);
let null = bx.const_null(bx.type_i8p());
let funclet = catchpad_foreign.catch_pad(cs, &[null, flags, null]);
catchpad_foreign.call(catch_func, &[data, null], Some(&funclet));
catchpad_foreign.call(catch_ty, catch_func, &[data, null], Some(&funclet));
catchpad_foreign.catch_ret(&funclet, caught.llbb());
caught.ret(bx.const_i32(1));
......@@ -559,7 +546,7 @@ fn codegen_msvc_try(
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[try_func, data, catch_func], None);
let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
......@@ -582,7 +569,7 @@ fn codegen_gnu_try(
catch_func: &'ll Value,
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(bx, &mut |mut bx| {
let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
// Codegens the shims described above:
//
// bx:
......@@ -601,7 +588,8 @@ fn codegen_gnu_try(
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], then.llbb(), catch.llbb(), None);
then.ret(bx.const_i32(0));
// Type indicator for the exception being thrown.
......@@ -615,13 +603,14 @@ fn codegen_gnu_try(
let tydesc = bx.const_null(bx.type_i8p());
catch.add_clause(vals, tydesc);
let ptr = catch.extract_value(vals, 0);
catch.call(catch_func, &[data, ptr], None);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
catch.call(catch_ty, catch_func, &[data, ptr], None);
catch.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[try_func, data, catch_func], None);
let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
......@@ -636,7 +625,7 @@ fn codegen_emcc_try(
catch_func: &'ll Value,
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(bx, &mut |mut bx| {
let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
// Codegens the shims described above:
//
// bx:
......@@ -660,7 +649,8 @@ fn codegen_emcc_try(
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], then.llbb(), catch.llbb(), None);
then.ret(bx.const_i32(0));
// Type indicator for the exception being thrown.
......@@ -677,8 +667,7 @@ fn codegen_emcc_try(
let selector = catch.extract_value(vals, 1);
// Check if the typeid we got is the one for a Rust panic.
let llvm_eh_typeid_for = bx.get_intrinsic("llvm.eh.typeid.for");
let rust_typeid = catch.call(llvm_eh_typeid_for, &[tydesc], None);
let rust_typeid = catch.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
let is_rust_panic = catch.icmp(IntPredicate::IntEQ, selector, rust_typeid);
let is_rust_panic = catch.zext(is_rust_panic, bx.type_bool());
......@@ -702,13 +691,14 @@ fn codegen_emcc_try(
catch.store(is_rust_panic, catch_data_1, i8_align);
let catch_data = catch.bitcast(catch_data, bx.type_i8p());
catch.call(catch_func, &[data, catch_data], None);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
catch.call(catch_ty, catch_func, &[data, catch_data], None);
catch.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[try_func, data, catch_func], None);
let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
......@@ -720,8 +710,9 @@ fn gen_fn<'ll, 'tcx>(
name: &str,
rust_fn_sig: ty::PolyFnSig<'tcx>,
codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
) -> (&'ll Type, &'ll Value) {
let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
let llty = fn_abi.llvm_type(cx);
let llfn = cx.declare_fn(name, &fn_abi);
cx.set_frame_pointer_type(llfn);
cx.apply_target_cpu_attr(llfn);
......@@ -730,7 +721,7 @@ fn gen_fn<'ll, 'tcx>(
let llbb = Builder::append_block(cx, llfn, "entry-block");
let bx = Builder::build(cx, llbb);
codegen(bx);
llfn
(llty, llfn)
}
// Helper function used to get a handle to the `__rust_try` function used to
......@@ -740,7 +731,7 @@ fn gen_fn<'ll, 'tcx>(
fn get_rust_try_fn<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
) -> (&'ll Type, &'ll Value) {
if let Some(llfn) = cx.rust_try_fn.get() {
return llfn;
}
......@@ -1123,7 +1114,8 @@ fn simd_simple_float_intrinsic(
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let f = bx.declare_cfn(&llvm_name, llvm::UnnamedAddr::No, fn_ty);
let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
let c =
bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
Ok(c)
}
......@@ -1300,15 +1292,13 @@ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
let llvm_intrinsic =
format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
llvm::UnnamedAddr::No,
bx.type_func(
&[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
llvm_elem_vec_ty,
),
let fn_ty = bx.type_func(
&[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
llvm_elem_vec_ty,
);
let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v =
bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
return Ok(v);
}
......@@ -1430,12 +1420,11 @@ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
let llvm_intrinsic =
format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
llvm::UnnamedAddr::No,
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
);
let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
let fn_ty =
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v =
bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
return Ok(v);
}
......@@ -1757,12 +1746,9 @@ enum Style {
);
let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
let f = bx.declare_cfn(
&llvm_intrinsic,
llvm::UnnamedAddr::No,
bx.type_func(&[vec_ty, vec_ty], vec_ty),
);
let v = bx.call(f, &[lhs, rhs], None);
let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = bx.call(fn_ty, f, &[lhs, rhs], None);
return Ok(v);
}
......
......@@ -1155,6 +1155,7 @@ pub fn LLVMBuildSwitch(
) -> &'a Value;
pub fn LLVMRustBuildInvoke(
B: &Builder<'a>,
Ty: &'a Type,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
......@@ -1526,6 +1527,7 @@ pub fn LLVMBuildFCmp(
pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &'a Value;
pub fn LLVMRustBuildCall(
B: &Builder<'a>,
Ty: &'a Type,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
......
......@@ -203,7 +203,11 @@ fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Ty
}
fn element_type(&self, ty: &'ll Type) -> &'ll Type {
unsafe { llvm::LLVMGetElementType(ty) }
match self.type_kind(ty) {
TypeKind::Array | TypeKind::Vector => unsafe { llvm::LLVMGetElementType(ty) },
TypeKind::Pointer => bug!("element_type is not supported for opaque pointers"),
other => bug!("element_type called on unsupported type {:?}", other),
}
}
fn vector_length(&self, ty: &'ll Type) -> usize {
......@@ -275,6 +279,9 @@ fn scalar_pair_element_backend_type(
fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
ty.llvm_type(self)
}
fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
fn_abi.llvm_type(self)
}
fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
fn_abi.ptr_to_llvm_type(self)
}
......
......@@ -441,9 +441,11 @@ fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx.insert_reference_to_gdb_debug_scripts_section_global();
let isize_ty = cx.type_isize();
let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
let (start_fn, args) = if use_start_lang_item {
let (start_fn, start_ty, args) = if use_start_lang_item {
let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
let start_fn = cx.get_fn_addr(
ty::Instance::resolve(
......@@ -455,16 +457,15 @@ fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
.unwrap()
.unwrap(),
);
(
start_fn,
vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), arg_argc, arg_argv],
)
let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty], isize_ty);
(start_fn, start_ty, vec![rust_main, arg_argc, arg_argv])
} else {
debug!("using user-defined start fn");
(rust_main, vec![arg_argc, arg_argv])
let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
(rust_main, start_ty, vec![arg_argc, arg_argv])
};
let result = bx.call(start_fn, &args, None);
let result = bx.call(start_ty, start_fn, &args, None);
let cast = bx.intcast(result, cx.type_int(), true);
bx.ret(cast);
......
......@@ -132,14 +132,21 @@ fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
) {
// If there is a cleanup block and the function we're calling can unwind, then
// do an invoke, otherwise do a call.
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
let ret_llbb = if let Some((_, target)) = destination {
fx.llbb(target)
} else {
fx.unreachable_block()
};
let invokeret =
bx.invoke(fn_ptr, &llargs, ret_llbb, self.llblock(fx, cleanup), self.funclet(fx));
let invokeret = bx.invoke(
fn_ty,
fn_ptr,
&llargs,
ret_llbb,
self.llblock(fx, cleanup),
self.funclet(fx),
);
bx.apply_attrs_callsite(&fn_abi, invokeret);
if let Some((ret_dest, target)) = destination {
......@@ -148,7 +155,7 @@ fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret);
}
} else {
let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
bx.apply_attrs_callsite(&fn_abi, llret);
if fx.mir[self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
......
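The comment at the top of this hunk carries the whole branching rule: an `invoke` (with a landing pad) is emitted only when a cleanup block exists *and* the callee can unwind; otherwise a plain `call` suffices. A tiny sketch of that predicate, mocking the cleanup handle as an `Option<u32>`:

```rust
fn use_invoke(can_unwind: bool, cleanup: Option<u32>) -> bool {
    // Same shape as `cleanup.filter(|_| fn_abi.can_unwind)` in the diff.
    cleanup.filter(|_| can_unwind).is_some()
}

fn main() {
    assert!(use_invoke(true, Some(7)));   // unwinding fn + cleanup => invoke
    assert!(!use_invoke(true, None));     // no cleanup block => plain call
    assert!(!use_invoke(false, Some(7))); // nounwind callee => plain call
}
```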
......@@ -518,7 +518,8 @@ pub fn codegen_rvalue_operand(
};
let instance = ty::Instance::mono(bx.tcx(), def_id);
let r = bx.cx().get_fn_addr(instance);
let call = bx.call(r, &[llsize, llalign], None);
let ty = bx.type_func(&[bx.type_isize(), bx.type_isize()], bx.type_i8p());
let call = bx.call(ty, r, &[llsize, llalign], None);
let val = bx.pointercast(call, llty_ptr);
let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
......
......@@ -72,6 +72,7 @@ fn switch(
);
fn invoke(
&mut self,
llty: Self::Type,
llfn: Self::Value,
args: &[Self::Value],
then: Self::BasicBlock,
......@@ -303,6 +304,7 @@ fn instrprof_increment(
fn call(
&mut self,
llty: Self::Type,
llfn: Self::Value,
args: &[Self::Value],
funclet: Option<&Self::Funclet>,
......
......@@ -102,6 +102,7 @@ impl<T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscM
pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
......
......@@ -2689,6 +2689,38 @@
"detects deprecation attributes with no effect",
}
declare_lint! {
/// The `undefined_naked_function_abi` lint detects naked function definitions that
/// either do not specify an ABI or specify the Rust ABI.
///
/// ### Example
///
/// ```rust
/// #![feature(naked_functions)]
/// #![feature(asm)]
///
/// #[naked]
/// pub fn default_abi() -> u32 {
/// unsafe { asm!("", options(noreturn)); }
/// }
///
/// #[naked]
/// pub extern "Rust" fn rust_abi() -> u32 {
/// unsafe { asm!("", options(noreturn)); }
/// }
/// ```
///
/// {{produces}}
///
/// ### Explanation
///
/// The Rust ABI is currently undefined. Therefore, naked functions should
/// specify a non-Rust ABI.
pub UNDEFINED_NAKED_FUNCTION_ABI,
Warn,
"undefined naked function ABI"
}
declare_lint! {
/// The `unsupported_naked_functions` lint detects naked function
/// definitions that are unsupported but were previously accepted.
......@@ -2699,7 +2731,7 @@
/// #![feature(naked_functions)]
///
/// #[naked]
/// pub fn f() -> u32 {
/// pub extern "C" fn f() -> u32 {
/// 42
/// }
/// ```
......
......@@ -1392,11 +1392,11 @@ extern "C" void LLVMRustFreeOperandBundleDef(OperandBundleDef *Bundle) {
delete Bundle;
}
extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMValueRef Fn,
extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
OperandBundleDef *Bundle) {
Value *Callee = unwrap(Fn);
FunctionType *FTy = cast<FunctionType>(Callee->getType()->getPointerElementType());
FunctionType *FTy = unwrap<FunctionType>(Ty);
unsigned Len = Bundle ? 1 : 0;
ArrayRef<OperandBundleDef> Bundles = makeArrayRef(Bundle, Len);
return wrap(unwrap(B)->CreateCall(
......@@ -1437,12 +1437,12 @@ extern "C" LLVMValueRef LLVMRustBuildMemSet(LLVMBuilderRef B,
}
extern "C" LLVMValueRef
LLVMRustBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
unsigned NumArgs, LLVMBasicBlockRef Then,
LLVMBasicBlockRef Catch, OperandBundleDef *Bundle,
const char *Name) {
LLVMRustBuildInvoke(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
OperandBundleDef *Bundle, const char *Name) {
Value *Callee = unwrap(Fn);
FunctionType *FTy = cast<FunctionType>(Callee->getType()->getPointerElementType());
FunctionType *FTy = unwrap<FunctionType>(Ty);
unsigned Len = Bundle ? 1 : 0;
ArrayRef<OperandBundleDef> Bundles = makeArrayRef(Bundle, Len);
return wrap(unwrap(B)->CreateInvoke(FTy, Callee, unwrap(Then), unwrap(Catch),
......
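This C++ hunk is the crux of the whole change: `getPointerElementType()` stops being answerable once LLVM pointers become opaque, so the `FunctionType` must arrive as an explicit parameter instead of being peeled off the callee. A before/after mock in Rust (types and names here are illustrative, not the real FFI):

```rust
struct Value {
    repr: &'static str, // an opaque-pointer callee is just "ptr"
}

// Old shape: recover the function type from the callee's pointer type.
fn build_call_old(callee: &Value) -> Result<String, &'static str> {
    if callee.repr == "ptr" {
        return Err("pointee type is unrecoverable under opaque pointers");
    }
    Ok(format!("call via {}", callee.repr))
}

// New shape: the caller states the function type, so it always survives.
fn build_call_new(fn_ty: &'static str, callee: &Value) -> String {
    format!("call {fn_ty} {}", callee.repr)
}

fn main() {
    let callee = Value { repr: "ptr" };
    assert!(build_call_old(&callee).is_err());
    assert_eq!(build_call_new("i32 (i8*)", &callee), "call i32 (i8*) ptr");
}
```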
......@@ -51,6 +51,7 @@
#![feature(iter_zip)]
#![feature(thread_local_const_init)]
#![feature(try_reserve)]
#![feature(try_reserve_kind)]
#![feature(nonzero_ops)]
#![recursion_limit = "512"]
......
......@@ -30,6 +30,7 @@
#![feature(once_cell)]
#![feature(control_flow_enum)]
#![feature(try_reserve)]
#![feature(try_reserve_kind)]
#![recursion_limit = "256"]
#[macro_use]
......
......@@ -7,6 +7,7 @@
use rustc_hir::{ExprKind, HirId, InlineAsmOperand, StmtKind};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::builtin::UNDEFINED_NAKED_FUNCTION_ABI;
use rustc_session::lint::builtin::UNSUPPORTED_NAKED_FUNCTIONS;
use rustc_span::symbol::sym;
use rustc_span::Span;
......@@ -87,7 +88,7 @@ fn check_inline(tcx: TyCtxt<'_>, hir_id: HirId, attrs: &[Attribute]) {
/// Checks that the function uses a non-Rust ABI.
fn check_abi(tcx: TyCtxt<'_>, hir_id: HirId, abi: Abi, fn_ident_span: Span) {
if abi == Abi::Rust {
tcx.struct_span_lint_hir(UNSUPPORTED_NAKED_FUNCTIONS, hir_id, fn_ident_span, |lint| {
tcx.struct_span_lint_hir(UNDEFINED_NAKED_FUNCTION_ABI, hir_id, fn_ident_span, |lint| {
lint.build("Rust ABI is unsupported in naked functions").emit();
});
}
......
......@@ -58,7 +58,31 @@ pub mod btree_set {
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
pub enum TryReserveError {
pub struct TryReserveError {
kind: TryReserveErrorKind,
}
impl TryReserveError {
/// Details about the allocation that caused the error
#[inline]
#[unstable(
feature = "try_reserve_kind",
reason = "Uncertain how much info should be exposed",
issue = "48043"
)]
pub fn kind(&self) -> TryReserveErrorKind {
self.kind.clone()
}
}
/// Details of the allocation that caused a `TryReserveError`
#[derive(Clone, PartialEq, Eq, Debug)]
#[unstable(
feature = "try_reserve_kind",
reason = "Uncertain how much info should be exposed",
issue = "48043"
)]
pub enum TryReserveErrorKind {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
......@@ -81,12 +105,23 @@ pub enum TryReserveError {
},
}
#[unstable(
feature = "try_reserve_kind",
reason = "Uncertain how much info should be exposed",
issue = "48043"
)]
impl From<TryReserveErrorKind> for TryReserveError {
fn from(kind: TryReserveErrorKind) -> Self {
Self { kind }
}
}
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
impl From<LayoutError> for TryReserveError {
/// Always evaluates to [`TryReserveError::CapacityOverflow`].
/// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
#[inline]
fn from(_: LayoutError) -> Self {
TryReserveError::CapacityOverflow
TryReserveErrorKind::CapacityOverflow.into()
}
}
......@@ -97,11 +132,13 @@ fn fmt(
fmt: &mut core::fmt::Formatter<'_>,
) -> core::result::Result<(), core::fmt::Error> {
fmt.write_str("memory allocation failed")?;
let reason = match &self {
TryReserveError::CapacityOverflow => {
let reason = match self.kind {
TryReserveErrorKind::CapacityOverflow => {
" because the computed capacity exceeded the collection's maximum"
}
TryReserveError::AllocError { .. } => " because the memory allocator returned an error",
TryReserveErrorKind::AllocError { .. } => {
" because the memory allocator returned an error"
}
};
fmt.write_str(reason)
}
......
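With the variants moved behind `TryReserveErrorKind`, callers inspect a reservation failure through `err.kind()` instead of matching on the error directly, which is exactly the `map_err(|e| e.kind())` pattern the updated tests below use. A minimal usage sketch, assuming a nightly toolchain of this era with both feature gates from the diff enabled:

```rust
#![feature(try_reserve)]
#![feature(try_reserve_kind)]

use std::collections::TryReserveErrorKind;

fn main() {
    let mut v: Vec<u8> = Vec::new();
    // Asking for usize::MAX bytes overflows the capacity computation.
    if let Err(TryReserveErrorKind::CapacityOverflow) =
        v.try_reserve(usize::MAX).map_err(|e| e.kind())
    {
        eprintln!("requested capacity overflowed the collection's maximum");
    }
}
```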
......@@ -19,6 +19,7 @@
use crate::alloc::{Allocator, Global};
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind;
use crate::raw_vec::RawVec;
use crate::vec::Vec;
......@@ -773,7 +774,7 @@ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError>
let new_cap = used_cap
.checked_add(additional)
.and_then(|needed_cap| needed_cap.checked_next_power_of_two())
.ok_or(TryReserveError::CapacityOverflow)?;
.ok_or(TryReserveErrorKind::CapacityOverflow)?;
if new_cap > old_cap {
self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?;
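A quick worked instance of the capacity computation above, with illustrative numbers: `VecDeque` adds the request to the used capacity and rounds up to the next power of two, and either step can overflow into `CapacityOverflow`:

```rust
fn deque_target_cap(used_cap: usize, additional: usize) -> Option<usize> {
    // Mirrors the diff: checked add, then round up to a power of two.
    used_cap.checked_add(additional)?.checked_next_power_of_two()
}

fn main() {
    assert_eq!(deque_target_cap(10, 6), Some(16));
    assert_eq!(deque_target_cap(1, usize::MAX), None); // -> CapacityOverflow
}
```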
......
......@@ -13,7 +13,8 @@
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError::{self, *};
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;
#[cfg(test)]
mod tests;
......@@ -425,7 +426,7 @@ fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryRes
if mem::size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow);
return Err(CapacityOverflow.into());
}
// Nothing we can really do about these checks, sadly.
......@@ -451,7 +452,7 @@ fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserve
if mem::size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow);
return Err(CapacityOverflow.into());
}
let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
......@@ -471,10 +472,9 @@ fn shrink(&mut self, amount: usize) -> Result<(), TryReserveError> {
let ptr = unsafe {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc.shrink(ptr, layout, new_layout).map_err(|_| TryReserveError::AllocError {
layout: new_layout,
non_exhaustive: (),
})?
self.alloc
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
};
self.set_ptr(ptr);
Ok(())
......@@ -510,7 +510,7 @@ fn finish_grow<A>(
alloc.allocate(new_layout)
};
memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })
memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
......@@ -526,7 +526,7 @@ fn drop(&mut self) {
#[cfg(not(no_global_oom_handling))]
#[inline]
fn handle_reserve(result: Result<(), TryReserveError>) {
match result {
match result.map_err(|e| e.kind()) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { layout, .. }) => handle_alloc_error(layout),
Ok(()) => { /* yay */ }
......@@ -545,7 +545,7 @@ fn handle_reserve(result: Result<(), TryReserveError>) {
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
if usize::BITS < 64 && alloc_size > isize::MAX as usize {
Err(CapacityOverflow)
Err(CapacityOverflow.into())
} else {
Ok(())
}
......
......@@ -8,6 +8,7 @@
#![feature(pattern)]
#![feature(trusted_len)]
#![feature(try_reserve)]
#![feature(try_reserve_kind)]
#![feature(unboxed_closures)]
#![feature(associated_type_bounds)]
#![feature(binary_heap_into_iter_sorted)]
......
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::TryReserveError::*;
use std::collections::TryReserveErrorKind::*;
use std::ops::Bound;
use std::ops::Bound::*;
use std::ops::RangeBounds;
......@@ -703,35 +703,42 @@ fn test_try_reserve() {
let mut empty_string: String = String::new();
// Check isize::MAX doesn't count as an overflow
if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) {
if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
// Play it again, frank! (just to be sure)
if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) {
if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
// Check isize::MAX + 1 does count as overflow
if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP + 1) {
if let Err(CapacityOverflow) =
empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
// Check usize::MAX does count as overflow
if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_USIZE) {
if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
} else {
// Check isize::MAX + 1 is an OOM
if let Err(AllocError { .. }) = empty_string.try_reserve(MAX_CAP + 1) {
if let Err(AllocError { .. }) =
empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
// Check usize::MAX is an OOM
if let Err(AllocError { .. }) = empty_string.try_reserve(MAX_USIZE) {
if let Err(AllocError { .. }) =
empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an OOM!")
}
......@@ -742,25 +749,27 @@ fn test_try_reserve() {
// Same basic idea, but with non-zero len
let mut ten_bytes: String = String::from("0123456789");
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9) {
if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should always overflow in the add-to-len
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!")
}
......@@ -782,30 +791,40 @@ fn test_try_reserve_exact() {
{
let mut empty_string: String = String::new();
if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) {
if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) {
if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP + 1) {
if let Err(CapacityOverflow) =
empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_USIZE) {
if let Err(CapacityOverflow) =
empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
} else {
if let Err(AllocError { .. }) = empty_string.try_reserve_exact(MAX_CAP + 1) {
if let Err(AllocError { .. }) =
empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
if let Err(AllocError { .. }) = empty_string.try_reserve_exact(MAX_USIZE) {
if let Err(AllocError { .. }) =
empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an OOM!")
}
......@@ -815,24 +834,33 @@ fn test_try_reserve_exact() {
{
let mut ten_bytes: String = String::from("0123456789");
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
if let Err(CapacityOverflow) =
ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
if let Err(CapacityOverflow) =
ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
if let Err(CapacityOverflow) =
ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
if let Err(AllocError { .. }) =
ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
......
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::TryReserveError::*;
use std::collections::TryReserveErrorKind::*;
use std::fmt::Debug;
use std::iter::InPlaceIterable;
use std::mem::{size_of, swap};
......@@ -1478,35 +1478,41 @@ fn test_try_reserve() {
let mut empty_bytes: Vec<u8> = Vec::new();
// Check isize::MAX doesn't count as an overflow
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
// Play it again, frank! (just to be sure)
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
// Check isize::MAX + 1 does count as overflow
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) {
if let Err(CapacityOverflow) =
empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
// Check usize::MAX does count as overflow
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
} else {
// Check isize::MAX + 1 is an OOM
if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_CAP + 1) {
if let Err(AllocError { .. }) =
empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
// Check usize::MAX is an OOM
if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE) {
if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an OOM!")
}
......@@ -1517,25 +1523,27 @@ fn test_try_reserve() {
// Same basic idea, but with non-zero len
let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9) {
if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should always overflow in the add-to-len
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!")
}
......@@ -1545,25 +1553,31 @@ fn test_try_reserve() {
// Same basic idea, but with interesting type size
let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10) {
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10) {
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 9) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_u32s.try_reserve(MAX_CAP / 4 - 9) {
if let Err(AllocError { .. }) =
ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should fail in the mul-by-size
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) {
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!");
}
......@@ -1585,30 +1599,40 @@ fn test_try_reserve_exact() {
{
let mut empty_bytes: Vec<u8> = Vec::new();
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) {
if let Err(CapacityOverflow) =
empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) {
if let Err(CapacityOverflow) =
empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
} else {
if let Err(AllocError { .. }) = empty_bytes.try_reserve_exact(MAX_CAP + 1) {
if let Err(AllocError { .. }) =
empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
if let Err(AllocError { .. }) = empty_bytes.try_reserve_exact(MAX_USIZE) {
if let Err(AllocError { .. }) =
empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an OOM!")
}
......@@ -1618,24 +1642,33 @@ fn test_try_reserve_exact() {
{
let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
if let Err(CapacityOverflow) =
ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
if let Err(CapacityOverflow) =
ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
if let Err(CapacityOverflow) =
ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
if let Err(AllocError { .. }) =
ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
......@@ -1644,24 +1677,34 @@ fn test_try_reserve_exact() {
{
let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9) {
if let Err(AllocError { .. }) =
ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
......
use std::collections::TryReserveError::*;
use std::collections::TryReserveErrorKind::*;
use std::collections::{vec_deque::Drain, VecDeque};
use std::fmt::Debug;
use std::mem::size_of;
......@@ -1171,23 +1171,26 @@ fn test_try_reserve() {
let mut empty_bytes: VecDeque<u8> = VecDeque::new();
// Check isize::MAX doesn't count as an overflow
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
// Play it again, frank! (just to be sure)
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
// Check isize::MAX + 1 does count as overflow
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) {
if let Err(CapacityOverflow) =
empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
// Check usize::MAX does count as overflow
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
......@@ -1196,7 +1199,7 @@ fn test_try_reserve() {
// VecDeque starts with capacity 7, always adds 1 to the capacity
// and also rounds the number to next power of 2 so this is the
// furthest we can go without triggering CapacityOverflow
if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_CAP) {
if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
......@@ -1207,25 +1210,27 @@ fn test_try_reserve() {
// Same basic idea, but with non-zero len
let mut ten_bytes: VecDeque<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9) {
if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should always overflow in the add-to-len
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!")
}
......@@ -1235,25 +1240,31 @@ fn test_try_reserve() {
// Same basic idea, but with interesting type size
let mut ten_u32s: VecDeque<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10) {
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10) {
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 9) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_u32s.try_reserve(MAX_CAP / 4 - 9) {
if let Err(AllocError { .. }) =
ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should fail in the mul-by-size
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) {
if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!");
}
......@@ -1275,20 +1286,26 @@ fn test_try_reserve_exact() {
{
let mut empty_bytes: VecDeque<u8> = VecDeque::new();
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) {
if let Err(CapacityOverflow) =
empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) {
if let Err(CapacityOverflow) =
empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
......@@ -1297,7 +1314,9 @@ fn test_try_reserve_exact() {
// VecDeque starts with capacity 7, always adds 1 to the capacity
// and also rounds the number to next power of 2 so this is the
// furthest we can go without triggering CapacityOverflow
if let Err(AllocError { .. }) = empty_bytes.try_reserve_exact(MAX_CAP) {
if let Err(AllocError { .. }) =
empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
......@@ -1307,24 +1326,33 @@ fn test_try_reserve_exact() {
{
let mut ten_bytes: VecDeque<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
if let Err(CapacityOverflow) =
ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
if let Err(CapacityOverflow) =
ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
if let Err(CapacityOverflow) =
ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
if let Err(AllocError { .. }) =
ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) {
if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
......@@ -1333,24 +1361,34 @@ fn test_try_reserve_exact() {
{
let mut ten_u32s: VecDeque<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
if let Err(AllocError { .. }) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9) {
if let Err(AllocError { .. }) =
ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind())
{
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) {
if let Err(CapacityOverflow) =
ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind())
{
} else {
panic!("usize::MAX should trigger an overflow!")
}
......
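The test churn above tracks one API change: `VecDeque::try_reserve` and `try_reserve_exact` now return an opaque `TryReserveError`, and the tests read its variant through an `e.kind()` accessor instead of matching the error directly. A minimal caller-side sketch on a nightly toolchain (the diff also enables `#![feature(try_reserve)]`, which has since been stabilized):

#![feature(try_reserve_kind)]

use std::collections::TryReserveErrorKind::*;
use std::collections::VecDeque;

fn main() {
    let mut buf: VecDeque<u8> = VecDeque::new();
    // The error type stays opaque; its variant is read through `kind()`,
    // exactly as the updated tests do with `.map_err(|e| e.kind())`.
    match buf.try_reserve(usize::MAX).map_err(|e| e.kind()) {
        Err(CapacityOverflow) => println!("capacity computation overflowed"),
        Err(AllocError { .. }) => println!("the allocator reported failure"),
        Err(_) => println!("some other kind"), // the kind enum is non-exhaustive
        Ok(()) => println!("reservation succeeded"),
    }
}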
......@@ -3452,7 +3452,7 @@ fn is_sorted_by_key<F, K>(self, f: F) -> bool
self.map(f).is_sorted()
}
/// See [TrustedRandomAccess]
/// See [TrustedRandomAccess][super::super::TrustedRandomAccess]
// The unusual name is to avoid name collisions in method resolution
// see #76479.
#[inline]
......
......@@ -8,6 +8,7 @@
use crate::borrow::Borrow;
use crate::cell::Cell;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind;
use crate::fmt::{self, Debug};
#[allow(deprecated)]
use crate::hash::{BuildHasher, Hash, Hasher, SipHasher13};
......@@ -2990,9 +2991,11 @@ fn map_entry<'a, K: 'a, V: 'a>(raw: base::RustcEntry<'a, K, V>) -> Entry<'a, K,
#[inline]
pub(super) fn map_try_reserve_error(err: hashbrown::TryReserveError) -> TryReserveError {
match err {
hashbrown::TryReserveError::CapacityOverflow => TryReserveError::CapacityOverflow,
hashbrown::TryReserveError::CapacityOverflow => {
TryReserveErrorKind::CapacityOverflow.into()
}
hashbrown::TryReserveError::AllocError { layout } => {
TryReserveError::AllocError { layout, non_exhaustive: () }
TryReserveErrorKind::AllocError { layout, non_exhaustive: () }.into()
}
}
}
......
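The `map_try_reserve_error` change shows the shape of the refactor: `TryReserveError` keeps its variant private, so callers build a `TryReserveErrorKind` and convert with `.into()`, which implies a `From<TryReserveErrorKind> for TryReserveError` impl. A sketch of that wrapper-plus-kind pattern with hypothetical stand-in types (`MyError` and `MyErrorKind` are not the real names):

#[derive(Debug)]
pub struct MyError {
    kind: MyErrorKind,
}

#[derive(Debug, Clone, Copy)]
pub enum MyErrorKind {
    CapacityOverflow,
    AllocError,
}

impl MyError {
    // Callers inspect the variant through an accessor, so fields can be
    // added to the wrapper later without breaking existing matches.
    pub fn kind(&self) -> MyErrorKind {
        self.kind
    }
}

// The `.into()` calls in the hunk above rely on exactly this conversion.
impl From<MyErrorKind> for MyError {
    fn from(kind: MyErrorKind) -> Self {
        MyError { kind }
    }
}

fn demo() -> MyError {
    // Construction sites stay terse, like `TryReserveErrorKind::CapacityOverflow.into()`.
    MyErrorKind::CapacityOverflow.into()
}

Keeping the public type a wrapper around a non-exhaustive kind enum lets the standard library evolve the error's payload without a breaking change.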
......@@ -3,7 +3,7 @@
use super::RandomState;
use crate::cell::RefCell;
use rand::{thread_rng, Rng};
use realstd::collections::TryReserveError::*;
use realstd::collections::TryReserveErrorKind::*;
// https://github.com/rust-lang/rust/issues/62301
fn _assert_hashmap_is_unwind_safe() {
......@@ -821,12 +821,12 @@ fn test_try_reserve() {
const MAX_USIZE: usize = usize::MAX;
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!");
}
if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 8) {
if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 8).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX / 8 should trigger an OOM!")
}
......
......@@ -422,6 +422,12 @@
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
pub use alloc_crate::collections::TryReserveError;
#[unstable(
feature = "try_reserve_kind",
reason = "Uncertain how much info should be exposed",
issue = "48043"
)]
pub use alloc_crate::collections::TryReserveErrorKind;
mod hash;
......
......@@ -2039,6 +2039,8 @@ pub fn remove_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> {
///
/// The iterator will yield instances of [`io::Result`]`<`[`DirEntry`]`>`.
/// New errors may be encountered after an iterator is initially constructed.
/// Entries for the current and parent directories (typically `.` and `..`) are
/// skipped.
///
/// # Platform-specific behavior
///
......
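The added sentence documents behavior worth spelling out: the iterator returned by `std::fs::read_dir` never yields the `.` and `..` entries. A small stable-Rust usage sketch:

use std::fs;
use std::io;

fn list(dir: &str) -> io::Result<()> {
    // Each item is an io::Result<DirEntry>; per the doc change above,
    // `.` and `..` are already filtered out, so no manual skip is needed.
    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        println!("{}", entry.path().display());
    }
    Ok(())
}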
......@@ -326,6 +326,7 @@
#![feature(trace_macros)]
#![feature(try_blocks)]
#![feature(try_reserve)]
#![feature(try_reserve_kind)]
#![feature(unboxed_closures)]
#![feature(unsafe_cell_raw_get)]
#![feature(unwrap_infallible)]
......
......@@ -388,46 +388,21 @@ pub fn current_exe() -> io::Result<PathBuf> {
#[cfg(target_os = "haiku")]
pub fn current_exe() -> io::Result<PathBuf> {
// Use Haiku's image info functions
#[repr(C)]
struct image_info {
id: i32,
type_: i32,
sequence: i32,
init_order: i32,
init_routine: *mut libc::c_void, // function pointer
term_routine: *mut libc::c_void, // function pointer
device: libc::dev_t,
node: libc::ino_t,
name: [libc::c_char; 1024], // MAXPATHLEN
text: *mut libc::c_void,
data: *mut libc::c_void,
text_size: i32,
data_size: i32,
api_version: i32,
abi: i32,
}
unsafe {
extern "C" {
fn _get_next_image_info(
team_id: i32,
cookie: *mut i32,
info: *mut image_info,
size: i32,
) -> i32;
}
let mut info: image_info = mem::zeroed();
let mut info: mem::MaybeUninit<libc::image_info> = mem::MaybeUninit::uninit();
let mut cookie: i32 = 0;
// the executable can be found at team id 0
let result =
_get_next_image_info(0, &mut cookie, &mut info, mem::size_of::<image_info>() as i32);
let result = libc::_get_next_image_info(
0,
&mut cookie,
info.as_mut_ptr(),
mem::size_of::<libc::image_info>(),
);
if result != 0 {
use crate::io::ErrorKind;
Err(io::Error::new_const(ErrorKind::Uncategorized, &"Error getting executable path"))
} else {
let name = CStr::from_ptr(info.name.as_ptr()).to_bytes();
let name = CStr::from_ptr((*info.as_ptr()).name.as_ptr()).to_bytes();
Ok(PathBuf::from(OsStr::from_bytes(name)))
}
}
......
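The Haiku rewrite makes two improvements: the hand-written `image_info` struct and `extern` declaration are replaced by the definitions the `libc` crate now provides, and `mem::zeroed()` gives way to `MaybeUninit`, so no fabricated value exists before the C side fills the buffer. A self-contained sketch of that out-parameter pattern (`Info` and `c_fill_info` are hypothetical stand-ins for `libc::image_info` and `libc::_get_next_image_info`):

use std::mem::MaybeUninit;

#[repr(C)]
struct Info {
    id: i32,
    name: [u8; 64],
}

// Stand-in for a C function that writes through an out-pointer and
// returns 0 on success.
unsafe fn c_fill_info(out: *mut Info) -> i32 {
    (*out).id = 1;
    (*out).name = [0; 64];
    0
}

fn get_info() -> Option<Info> {
    // Uninitialized storage: the callee is responsible for writing it,
    // so `assume_init` is only reached after checking the return code.
    let mut info = MaybeUninit::<Info>::uninit();
    unsafe {
        if c_fill_info(info.as_mut_ptr()) == 0 {
            Some(info.assume_init())
        } else {
            None
        }
    }
}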
......@@ -486,6 +486,7 @@ fn test_with_no_doc_stage0() {
fail_fast: true,
doc_tests: DocTests::No,
bless: false,
force_rerun: false,
compare_mode: None,
rustfix_coverage: false,
pass: None,
......@@ -527,6 +528,7 @@ fn test_exclude() {
fail_fast: true,
doc_tests: DocTests::No,
bless: false,
force_rerun: false,
compare_mode: None,
rustfix_coverage: false,
pass: None,
......@@ -583,6 +585,7 @@ fn test_docs() {
fail_fast: true,
doc_tests: DocTests::Yes,
bless: false,
force_rerun: false,
compare_mode: None,
rustfix_coverage: false,
pass: None,
......
......@@ -102,6 +102,7 @@ pub enum Subcommand {
paths: Vec<PathBuf>,
/// Whether to automatically update stderr/stdout files
bless: bool,
force_rerun: bool,
compare_mode: Option<String>,
pass: Option<String>,
run: Option<String>,
......@@ -284,6 +285,7 @@ pub fn parse(args: &[String]) -> Flags {
opts.optflag("", "no-doc", "do not run doc tests");
opts.optflag("", "doc", "only run doc tests");
opts.optflag("", "bless", "update all stderr/stdout files of failing ui tests");
opts.optflag("", "force-rerun", "rerun tests even if the inputs are unchanged");
opts.optopt(
"",
"compare-mode",
......@@ -558,6 +560,7 @@ pub fn parse(args: &[String]) -> Flags {
"test" | "t" => Subcommand::Test {
paths,
bless: matches.opt_present("bless"),
force_rerun: matches.opt_present("force-rerun"),
compare_mode: matches.opt_str("compare-mode"),
pass: matches.opt_str("pass"),
run: matches.opt_str("run"),
......@@ -726,6 +729,13 @@ pub fn bless(&self) -> bool {
}
}
pub fn force_rerun(&self) -> bool {
match *self {
Subcommand::Test { force_rerun, .. } => force_rerun,
_ => false,
}
}
pub fn rustfix_coverage(&self) -> bool {
match *self {
Subcommand::Test { rustfix_coverage, .. } => rustfix_coverage,
......
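The new `--force-rerun` flag follows the existing `--bless` plumbing end to end: declared with `getopts`, captured into the `Test` subcommand, and exposed through an accessor that defaults to `false` for every other subcommand. A stand-alone sketch of the same `getopts` flag pattern (requires the `getopts` crate; not bootstrap's actual code):

use getopts::Options;

fn main() {
    let mut opts = Options::new();
    // Same declaration style the diff adds next to `--bless`.
    opts.optflag("", "force-rerun", "rerun tests even if the inputs are unchanged");
    let matches = opts.parse(std::env::args().skip(1)).expect("bad arguments");
    let force_rerun = matches.opt_present("force-rerun");
    println!("force_rerun = {force_rerun}");
}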
......@@ -1315,6 +1315,10 @@ fn run(self, builder: &Builder<'_>) {
cmd.arg("--bless");
}
if builder.config.cmd.force_rerun() {
cmd.arg("--force-rerun");
}
let compare_mode =
builder.config.cmd.compare_mode().or_else(|| {
if builder.config.test_compare_mode { self.compare_mode } else { None }
......
......@@ -1937,7 +1937,7 @@ impl Visibility {
Struct(VariantStruct),
}
/// Small wrapper around [`rustc_span::Span]` that adds helper methods
/// Small wrapper around [`rustc_span::Span`] that adds helper methods
/// and enforces calling [`rustc_span::Span::source_callsite()`].
#[derive(Copy, Clone, Debug)]
crate struct Span(rustc_span::Span);
......
......@@ -134,14 +134,12 @@ pub extern "C" fn inner(y: usize) -> usize {
#[naked]
pub unsafe fn default_abi() {
//~^ WARN Rust ABI is unsupported in naked functions
//~| WARN this was previously accepted
asm!("", options(noreturn));
}
#[naked]
pub unsafe extern "Rust" fn rust_abi() {
//~^ WARN Rust ABI is unsupported in naked functions
//~| WARN this was previously accepted
asm!("", options(noreturn));
}
......
......@@ -284,20 +284,16 @@ warning: Rust ABI is unsupported in naked functions
LL | pub unsafe fn default_abi() {
| ^^^^^^^^^^^
|
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
= note: `#[warn(undefined_naked_function_abi)]` on by default
warning: Rust ABI is unsupported in naked functions
--> $DIR/naked-functions.rs:142:29
--> $DIR/naked-functions.rs:141:29
|
LL | pub unsafe extern "Rust" fn rust_abi() {
| ^^^^^^^^
|
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
--> $DIR/naked-functions.rs:177:1
--> $DIR/naked-functions.rs:175:1
|
LL | #[inline]
| ^^^^^^^^^
......@@ -306,7 +302,7 @@ LL | #[inline]
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
--> $DIR/naked-functions.rs:185:1
--> $DIR/naked-functions.rs:183:1
|
LL | #[inline(always)]
| ^^^^^^^^^^^^^^^^^
......@@ -315,7 +311,7 @@ LL | #[inline(always)]
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
--> $DIR/naked-functions.rs:193:1
--> $DIR/naked-functions.rs:191:1
|
LL | #[inline(never)]
| ^^^^^^^^^^^^^^^^
......@@ -324,7 +320,7 @@ LL | #[inline(never)]
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
--> $DIR/naked-functions.rs:201:1
--> $DIR/naked-functions.rs:199:1
|
LL | #[inline]
| ^^^^^^^^^
......@@ -333,7 +329,7 @@ LL | #[inline]
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
--> $DIR/naked-functions.rs:204:1
--> $DIR/naked-functions.rs:202:1
|
LL | #[inline(always)]
| ^^^^^^^^^^^^^^^^^
......@@ -342,7 +338,7 @@ LL | #[inline(always)]
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
--> $DIR/naked-functions.rs:207:1
--> $DIR/naked-functions.rs:205:1
|
LL | #[inline(never)]
| ^^^^^^^^^^^^^^^^
......
......@@ -362,6 +362,9 @@ pub struct Config {
pub nodejs: Option<String>,
/// Path to a npm executable. Used for rustdoc GUI tests
pub npm: Option<String>,
/// Whether to rerun tests even if the inputs are unchanged.
pub force_rerun: bool,
}
impl Config {
......
......@@ -144,6 +144,7 @@ pub fn parse_config(args: Vec<String>) -> Config {
"enable this to generate a Rustfix coverage file, which is saved in \
`./<build_base>/rustfix_missing_coverage.txt`",
)
.optflag("", "force-rerun", "rerun tests even if the inputs are unchanged")
.optflag("h", "help", "show this message")
.reqopt("", "channel", "current Rust channel", "CHANNEL");
......@@ -289,6 +290,8 @@ fn make_absolute(path: PathBuf) -> PathBuf {
llvm_components: matches.opt_str("llvm-components").unwrap(),
nodejs: matches.opt_str("nodejs"),
npm: matches.opt_str("npm"),
force_rerun: matches.opt_present("force-rerun"),
}
}
......@@ -644,13 +647,15 @@ fn make_test(config: &Config, testpaths: &TestPaths, inputs: &Stamp) -> Vec<test
let test_name = crate::make_test_name(config, testpaths, revision);
let mut desc = make_test_description(config, test_name, &test_path, src_file, cfg);
// Ignore tests that already run and are up to date with respect to inputs.
desc.ignore |= is_up_to_date(
config,
testpaths,
&early_props,
revision.map(|s| s.as_str()),
inputs,
);
if !config.force_rerun {
desc.ignore |= is_up_to_date(
config,
testpaths,
&early_props,
revision.map(|s| s.as_str()),
inputs,
);
}
test::TestDescAndFn { desc, testfn: make_test_closure(config, testpaths, revision) }
})
.collect()
......
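Down in compiletest, the flag takes effect as a guard around the up-to-date check: when reruns are forced, `desc.ignore` is simply never OR-ed with the stamp comparison. A minimal illustration of that gating with hypothetical stand-ins (`Config`, `Desc`, and `is_up_to_date` here are simplified, not compiletest's types):

struct Config {
    force_rerun: bool,
}

struct Desc {
    ignore: bool,
}

// Stand-in for compiletest's stamp-file comparison.
fn is_up_to_date() -> bool {
    true
}

fn apply_caching(config: &Config, desc: &mut Desc) {
    // Forced reruns skip the check entirely, so a cached result can
    // never mark the test as ignored.
    if !config.force_rerun {
        desc.ignore |= is_up_to_date();
    }
}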
......@@ -129,7 +129,7 @@ enum FileEntry {
/// An HTML file.
///
/// This includes the contents of the HTML file, and an optional set of
/// HTML IDs. The IDs are used for checking fragments. The are computed
/// HTML IDs. The IDs are used for checking fragments. They are computed
/// as-needed. The source is discarded (replaced with an empty string)
/// after the file has been checked, to conserve on memory.
HtmlFile { source: Rc<String>, ids: RefCell<HashSet<String>> },
......
Subproject commit 99ec9c1707aad74b4a4a6d301f27fb1c19733f58
Subproject commit 042cbf175bfdad6524fd00d7570b2297a0426063