提交 7a3fdfbf 编写于 作者: A Alex Crichton

Remove morestack support

This commit removes all morestack support from the compiler which entails:

* Segmented stacks are no longer emitted in codegen.
* We no longer build or distribute libmorestack.a
* The `stack_exhausted` lang item is no longer required

The only current use of the segmented stack support in LLVM is to detect stack
overflow. This is no longer really required, however, because we already have
guard pages for all threads and registered signal handlers watching for a
segfault on those pages (to print out a stack overflow message). Additionally,
major platforms (aka Windows) already don't use morestack.

This means that Rust is by default less likely to catch stack overflows because
if a function takes up more than one page of stack space it won't hit the guard
page. Catching exactly this case was the purpose of morestack, but it's
better served with stack probes which have more cross platform support and no
runtime support necessary. Until LLVM supports this for all platforms it looks
like morestack isn't really buying us much.

cc #16012 (still need stack probes)
Closes #26458 (a drive-by fix to help diagnostics on stack overflow)
上级 d0345618
......@@ -101,7 +101,6 @@ define CLEAN_TARGET_STAGE_N
clean$(1)_T_$(2)_H_$(3): \
$$(foreach crate,$$(CRATES),clean$(1)_T_$(2)_H_$(3)-lib-$$(crate)) \
$$(foreach tool,$$(TOOLS) $$(DEBUGGER_BIN_SCRIPTS_ALL),clean$(1)_T_$(2)_H_$(3)-tool-$$(tool))
$$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/libmorestack.a
$$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/libcompiler-rt.a
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/librun_pass_stage* # For unix
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/run_pass_stage* # For windows
......
......@@ -65,7 +65,7 @@ DEPS_libc := core
DEPS_rustc_unicode := core
DEPS_alloc := core libc native:jemalloc
DEPS_std := core libc rand alloc collections rustc_unicode \
native:rust_builtin native:backtrace native:rustrt_native \
native:rust_builtin native:backtrace \
rustc_bitflags
DEPS_graphviz := std
DEPS_syntax := std term serialize log fmt_macros arena libc
......
......@@ -163,7 +163,7 @@ endif
# that the snapshot will be generated with a statically linked rustc so we only
# have to worry about the distribution of one file (with its native dynamic
# dependencies)
RUSTFLAGS_STAGE0 += -C prefer-dynamic
RUSTFLAGS_STAGE0 += -C prefer-dynamic -C no-stack-check
RUSTFLAGS_STAGE1 += -C prefer-dynamic
RUST_LIB_FLAGS_ST2 += -C prefer-dynamic
RUST_LIB_FLAGS_ST3 += -C prefer-dynamic
......@@ -400,6 +400,11 @@ TSREQ$(1)_T_$(2)_H_$(3) = \
$$(foreach obj,$$(INSTALLED_OBJECTS_$(2)),\
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(obj))
ifeq ($(1),0)
TSREQ$(1)_T_$(2)_H_$(3) += \
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(call CFG_STATIC_LIB_NAME_$(2),morestack)
endif
# Prerequisites for a working stageN compiler and libraries, for a specific
# target
SREQ$(1)_T_$(2)_H_$(3) = \
......
......@@ -113,8 +113,7 @@ CFG_RLIB_GLOB=lib$(1)-*.rlib
include $(wildcard $(CFG_SRC_DIR)mk/cfg/*.mk)
define ADD_INSTALLED_OBJECTS
INSTALLED_OBJECTS_$(1) += $$(call CFG_STATIC_LIB_NAME_$(1),morestack) \
$$(call CFG_STATIC_LIB_NAME_$(1),compiler-rt)
INSTALLED_OBJECTS_$(1) += $$(call CFG_STATIC_LIB_NAME_$(1),compiler-rt)
endef
$(foreach target,$(CFG_TARGET), \
......
......@@ -35,8 +35,8 @@
# that's per-target so you're allowed to conditionally add files based on the
# target.
################################################################################
NATIVE_LIBS := rust_builtin hoedown morestack miniz \
rustrt_native rust_test_helpers
NATIVE_LIBS := rust_builtin hoedown miniz \
rust_test_helpers morestack
# $(1) is the target triple
define NATIVE_LIBRARIES
......@@ -53,10 +53,8 @@ NATIVE_DEPS_hoedown_$(1) := hoedown/src/autolink.c \
NATIVE_DEPS_miniz_$(1) = miniz.c
NATIVE_DEPS_rust_builtin_$(1) := rust_builtin.c \
rust_android_dummy.c
NATIVE_DEPS_rustrt_native_$(1) := arch/$$(HOST_$(1))/record_sp.S
NATIVE_DEPS_rust_test_helpers_$(1) := rust_test_helpers.c
NATIVE_DEPS_morestack_$(1) := arch/$$(HOST_$(1))/morestack.S
NATIVE_DEPS_morestack_$(1) := empty.c
################################################################################
# You shouldn't find it that necessary to edit anything below this line.
......
......@@ -56,8 +56,7 @@ $(foreach host,$(CFG_HOST), \
# 1. The immediate dependencies are the rust source files
# 2. Each rust crate dependency is listed (based on their stamp files),
# as well as all native dependencies (listed in RT_OUTPUT_DIR)
# 3. The stage (n-1) compiler is required through the TSREQ dependency, along
# with the morestack library
# 3. The stage (n-1) compiler is required through the TSREQ dependency
# 4. When actually executing the rule, the first thing we do is to clean out
# old libs and rlibs via the REMOVE_ALL_OLD_GLOB_MATCHES macro
# 5. Finally, we get around to building the actual crate. It's just one
......
......@@ -51,7 +51,6 @@ fn main(argc: isize, argv: *const *const u8) -> isize {
0
}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
......
......@@ -36,7 +36,6 @@ fn start(_argc: isize, _argv: *const *const u8) -> isize {
// These functions and traits are used by the compiler, but not
// for a bare-bones hello world. These are normally
// provided by libstd.
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
......@@ -61,7 +60,6 @@ pub extern fn main(argc: i32, argv: *const *const u8) -> i32 {
0
}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
......@@ -73,18 +71,12 @@ The compiler currently makes a few assumptions about symbols which are available
in the executable to call. Normally these functions are provided by the standard
library, but without it you must define your own.
The first of these three functions, `stack_exhausted`, is invoked whenever stack
overflow is detected. This function has a number of restrictions about how it
can be called and what it must do, but if the stack limit register is not being
maintained then a thread always has an "infinite stack" and this function
shouldn't get triggered.
The second of these three functions, `eh_personality`, is used by the
The first of these two functions, `eh_personality`, is used by the
failure mechanisms of the compiler. This is often mapped to GCC's
personality function (see the
[libstd implementation](../std/rt/unwind/index.html) for more
information), but crates which do not trigger a panic can be assured
that this function is never called. The final function, `panic_fmt`, is
that this function is never called. The second function, `panic_fmt`, is
also used by the failure mechanisms of the compiler.
## Using libcore
......@@ -150,7 +142,6 @@ extern fn panic_fmt(args: &core::fmt::Arguments,
loop {}
}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
# #[start] fn start(argc: isize, argv: *const *const u8) -> isize { 0 }
......
......@@ -347,7 +347,5 @@ pub fn collect_language_items(krate: &ast::Crate,
NonZeroItem, "non_zero", non_zero;
StackExhaustedLangItem, "stack_exhausted", stack_exhausted;
DebugTraitLangItem, "debug_trait", debug_trait;
}
......@@ -39,9 +39,6 @@ pub fn check_crate(krate: &ast::Crate,
// These are never called by user code, they're generated by the compiler.
// They will never implicitly be added to the `missing` array unless we do
// so here.
if items.stack_exhausted().is_none() {
items.missing.push(lang_items::StackExhaustedLangItem);
}
if items.eh_personality().is_none() {
items.missing.push(lang_items::EhPersonalityLangItem);
}
......@@ -124,7 +121,6 @@ fn visit_foreign_item(&mut self, i: &ast::ForeignItem) {
weak_lang_items! {
panic_fmt, PanicFmtLangItem, rust_begin_unwind;
stack_exhausted, StackExhaustedLangItem, rust_stack_exhausted;
eh_personality, EhPersonalityLangItem, rust_eh_personality;
eh_unwind_resume, EhUnwindResumeLangItem, rust_eh_unwind_resume;
}
......@@ -19,7 +19,6 @@ pub fn opts() -> TargetOptions {
dynamic_linking: true,
executables: true,
is_like_osx: true,
morestack: true,
has_rpath: true,
dll_prefix: "lib".to_string(),
dll_suffix: ".dylib".to_string(),
......
......@@ -87,14 +87,6 @@ pub fn opts(arch: Arch) -> TargetOptions {
cpu: target_cpu(arch),
dynamic_linking: false,
executables: true,
// Although there is an experimental implementation of LLVM which
// supports SS on armv7 it wasn't approved by Apple, see:
// http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20140505/216350.html
// It looks like it might be never accepted to upstream LLVM.
//
// SS might also be enabled on Arm64, as it has builtin support in LLVM,
// but I haven't tested it thoroughly yet
morestack: false,
pre_link_args: pre_link_args(arch),
.. super::apple_base::opts()
}
......
......@@ -16,7 +16,6 @@ pub fn opts() -> TargetOptions {
linker: "cc".to_string(),
dynamic_linking: true,
executables: true,
morestack: false,
linker_is_gnu: true,
has_rpath: true,
position_independent_executables: true,
......
......@@ -16,7 +16,6 @@ pub fn opts() -> TargetOptions {
linker: "cc".to_string(),
dynamic_linking: true,
executables: true,
morestack: true,
linker_is_gnu: true,
has_rpath: true,
pre_link_args: vec!(
......
......@@ -16,7 +16,6 @@ pub fn opts() -> TargetOptions {
linker: "cc".to_string(),
dynamic_linking: true,
executables: true,
morestack: true,
has_rpath: true,
archive_format: "gnu".to_string(),
......
......@@ -14,7 +14,6 @@ pub fn target() -> Target {
let mut base = super::freebsd_base::opts();
base.cpu = "pentium4".to_string();
base.pre_link_args.push("-m32".to_string());
base.morestack = false;
Target {
llvm_target: "i686-unknown-freebsd".to_string(),
......
......@@ -15,7 +15,6 @@ pub fn opts() -> TargetOptions {
TargetOptions {
dynamic_linking: true,
executables: true,
morestack: true,
linker_is_gnu: true,
has_rpath: true,
pre_link_args: vec![
......
......@@ -118,9 +118,6 @@ pub struct TargetOptions {
/// Whether executables are available on this target. iOS, for example, only allows static
/// libraries. Defaults to false.
pub executables: bool,
/// Whether LLVM's segmented stack prelude is supported by whatever runtime is available.
/// Will emit stack checks and calls to __morestack. Defaults to false.
pub morestack: bool,
/// Relocation model to use in object file. Corresponds to `llc
/// -relocation-model=$relocation_model`. Defaults to "pic".
pub relocation_model: String,
......@@ -192,7 +189,6 @@ fn default() -> TargetOptions {
features: "".to_string(),
dynamic_linking: false,
executables: false,
morestack: false,
relocation_model: "pic".to_string(),
code_model: "default".to_string(),
disable_redzone: false,
......@@ -298,7 +294,6 @@ pub fn from_json(obj: Json) -> Target {
key!(data_layout);
key!(dynamic_linking, bool);
key!(executables, bool);
key!(morestack, bool);
key!(disable_redzone, bool);
key!(eliminate_frame_pointer, bool);
key!(function_sections, bool);
......
......@@ -16,7 +16,6 @@ pub fn opts() -> TargetOptions {
linker: "cc".to_string(),
dynamic_linking: true,
executables: true,
morestack: false,
linker_is_gnu: true,
has_rpath: true,
pre_link_args: vec!(
......
......@@ -16,7 +16,6 @@ pub fn opts() -> TargetOptions {
linker: "cc".to_string(),
dynamic_linking: true,
executables: true,
morestack: false,
linker_is_gnu: true,
has_rpath: true,
pre_link_args: vec!(
......
......@@ -23,7 +23,6 @@ pub fn opts() -> TargetOptions {
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
morestack: false,
is_like_windows: true,
archive_format: "gnu".to_string(),
pre_link_args: vec!(
......
......@@ -53,7 +53,6 @@ pub fn opts() -> TargetOptions {
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
morestack: false,
is_like_windows: true,
is_like_msvc: true,
pre_link_args: vec![
......
......@@ -759,9 +759,6 @@ fn link_staticlib(sess: &Session, objects: &[PathBuf], out_filename: &Path,
if sess.target.target.options.is_like_osx && !ab.using_llvm() {
ab.build();
}
if sess.target.target.options.morestack {
ab.add_native_library("morestack").unwrap();
}
if !sess.target.target.options.no_compiler_rt {
ab.add_native_library("compiler-rt").unwrap();
}
......@@ -905,26 +902,6 @@ fn link_args(cmd: &mut Linker,
}
cmd.output_filename(out_filename);
// Stack growth requires statically linking a __morestack function. Note
// that this is listed *before* all other libraries. Due to the usage of the
// --as-needed flag below, the standard library may only be useful for its
// rust_stack_exhausted function. In this case, we must ensure that the
// libmorestack.a file appears *before* the standard library (so we put it
// at the very front).
//
// Most of the time this is sufficient, except for when LLVM gets super
// clever. If, for example, we have a main function `fn main() {}`, LLVM
// will optimize out calls to `__morestack` entirely because the function
// doesn't need any stack at all!
//
// To get around this snag, we specially tell the linker to always include
// all contents of this library. This way we're guaranteed that the linker
// will include the __morestack symbol 100% of the time, always resolving
// references to it even if the object above didn't use it.
if t.options.morestack {
cmd.link_whole_staticlib("morestack", &[lib_path]);
}
// When linking a dynamic library, we put the metadata into a section of the
// executable. This metadata is in a separate object file from the main
// object file, so we link that in here.
......
......@@ -23,19 +23,6 @@
use trans::machine;
use trans::type_of;
/// Toggle the LLVM "split-stack" string attribute on a function.
///
/// When `set` is true the attribute is attached to `val`, so LLVM emits the
/// segmented-stack prologue for it; when false any existing attribute is
/// removed. The attribute name is NUL-terminated for the C API.
#[inline]
pub fn split_stack(val: ValueRef, set: bool) {
    let name = "split-stack\0".as_ptr() as *const _;
    unsafe {
        match set {
            true => llvm::LLVMAddFunctionAttrString(val, llvm::FunctionIndex as c_uint, name),
            false => llvm::LLVMRemoveFunctionAttrString(val, llvm::FunctionIndex as c_uint, name),
        }
    }
}
/// Mark LLVM function to use provided inline heuristic.
#[inline]
pub fn inline(val: ValueRef, inline: InlineAttr) {
......@@ -123,9 +110,7 @@ pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRe
}
for attr in attrs {
if attr.check_name("no_stack_check") {
split_stack(llfn, false);
} else if attr.check_name("cold") {
if attr.check_name("cold") {
unsafe {
llvm::LLVMAddFunctionAttribute(llfn,
llvm::FunctionIndex as c_uint,
......
......@@ -2173,17 +2173,8 @@ fn finish_register_fn(ccx: &CrateContext, sym: String, node_id: ast::NodeId,
llfn: ValueRef) {
ccx.item_symbols().borrow_mut().insert(node_id, sym);
// The stack exhaustion lang item shouldn't have a split stack because
// otherwise it would continue to be exhausted (bad), and both it and the
// eh_personality functions need to be externally linkable.
// The eh_personality function need to be externally linkable.
let def = ast_util::local_def(node_id);
if ccx.tcx().lang_items.stack_exhausted() == Some(def) {
attributes::split_stack(llfn, false);
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
if ccx.use_dll_storage_attrs() {
llvm::SetDLLStorageClass(llfn, llvm::DLLExportStorageClass);
}
}
if ccx.tcx().lang_items.eh_personality() == Some(def) {
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
if ccx.use_dll_storage_attrs() {
......@@ -2801,13 +2792,8 @@ pub fn trans_crate(tcx: &ty::ctxt, analysis: ty::CrateAnalysis) -> CrateTranslat
});
// Make sure that some other crucial symbols are not eliminated from the
// module. This includes the main function, the crate map (used for debug
// log settings and I/O), and finally the curious rust_stack_exhausted
// symbol. This symbol is required for use by the libmorestack library that
// we link in, so we must ensure that this symbol is not internalized (if
// defined in the crate).
// module, including the main function.
reachable.push("main".to_string());
reachable.push("rust_stack_exhausted".to_string());
// referenced from .eh_frame section on some platforms
reachable.push("rust_eh_personality".to_string());
......
......@@ -570,11 +570,6 @@ pub fn get_intrinsic(&self, key: & &'static str) -> ValueRef {
}
}
pub fn is_split_stack_supported(&self) -> bool {
self.sess().target.target.options.morestack
}
pub fn llmod(&self) -> ModuleRef {
self.local.llmod
}
......
......@@ -79,9 +79,6 @@ pub fn declare_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv,
llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoRedZone)
}
if ccx.is_split_stack_supported() && !ccx.sess().opts.cg.no_stack_check {
attributes::split_stack(llfn, true);
}
llfn
}
......
......@@ -23,7 +23,7 @@
use prelude::v1::*;
use sys;
use usize;
use thread;
// Reexport some of our utilities which are expected by other crates.
pub use self::util::min_stack;
......@@ -53,11 +53,6 @@
/// of exiting cleanly.
pub const DEFAULT_ERROR_CODE: isize = 101;
#[cfg(any(windows, android))]
const OS_DEFAULT_STACK_ESTIMATE: usize = 1 << 20;
#[cfg(all(unix, not(android)))]
const OS_DEFAULT_STACK_ESTIMATE: usize = 2 * (1 << 20);
#[cfg(not(test))]
#[lang = "start"]
fn lang_start(main: *const u8, argc: isize, argv: *const *const u8) -> isize {
......@@ -67,37 +62,9 @@ fn lang_start(main: *const u8, argc: isize, argv: *const *const u8) -> isize {
use env;
use rt;
use sys_common::thread_info::{self, NewThread};
use sys_common;
use thread::Thread;
let something_around_the_top_of_the_stack = 1;
let addr = &something_around_the_top_of_the_stack as *const _ as *const isize;
let my_stack_top = addr as usize;
// FIXME #11359 we just assume that this thread has a stack of a
// certain size, and estimate that there's at most 20KB of stack
// frames above our current position.
const TWENTY_KB: usize = 20000;
// saturating-add to sidestep overflow
let top_plus_spill = if usize::MAX - TWENTY_KB < my_stack_top {
usize::MAX
} else {
my_stack_top + TWENTY_KB
};
// saturating-sub to sidestep underflow
let my_stack_bottom = if top_plus_spill < OS_DEFAULT_STACK_ESTIMATE {
0
} else {
top_plus_spill - OS_DEFAULT_STACK_ESTIMATE
};
let failed = unsafe {
// First, make sure we don't trigger any __morestack overflow checks,
// and next set up our stack to have a guard page and run through our
// own fault handlers if we hit it.
sys_common::stack::record_os_managed_stack_bounds(my_stack_bottom,
my_stack_top);
let main_guard = sys::thread::guard::init();
sys::stack_overflow::init();
......@@ -129,10 +96,7 @@ fn lang_start(main: *const u8, argc: isize, argv: *const *const u8) -> isize {
args::init(argc, argv);
// And finally, let's run some code!
let res = unwind::try(|| {
let main: fn() = mem::transmute(main);
main();
});
let res = thread::catch_panic(mem::transmute::<_, fn()>(main));
cleanup();
res.is_err()
};
......
......@@ -111,10 +111,6 @@
thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
#[link(name = "rustrt_native", kind = "static")]
#[cfg(not(test))]
extern {}
/// Invoke a closure, capturing the cause of panic if one occurs.
///
/// This function will return `Ok(())` if the closure did not panic, and will
......
......@@ -21,7 +21,6 @@
pub mod poison;
pub mod remutex;
pub mod rwlock;
pub mod stack;
pub mod thread;
pub mod thread_info;
pub mod thread_local;
......@@ -52,3 +51,7 @@ pub trait IntoInner<Inner> {
pub trait FromInner<Inner> {
fn from_inner(inner: Inner) -> Self;
}
#[cfg(stage0)]
#[lang = "stack_exhausted"]
pub fn stack_exhausted() {}
// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Rust stack-limit management
//!
//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
//! overflow for rust threads. In this scheme, the prologue of every function is
//! preceded by a check to see whether the current stack limits are being
//! exceeded.
//!
//! This module provides the functionality necessary in order to manage these
//! stack limits (which are stored in platform-specific locations). The
//! functions here are used at the borders of the thread lifetime in order to
//! manage these limits.
//!
//! This module is unstable because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stack
// There is a pool of reserved slots for Apple internal use (0..119)
// First dynamic allocated pthread key starts with 257 (on iOS7)
// So using slot 149 should be pretty safe ASSUMING space is reserved
// for every key < first dynamic key
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// ranges 80..89 and 110..119, especially considering the fact that Garbage Collection
// was never supposed to work on iOS. But as everybody knows - there is a chance
// that those slots will be re-used, like it happened with key 95 (moved from
// JavaScriptCore to CoreText)
//
// Unfortunately Apple rejected patch to LLVM which generated
// corresponding prolog, decision was taken to disable segmented
// stack support on iOS.
// Extra stack reserved below the recorded limit so that the overflow handler
// itself has some room to run (see `stack_exhausted` below, which moves the
// limit halfway into this zone before calling out).
pub const RED_ZONE: usize = 20 * 1024;

/// This function is invoked from rust's current __morestack function. Segmented
/// stacks are currently not enabled as segmented stacks, but rather one giant
/// stack segment. This means that whenever we run out of stack, we want to
/// truly consider it to be stack overflow rather than allocating a new stack.
///
/// Concretely: it lowers the recorded stack limit into the red zone (so the
/// calls below don't immediately re-trigger this handler), reports the
/// overflow, and aborts the process.
#[cfg(not(test))] // in testing, use the original libstd's version
#[lang = "stack_exhausted"]
extern fn stack_exhausted() {
    use intrinsics;

    unsafe {
        // We're calling this function because the stack just ran out. We need
        // to call some other rust functions, but if we invoke the functions
        // right now it'll just trigger this handler being called again. In
        // order to alleviate this, we move the stack limit to be inside of the
        // red zone that was allocated for exactly this reason.
        let limit = get_sp_limit();
        record_sp_limit(limit - RED_ZONE / 2);

        // This probably isn't the best course of action. Ideally one would want
        // to unwind the stack here instead of just aborting the entire process.
        // This is a tricky problem, however. There's a few things which need to
        // be considered:
        //
        //  1. We're here because of a stack overflow, yet unwinding will run
        //     destructors and hence arbitrary code. What if that code overflows
        //     the stack? One possibility is to use the above allocation of an
        //     extra 10k to hope that we don't hit the limit, and if we do then
        //     abort the whole program. Not the best, but kind of hard to deal
        //     with unless we want to switch stacks.
        //
        //  2. LLVM will optimize functions based on whether they can unwind or
        //     not. It will flag functions with 'nounwind' if it believes that
        //     the function cannot trigger unwinding, but if we do unwind on
        //     stack overflow then it means that we could unwind in any function
        //     anywhere. We would have to make sure that LLVM only places the
        //     nounwind flag on functions which don't call any other functions.
        //
        //  3. The function that overflowed may have owned arguments. These
        //     arguments need to have their destructors run, but we haven't even
        //     begun executing the function yet, so unwinding will not run the
        //     any landing pads for these functions. If this is ignored, then
        //     the arguments will just be leaked.
        //
        // Exactly what to do here is a very delicate topic, and is possibly
        // still up in the air for what exactly to do. Some relevant issues:
        //
        //  #3555 - out-of-stack failure leaks arguments
        //  #3695 - should there be a stack limit?
        //  #9855 - possible strategies which could be taken
        //  #9854 - unwinding on windows through __morestack has never worked
        //  #2361 - possible implementation of not using landing pads

        ::rt::util::report_overflow();
        intrinsics::abort();
    }
}
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack, indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
/// Record the stack bounds for an OS-managed stack.
///
/// Only the low bound is used: the limit is recorded `RED_ZONE` bytes above
/// the bottom of the stack so the overflow handler has room to run below it.
/// `_stack_hi` is accepted for interface symmetry but is ignored.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: usize, _stack_hi: usize) {
    record_sp_limit(stack_lo + RED_ZONE);
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning any further function calls will
/// possibly be triggering the morestack logic if you're not careful.
///
/// Also note that this and all of the inside functions are all flagged as
/// "inline(always)" because they're messing around with the stack limits. This
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were an actual function call).
// Dispatches to a per-platform inner function that stores `limit` in the
// platform-specific TLS slot that LLVM's segmented-stack prologue reads.
// See the doc comment above for the calling restrictions.
#[inline(always)]
pub unsafe fn record_sp_limit(limit: usize) {
    return target_record_sp_limit(limit);

    // x86_64 macOS/iOS: store the limit at %gs:(0x60 + 90*8)
    // (presumably a reserved pthread TLS slot -- see the iOS notes above).
    #[cfg(all(target_arch = "x86_64",
              any(target_os = "macos", target_os = "ios")))]
    #[inline(always)]
    unsafe fn target_record_sp_limit(limit: usize) {
        asm!("movq $$0x60+90*8, %rsi
              movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
    }
    // x86_64 Linux: the limit lives at %fs:112.
    #[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: usize) {
        asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
    }
    // x86_64 Windows: no limit is maintained; this is a no-op.
    #[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
    unsafe fn target_record_sp_limit(_: usize) {
    }
    // x86_64 FreeBSD: the limit lives at %fs:24.
    #[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: usize) {
        asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
    }
    // x86_64 DragonFly: the limit lives at %fs:32.
    #[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))]
    #[inline(always)]
    unsafe fn target_record_sp_limit(limit: usize) {
        asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
    }

    // x86 macOS/iOS: store the limit at %gs:(0x48 + 90*4).
    #[cfg(all(target_arch = "x86",
              any(target_os = "macos", target_os = "ios")))]
    #[inline(always)]
    unsafe fn target_record_sp_limit(limit: usize) {
        asm!("movl $$0x48+90*4, %eax
              movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
    }
    // x86 Linux: the limit lives at %gs:48.
    #[cfg(all(target_arch = "x86", target_os = "linux"))]
    #[inline(always)]
    unsafe fn target_record_sp_limit(limit: usize) {
        asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
    }
    // x86 Windows: no limit is maintained; this is a no-op.
    #[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
    unsafe fn target_record_sp_limit(_: usize) {
    }

    // mips, arm - The implementations are a bit big for inline asm!
    // They can be found in src/rt/arch/$target_arch/record_sp.S
    #[cfg(any(target_arch = "mips",
              target_arch = "mipsel",
              all(target_arch = "arm", not(target_os = "ios"))))]
    #[inline(always)]
    unsafe fn target_record_sp_limit(limit: usize) {
        use libc::c_void;
        // Tail-calls the external assembly routine of the same name.
        return record_sp_limit(limit as *const c_void);
        extern {
            fn record_sp_limit(limit: *const c_void);
        }
    }

    // aarch64 - FIXME(AARCH64): missing...
    // powerpc - FIXME(POWERPC): missing...
    // arm-ios - iOS segmented stack is disabled for now, see related notes
    // openbsd/bitrig/netbsd - no segmented stacks.
    // x86-freebsd - no segmented stacks.
    #[cfg(any(target_arch = "aarch64",
              target_arch = "powerpc",
              all(target_arch = "arm", target_os = "ios"),
              all(target_arch = "x86", target_os = "freebsd"),
              target_os = "bitrig",
              target_os = "netbsd",
              target_os = "openbsd"))]
    unsafe fn target_record_sp_limit(_: usize) {
    }
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
// Dispatches to a per-platform inner function that loads the recorded limit
// from the same TLS slot that `record_sp_limit` writes. Platforms without a
// maintained limit return a fixed constant instead of reading anything.
#[inline(always)]
pub unsafe fn get_sp_limit() -> usize {
    return target_get_sp_limit();

    // x86_64 macOS/iOS: load the limit from %gs:(0x60 + 90*8).
    #[cfg(all(target_arch = "x86_64",
              any(target_os = "macos", target_os = "ios")))]
    #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        let limit;
        asm!("movq $$0x60+90*8, %rsi
              movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
        return limit;
    }
    // x86_64 Linux: load the limit from %fs:112.
    #[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        let limit;
        asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
        return limit;
    }
    // x86_64 Windows: no limit is maintained; return a fixed constant.
    #[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        return 1024;
    }
    // x86_64 FreeBSD: load the limit from %fs:24.
    #[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        let limit;
        asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
        return limit;
    }
    // x86_64 DragonFly: load the limit from %fs:32.
    #[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))]
    #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        let limit;
        asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
        return limit;
    }

    // x86 macOS/iOS: load the limit from %gs:(0x48 + 90*4).
    #[cfg(all(target_arch = "x86",
              any(target_os = "macos", target_os = "ios")))]
    #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        let limit;
        asm!("movl $$0x48+90*4, %eax
              movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
        return limit;
    }
    // x86 Linux: load the limit from %gs:48.
    #[cfg(all(target_arch = "x86", target_os = "linux"))]
    #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        let limit;
        asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
        return limit;
    }
    // x86 Windows: no limit is maintained; return a fixed constant.
    #[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        return 1024;
    }

    // mips, arm - The implementations are a bit big for inline asm!
    // They can be found in src/rt/arch/$target_arch/record_sp.S
    #[cfg(any(target_arch = "mips",
              target_arch = "mipsel",
              all(target_arch = "arm", not(target_os = "ios"))))]
    #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        use libc::c_void;
        // Tail-calls the external assembly routine of the same name.
        return get_sp_limit() as usize;
        extern {
            fn get_sp_limit() -> *const c_void;
        }
    }

    // aarch64 - FIXME(AARCH64): missing...
    // powerpc - FIXME(POWERPC): missing...
    // arm-ios - no segmented stacks.
    // openbsd/bitrig/netbsd - no segmented stacks.
    // x86-freebsd - no segmented stacks.
    //
    // This function might still be called by the runtime, so using
    // `unreachable!()` here would be unsound; return a fixed constant instead.
    #[cfg(any(target_arch = "aarch64",
              target_arch = "powerpc",
              all(target_arch = "arm", target_os = "ios"),
              all(target_arch = "x86", target_os = "freebsd"),
              target_os = "bitrig",
              target_os = "netbsd",
              target_os = "openbsd"))]
    #[inline(always)]
    unsafe fn target_get_sp_limit() -> usize {
        1024
    }
}
......@@ -13,15 +13,8 @@
use alloc::boxed::FnBox;
use libc;
use sys::stack_overflow;
use sys_common::stack;
use usize;
#[no_stack_check]
pub unsafe fn start_thread(main: *mut libc::c_void) {
// First ensure that we don't trigger __morestack (also why this has a
// no_stack_check annotation).
stack::record_os_managed_stack_bounds(0, usize::MAX);
// Next, set up our stack overflow handler which may get triggered if we run
// out of stack.
let _handler = stack_overflow::Handler::new();
......
......@@ -40,25 +40,17 @@ fn drop(&mut self) {
target_os = "netbsd",
target_os = "openbsd"))]
mod imp {
use sys_common::stack;
use super::Handler;
use rt::util::report_overflow;
use mem;
use ptr;
use intrinsics;
use sys::c::{siginfo, sigaction, SIGBUS, SIG_DFL,
SA_SIGINFO, SA_ONSTACK, sigaltstack,
SIGSTKSZ, sighandler_t, raise};
SIGSTKSZ, sighandler_t};
use libc;
use libc::funcs::posix88::mman::{mmap, munmap};
use libc::funcs::posix01::signal::signal;
use libc::consts::os::posix88::{SIGSEGV,
PROT_READ,
PROT_WRITE,
MAP_PRIVATE,
MAP_ANON,
MAP_FAILED};
use libc::{SIGSEGV, PROT_READ, PROT_WRITE, MAP_PRIVATE, MAP_ANON};
use libc::MAP_FAILED;
use sys_common::thread_info;
......@@ -66,46 +58,48 @@ mod imp {
// This is initialized in init() and only read from after
static mut PAGE_SIZE: usize = 0;
#[no_stack_check]
// Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages
// (unmapped pages) at the end of every thread's stack, so if a thread ends
// up running into the guard page it'll trigger this handler. We want to
// detect these cases and print out a helpful error saying that the stack
// has overflowed. All other signals, however, should go back to what they
// were originally supposed to do.
//
// This handler currently exists purely to print an informative message
// whenever a thread overflows its stack. When run the handler always
// un-registers itself after running and then returns (to allow the original
// signal to be delivered again). By returning we're ensuring that segfaults
// do indeed look like segfaults.
//
// Returning from this kind of signal handler is technically not defined to
// work when reading the POSIX spec strictly, but in practice it turns out
// many large systems and all implementations allow returning from a signal
// handler to work. For a more detailed explanation see the comments on
// #26458.
unsafe extern fn signal_handler(signum: libc::c_int,
info: *mut siginfo,
_data: *mut libc::c_void) {
// We can not return from a SIGSEGV or SIGBUS signal.
// See: https://www.gnu.org/software/libc/manual/html_node/Handler-Returns.html
unsafe fn term(signum: libc::c_int) -> ! {
use core::mem::transmute;
signal(signum, transmute(SIG_DFL));
raise(signum);
intrinsics::abort();
}
// We're calling into functions with stack checks
stack::record_sp_limit(0);
info: *mut siginfo,
_data: *mut libc::c_void) {
let guard = thread_info::stack_guard().unwrap_or(0);
let addr = (*info).si_addr as usize;
if guard == 0 || addr < guard - PAGE_SIZE || addr >= guard {
term(signum);
// If the faulting address is within the guard page, then we print a
// message saying so.
if guard != 0 && guard - PAGE_SIZE <= addr && addr < guard {
report_overflow();
}
report_overflow();
// Unregister ourselves by reverting back to the default behavior.
let mut action: sigaction = mem::zeroed();
action.sa_sigaction = SIG_DFL;
sigaction(signum, &action, ptr::null_mut());
intrinsics::abort()
// See comment above for why this function returns.
}
static mut MAIN_ALTSTACK: *mut libc::c_void = 0 as *mut libc::c_void;
pub unsafe fn init() {
let psize = libc::sysconf(libc::consts::os::sysconf::_SC_PAGESIZE);
if psize == -1 {
panic!("failed to get page size");
}
PAGE_SIZE = psize as usize;
PAGE_SIZE = ::sys::os::page_size();
let mut action: sigaction = mem::zeroed();
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
......
......@@ -23,7 +23,6 @@
use sys::os;
use time::Duration;
use sys_common::stack::RED_ZONE;
use sys_common::thread::*;
pub struct Thread {
......@@ -43,8 +42,7 @@ pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
let mut attr: libc::pthread_attr_t = mem::zeroed();
assert_eq!(pthread_attr_init(&mut attr), 0);
// Reserve room for the red zone, the runtime's stack of last resort.
let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr));
let stack_size = cmp::max(stack, min_stack_size(&attr));
match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
0 => {}
n => {
......@@ -72,7 +70,6 @@ pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
Ok(Thread { id: native })
};
#[no_stack_check]
extern fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
unsafe { start_thread(main); }
0 as *mut _
......
......@@ -78,6 +78,10 @@
pub const TOKEN_ADJUST_PRIVILEGES: libc::DWORD = 0x0020;
pub const SE_PRIVILEGE_ENABLED: libc::DWORD = 2;
pub const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
pub const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15;
pub const EXCEPTION_STACK_OVERFLOW: DWORD = 0xc00000fd;
#[repr(C)]
#[cfg(target_arch = "x86")]
pub struct WSADATA {
......@@ -327,6 +331,24 @@ pub struct REPARSE_MOUNTPOINT_DATA_BUFFER {
pub ReparseTarget: libc::WCHAR,
}
#[repr(C)]
pub struct EXCEPTION_RECORD {
pub ExceptionCode: DWORD,
pub ExceptionFlags: DWORD,
pub ExceptionRecord: *mut EXCEPTION_RECORD,
pub ExceptionAddress: LPVOID,
pub NumberParameters: DWORD,
pub ExceptionInformation: [LPVOID; EXCEPTION_MAXIMUM_PARAMETERS]
}
#[repr(C)]
pub struct EXCEPTION_POINTERS {
pub ExceptionRecord: *mut EXCEPTION_RECORD,
pub ContextRecord: LPVOID
}
pub type PVECTORED_EXCEPTION_HANDLER = extern "system"
fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG;
#[link(name = "ws2_32")]
#[link(name = "userenv")]
......@@ -487,6 +509,9 @@ pub fn AdjustTokenPrivileges(TokenHandle: libc::HANDLE,
BufferLength: libc::DWORD,
PreviousState: PTOKEN_PRIVILEGES,
ReturnLength: *mut libc::DWORD) -> libc::BOOL;
pub fn AddVectoredExceptionHandler(FirstHandler: ULONG,
VectoredHandler: PVECTORED_EXCEPTION_HANDLER)
-> LPVOID;
}
// Functions that aren't available on Windows XP, but we still use them and just
......
......@@ -8,108 +8,44 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(stage0)]
use core::prelude::v1::*;
use libc::types::os::arch::extra::{LPVOID, DWORD, LONG};
use libc;
use mem;
use ptr;
use libc::{self, LONG};
use rt::util::report_overflow;
use sys::c;
use sys_common::stack;
pub struct Handler {
_data: *mut libc::c_void
}
pub struct Handler;
impl Handler {
pub unsafe fn new() -> Handler {
make_handler()
// This API isn't available on XP, so don't panic in that case and just
// pray it works out ok.
if c::SetThreadStackGuarantee(&mut 0x5000) == 0 {
if libc::GetLastError() as u32 != libc::ERROR_CALL_NOT_IMPLEMENTED as u32 {
panic!("failed to reserve stack space for exception handling");
}
}
Handler
}
}
impl Drop for Handler {
fn drop(&mut self) {}
}
// This is initialized in init() and only read from after
static mut PAGE_SIZE: usize = 0;
#[no_stack_check]
extern "system" fn vectored_handler(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG {
extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS)
-> LONG {
unsafe {
let rec = &(*(*ExceptionInfo).ExceptionRecord);
let code = rec.ExceptionCode;
if code != EXCEPTION_STACK_OVERFLOW {
return EXCEPTION_CONTINUE_SEARCH;
if code == c::EXCEPTION_STACK_OVERFLOW {
report_overflow();
}
// We're calling into functions with stack checks,
// however stack checks by limit should be disabled on Windows
stack::record_sp_limit(0);
report_overflow();
EXCEPTION_CONTINUE_SEARCH
c::EXCEPTION_CONTINUE_SEARCH
}
}
pub unsafe fn init() {
let mut info = mem::zeroed();
libc::GetSystemInfo(&mut info);
PAGE_SIZE = info.dwPageSize as usize;
if AddVectoredExceptionHandler(0, vectored_handler) == ptr::null_mut() {
if c::AddVectoredExceptionHandler(0, vectored_handler).is_null() {
panic!("failed to install exception handler");
}
mem::forget(make_handler());
}
pub unsafe fn cleanup() {
}
pub unsafe fn make_handler() -> Handler {
// This API isn't available on XP, so don't panic in that case and just pray
// it works out ok.
if c::SetThreadStackGuarantee(&mut 0x5000) == 0 {
if libc::GetLastError() as u32 != libc::ERROR_CALL_NOT_IMPLEMENTED as u32 {
panic!("failed to reserve stack space for exception handling");
}
}
Handler { _data: 0 as *mut libc::c_void }
}
#[repr(C)]
pub struct EXCEPTION_RECORD {
pub ExceptionCode: DWORD,
pub ExceptionFlags: DWORD,
pub ExceptionRecord: *mut EXCEPTION_RECORD,
pub ExceptionAddress: LPVOID,
pub NumberParameters: DWORD,
pub ExceptionInformation: [LPVOID; EXCEPTION_MAXIMUM_PARAMETERS]
// Set the thread stack guarantee for the main thread.
let _h = Handler::new();
}
#[repr(C)]
pub struct EXCEPTION_POINTERS {
pub ExceptionRecord: *mut EXCEPTION_RECORD,
pub ContextRecord: LPVOID
}
pub type PVECTORED_EXCEPTION_HANDLER = extern "system"
fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG;
pub type ULONG = libc::c_ulong;
const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15;
const EXCEPTION_STACK_OVERFLOW: DWORD = 0xc00000fd;
extern "system" {
fn AddVectoredExceptionHandler(FirstHandler: ULONG,
VectoredHandler: PVECTORED_EXCEPTION_HANDLER)
-> LPVOID;
}
pub unsafe fn cleanup() {}
......@@ -11,14 +11,12 @@
use prelude::v1::*;
use alloc::boxed::FnBox;
use cmp;
use io;
use libc::{self, c_void, DWORD};
use mem;
use ptr;
use sys::c;
use sys::handle::Handle;
use sys_common::stack::RED_ZONE;
use sys_common::thread::*;
use time::Duration;
......@@ -36,11 +34,9 @@ pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
// PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
// For now, the only requirement is that it's big enough to hold the
// red zone. Round up to the next 64 kB because that's what the NT
// kernel does, might as well make it explicit. With the current
// 20 kB red zone, that makes for a 64 kB minimum stack.
let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
// Round up to the next 64 kB because that's what the NT kernel does,
// might as well make it explicit.
let stack_size = (stack + 0xfffe) & (!0xfffe);
let ret = c::CreateThread(ptr::null_mut(), stack_size as libc::size_t,
thread_start, &*p as *const _ as *mut _,
0, ptr::null_mut());
......@@ -52,7 +48,6 @@ pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
Ok(Thread { handle: Handle::new(ret) })
};
#[no_stack_check]
extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
unsafe { start_thread(main); }
0
......
......@@ -171,7 +171,7 @@
use rt::{self, unwind};
use sync::{Mutex, Condvar, Arc};
use sys::thread as imp;
use sys_common::{stack, thread_info};
use sys_common::thread_info;
use time::Duration;
////////////////////////////////////////////////////////////////////////////////
......@@ -298,24 +298,11 @@ unsafe fn spawn_inner<'a, T: Send>(self, f: Box<FnBox() -> T + Send + 'a>)
let my_packet = Arc::new(UnsafeCell::new(None));
let their_packet = my_packet.clone();
// Spawning a new OS thread guarantees that __morestack will never get
// triggered, but we must manually set up the actual stack bounds once
// this function starts executing. This raises the lower limit by a bit
// because by the time that this function is executing we've already
// consumed at least a little bit of stack (we don't know the exact byte
// address at which our stack started).
let main = move || {
let something_around_the_top_of_the_stack = 1;
let addr = &something_around_the_top_of_the_stack as *const i32;
let my_stack_top = addr as usize;
let my_stack_bottom = my_stack_top - stack_size + 1024;
stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
if let Some(name) = their_thread.name() {
imp::Thread::set_name(name);
}
thread_info::set(imp::guard::current(), their_thread);
let mut output = None;
let try_result = {
let ptr = &mut output;
......
......@@ -222,6 +222,9 @@ enum Status {
("link_args", Normal),
("macro_escape", Normal),
// Not used any more, but we can't feature gate it
("no_stack_check", Normal),
("staged_api", Gated("staged_api",
"staged_api is for use by rustc only")),
("plugin", Gated("plugin",
......@@ -276,7 +279,6 @@ enum Status {
("link_section", Whitelisted),
("no_builtins", Whitelisted),
("no_mangle", Whitelisted),
("no_stack_check", Whitelisted),
("no_debug", Whitelisted),
("omit_gdb_pretty_printer_section", Whitelisted),
("unsafe_no_drop_flag", Gated("unsafe_no_drop_flag",
......
.macro func _name
.text
.align 2
.globl \_name
.type \_name, %function
\_name:
.endm
.macro endfunc _name
.size \_name, .-\_name
.endm
#include "macros.S"
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
/* See i386/morestack.S for the lengthy, general explanation. */
#if defined(__APPLE__)
#define MORESTACK ___morestack
#define STACK_EXHAUSTED _rust_stack_exhausted@plt
#else
#define MORESTACK __morestack
#define STACK_EXHAUSTED rust_stack_exhausted
#endif
.global STACK_EXHAUSTED
#if defined(__APPLE__)
.private_extern MORESTACK
#else
.hidden MORESTACK
#endif
#if !defined(__APPLE__)
func MORESTACK
#endif
// FIXME(AARCH64): this might not be perfectly right but works for now
MORESTACK:
.cfi_startproc
bl STACK_EXHAUSTED
// the above function ensures that it never returns
.cfi_endproc
#if !defined(__APPLE__)
endfunc MORESTACK
#endif
#include "macros.S"
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
/* See i386/morestack.S for the lengthy, general explanation. */
.text
.code 32
.arm
#if defined(__APPLE__)
.align 2
#else
.align
#endif
#if defined(__APPLE__)
#define MORESTACK ___morestack
#define STACK_EXHAUSTED _rust_stack_exhausted
#else
#define MORESTACK __morestack
#define STACK_EXHAUSTED rust_stack_exhausted
#endif
.global STACK_EXHAUSTED
.global MORESTACK
// Unfortunately LLVM yet doesn't support emitting correct debug
// DWARF information for non-ELF targets so to make it compile
// on iOS all that directives are simply commented out
#if defined(__APPLE__)
#define UNWIND @
#else
#define UNWIND
#endif
#if defined(__APPLE__)
.private_extern MORESTACK
#else
.hidden MORESTACK
#endif
#if !defined(__APPLE__)
.type MORESTACK,%function
#endif
// r4 and r5 are scratch registers for __morestack due to llvm
// ARMFrameLowering::adjustForSegmentedStacks() implementation.
MORESTACK:
UNWIND .fnstart
// Save frame pointer and return address
UNWIND .save {r4, r5}
UNWIND .save {lr}
UNWIND .save {r6, fp, lr}
push {r6, fp, lr}
UNWIND .movsp r6
mov r6, sp
UNWIND .setfp fp, sp, #4
add fp, sp, #4
// Save argument registers of the original function
push {r0, r1, r2, r3, lr}
// Create new stack
bl STACK_EXHAUSTED@plt
// the above function ensures that it never returns
UNWIND .fnend
// Do not compile anything here for iOS because split stacks
// are disabled at all and do not need any runtime support.
//
// See also comments in librustrt/stack.rs about why it was
// disabled and how it could be implemented in case of need.
#if !defined(__APPLE__)
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.text
.code 32
.arm
.align
#define RECORD_SP_LIMIT record_sp_limit
#define GET_SP_LIMIT get_sp_limit
.globl RECORD_SP_LIMIT
.globl GET_SP_LIMIT
RECORD_SP_LIMIT:
// First, try to read TLS address from coprocessor
mrc p15, #0, r3, c13, c0, #3
cmp r3, #0
// Otherwise, try to read from magic address 0xFFFF0FF0
mvneq r3, #0xF000
ldreq r3, [r3, #-15]
#if __ANDROID__
add r3, r3, #252
#elif __linux__
add r3, r3, #4
#endif // ANDROID
str r0, [r3]
mov pc, lr
GET_SP_LIMIT:
// First, try to read TLS address from coprocessor
mrc p15, #0, r3, c13, c0, #3
cmp r3, #0
// Otherwise, try to read from magic address 0xFFFF0FF0
mvneq r3, #0xF000
ldreq r3, [r3, #-15]
#if __ANDROID__
add r3, r3, #252
#elif __linux__
add r3, r3, #4
#endif // __ANDROID__
ldr r0, [r3]
mov pc, lr
#endif
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
/* See i386/morestack.S for the lengthy, general explanation. */
.text
.code 32
.arm
#if defined(__APPLE__)
.align 2
#else
.align
#endif
#if defined(__APPLE__)
#define MORESTACK ___morestack
#define STACK_EXHAUSTED _rust_stack_exhausted
#else
#define MORESTACK __morestack
#define STACK_EXHAUSTED rust_stack_exhausted
#endif
.global STACK_EXHAUSTED
.global MORESTACK
// Unfortunately LLVM yet doesn't support emitting correct debug
// DWARF information for non-ELF targets so to make it compile
// on iOS all that directives are simply commented out
#if defined(__APPLE__)
#define UNWIND @
#else
#define UNWIND
#endif
#if defined(__APPLE__)
.private_extern MORESTACK
#else
.hidden MORESTACK
#endif
#if !defined(__APPLE__)
.type MORESTACK,%function
#endif
// r4 and r5 are scratch registers for __morestack due to llvm
// ARMFrameLowering::adjustForSegmentedStacks() implementation.
MORESTACK:
UNWIND .fnstart
// Save frame pointer and return address
UNWIND .save {r4, r5}
UNWIND .save {lr}
UNWIND .save {r6, fp, lr}
push {r6, fp, lr}
UNWIND .movsp r6
mov r6, sp
UNWIND .setfp fp, sp, #4
add fp, sp, #4
// Save argument registers of the original function
push {r0, r1, r2, r3, lr}
// Create new stack
bl STACK_EXHAUSTED@plt
// the above function ensures that it never returns
UNWIND .fnend
// Do not compile anything here for iOS because split stacks
// are disabled at all and do not need any runtime support.
//
// See also comments in librustrt/stack.rs about why it was
// disabled and how it could be implemented in case of need.
#if !defined(__APPLE__)
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.text
.code 32
.arm
.align
#define RECORD_SP_LIMIT record_sp_limit
#define GET_SP_LIMIT get_sp_limit
.globl RECORD_SP_LIMIT
.globl GET_SP_LIMIT
RECORD_SP_LIMIT:
// First, try to read TLS address from coprocessor
mrc p15, #0, r3, c13, c0, #3
cmp r3, #0
// Otherwise, try to read from magic address 0xFFFF0FF0
mvneq r3, #0xF000
ldreq r3, [r3, #-15]
#if __ANDROID__
add r3, r3, #252
#elif __linux__
add r3, r3, #4
#endif // ANDROID
str r0, [r3]
mov pc, lr
GET_SP_LIMIT:
// First, try to read TLS address from coprocessor
mrc p15, #0, r3, c13, c0, #3
cmp r3, #0
// Otherwise, try to read from magic address 0xFFFF0FF0
mvneq r3, #0xF000
ldreq r3, [r3, #-15]
#if __ANDROID__
add r3, r3, #252
#elif __linux__
add r3, r3, #4
#endif // __ANDROID__
ldr r0, [r3]
mov pc, lr
#endif
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
/* See i386/morestack.S for the lengthy, general explanation. */
.text
.code 32
.arm
#if defined(__APPLE__)
.align 2
#else
.align
#endif
#if defined(__APPLE__)
#define MORESTACK ___morestack
#define STACK_EXHAUSTED _rust_stack_exhausted
#else
#define MORESTACK __morestack
#define STACK_EXHAUSTED rust_stack_exhausted
#endif
.global STACK_EXHAUSTED
.global MORESTACK
// Unfortunately LLVM yet doesn't support emitting correct debug
// DWARF information for non-ELF targets so to make it compile
// on iOS all that directives are simply commented out
#if defined(__APPLE__)
#define UNWIND @
#else
#define UNWIND
#endif
#if defined(__APPLE__)
.private_extern MORESTACK
#else
.hidden MORESTACK
#endif
#if !defined(__APPLE__)
.type MORESTACK,%function
#endif
// r4 and r5 are scratch registers for __morestack due to llvm
// ARMFrameLowering::adjustForSegmentedStacks() implementation.
MORESTACK:
UNWIND .fnstart
// Save frame pointer and return address
UNWIND .save {r4, r5}
UNWIND .save {lr}
UNWIND .save {r6, fp, lr}
push {r6, fp, lr}
UNWIND .movsp r6
mov r6, sp
UNWIND .setfp fp, sp, #4
add fp, sp, #4
// Save argument registers of the original function
push {r0, r1, r2, r3, lr}
// Create new stack
bl STACK_EXHAUSTED@plt
// the above function ensures that it never returns
UNWIND .fnend
// Do not compile anything here for iOS because split stacks
// are disabled at all and do not need any runtime support.
//
// See also comments in librustrt/stack.rs about why it was
// disabled and how it could be implemented in case of need.
#if !defined(__APPLE__)
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.text
.code 32
.arm
.align
#define RECORD_SP_LIMIT record_sp_limit
#define GET_SP_LIMIT get_sp_limit
.globl RECORD_SP_LIMIT
.globl GET_SP_LIMIT
RECORD_SP_LIMIT:
// First, try to read TLS address from coprocessor
mrc p15, #0, r3, c13, c0, #3
cmp r3, #0
// Otherwise, try to read from magic address 0xFFFF0FF0
mvneq r3, #0xF000
ldreq r3, [r3, #-15]
#if __ANDROID__
add r3, r3, #252
#elif __linux__
add r3, r3, #4
#endif // ANDROID
str r0, [r3]
mov pc, lr
GET_SP_LIMIT:
// First, try to read TLS address from coprocessor
mrc p15, #0, r3, c13, c0, #3
cmp r3, #0
// Otherwise, try to read from magic address 0xFFFF0FF0
mvneq r3, #0xF000
ldreq r3, [r3, #-15]
#if __ANDROID__
add r3, r3, #252
#elif __linux__
add r3, r3, #4
#endif // __ANDROID__
ldr r0, [r3]
mov pc, lr
#endif
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", @progbits
#endif
/*
__morestack
This function is normally used to implement stack growth using the
mechanism devised by Ian Lance Taylor for gccgo, described here:
http://gcc.gnu.org/wiki/SplitStacks
Each Rust function contains an LLVM-generated prologue that compares the
stack space required for the current function to the space remaining in
the current stack segment, maintained in a platform-specific TLS slot.
The stack limit is strategically maintained by the Rust runtime so that
it is always in place whenever a Rust function is running.
In Rust, however, we currently do not use __morestack for stack growth
purposes. Rather each task has one large stack segment. When this
__morestack function is run, we interpret this as a "stack overflow"
event rather than an event requiring an allocation of a new stack.
In the early days, this implementation did indeed have all of the fiddly
bits in order to manage split stacks in the sense of always growing
stacks. For posterity, the implementation can be found at commit
c8e77d5586aed50821e0b9361b2e24c96ade816c if we ever need to refer back
to it.
-- The __morestack calling convention --
For reasons of efficiency the __morestack calling convention
is bizarre. The calling function does not attempt to align the
stack for the call, and on x86_64 the arguments to __morestack
are passed in scratch registers in order to preserve the
original function's arguments.
Once __morestack has switched to the new stack, instead of
returning, it then calls into the original function, resuming
execution at the instruction following the call to
__morestack. Thus, when the original function returns it
actually returns to __morestack, which then deallocates the
stack and returns again to the original function's caller.
-- Unwinding --
All this trickery causes hell when it comes time for the
unwinder to navigate it's way through this function. What
will happen is the original function will be unwound first
without any special effort, then the unwinder encounters
the __morestack frame, which is sitting just above a
tiny fraction of a frame (containing just a return pointer
and, on 32-bit, the arguments to __morestack).
We deal with this by claiming that little bit of stack
is actually part of the __morestack frame, encoded as
DWARF call frame instructions (CFI) by .cfi assembler
pseudo-ops.
One final complication (that took me a week to figure out)
is that OS X 10.6+ uses its own 'compact unwind info',
an undocumented format generated by the linker from
the DWARF CFI. This compact unwind info doesn't correctly
capture the nuance of the __morestack frame, so we need to
prevent the linker from attempting to convert its DWARF unwind
information.
*/
.text
#if defined(__APPLE__)
#define MORESTACK ___morestack
#define EXHAUSTED _rust_stack_exhausted
#else
#if defined(__linux__) || defined(__FreeBSD__)
#define MORESTACK __morestack
#define EXHAUSTED rust_stack_exhausted@plt
#else
#define MORESTACK ___morestack
#define EXHAUSTED _rust_stack_exhausted
#endif
#endif
.globl MORESTACK
// FIXME: What about __WIN32__?
#if defined(__linux__) || defined(__FreeBSD__)
.hidden MORESTACK
#else
#if defined(__APPLE__)
.private_extern MORESTACK
#endif
#endif
#ifdef __ELF__
.type MORESTACK,@function
#endif
MORESTACK:
.cfi_startproc
// This base pointer setup differs from most in that we are
// telling the unwinder to consider the Canonical Frame
// Address (CFA) for this frame to be the value of the stack
// pointer prior to entry to the original function, whereas
// the CFA would typically be the value of the stack
// pointer prior to entry to this function. This will allow
// the unwinder to understand how to skip the tiny partial
// frame that the original function created by calling
// __morestack.
// In practical terms, our CFA is 12 bytes greater than it
// would normally be, accounting for the two arguments to
// __morestack, and an extra return address.
// FIXME(#9854) these cfi directives don't work on windows.
pushl %ebp
#if defined(__APPLE__)
// The pattern of the return address being saved twice to the same location
// tells the OS X linker that it should not attempt to convert the DWARF
// unwind information to the compact format.
.cfi_offset %eip, -4
.cfi_offset %eip, -4
#endif
// The CFA is 20 bytes above the register that it is
// associated with for this frame (which will be %ebp)
.cfi_def_cfa_offset 20
// %ebp is -20 bytes from the CFA
.cfi_offset %ebp, -20
movl %esp, %ebp
// Calculate the CFA as an offset from %ebp
.cfi_def_cfa_register %ebp
// re-align the stack
subl $12,%esp
call EXHAUSTED
// the exhaustion function guarantees that it can't return
.cfi_endproc
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", @progbits
#endif
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", @progbits
#endif
/* See i386/morestack.S for the lengthy, general explanation. */
.text
.globl rust_stack_exhausted
.globl __morestack
.hidden __morestack
.cfi_startproc
.set nomips16
.ent __morestack
__morestack:
.set noreorder
.set nomacro
addiu $29, $29, -4
sw $30, 0($29)
// 16 = 4 (current) + 12 (previous)
.cfi_def_cfa_offset 16
.cfi_offset 31, -4
.cfi_offset 30, -16
move $30, $29
.cfi_def_cfa_register 30
// O32 ABI always reserves 16 bytes for arguments
addiu $29, $29, -16
lw $25, %call16(rust_stack_exhausted)($28)
jalr $25
nop
// the above function make sure that we never get here
.end __morestack
.cfi_endproc
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", @progbits
#endif
.text
.globl record_sp_limit
.align 2
.set nomips16
.ent record_sp_limit
record_sp_limit:
.set noreorder
.set nomacro
.set push
.set mips32r2
rdhwr $3, $29
.set pop
addiu $3, $3, -0x7004
sw $4, 0($3)
jr $31
nop
.end record_sp_limit
.globl get_sp_limit
.align 2
.set nomips16
.ent get_sp_limit
get_sp_limit:
.set noreorder
.set nomacro
.set push
.set mips32r2
rdhwr $3, $29
.set pop
addiu $3, $3, -0x7004
lw $2, 0($3)
jr $31
nop
.end get_sp_limit
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", @progbits
#endif
/* See i386/morestack.S for the lengthy, general explanation. */
.text
.globl rust_stack_exhausted
.globl __morestack
.hidden __morestack
.cfi_startproc
.set nomips16
.ent __morestack
__morestack:
.set noreorder
.set nomacro
addiu $29, $29, -4
sw $30, 0($29)
// 16 = 4 (current) + 12 (previous)
.cfi_def_cfa_offset 16
.cfi_offset 31, -4
.cfi_offset 30, -16
move $30, $29
.cfi_def_cfa_register 30
// O32 ABI always reserves 16 bytes for arguments
addiu $29, $29, -16
lw $25, %call16(rust_stack_exhausted)($28)
jalr $25
nop
// the above function make sure that we never get here
.end __morestack
.cfi_endproc
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", @progbits
#endif
.text
.globl record_sp_limit
.align 2
.set nomips16
.ent record_sp_limit
record_sp_limit:
.set noreorder
.set nomacro
.set push
.set mips32r2
rdhwr $3, $29
.set pop
addiu $3, $3, -0x7004
sw $4, 0($3)
jr $31
nop
.end record_sp_limit
.globl get_sp_limit
.align 2
.set nomips16
.ent get_sp_limit
get_sp_limit:
.set noreorder
.set nomacro
.set push
.set mips32r2
rdhwr $3, $29
.set pop
addiu $3, $3, -0x7004
lw $2, 0($3)
jr $31
nop
.end get_sp_limit
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
/* See i386/morestack.S for the lengthy, general explanation. */
.global rust_stack_exhausted
.hidden __morestack
// FIXME(POWERPC): this might not be perfectly right but works for now
__morestack:
.cfi_startproc
bl rust_stack_exhausted
// the above function ensures that it never returns
.cfi_endproc
.end __morestack
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", @progbits
#endif
/* See i386/morestack.S for the lengthy, general explanation. */
.text
#if defined(__APPLE__)
#define MORESTACK ___morestack
#else
#define MORESTACK __morestack
#endif
#if defined(__APPLE__)
#define EXHAUSTED _rust_stack_exhausted
#elif defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__) || defined(__Bitrig__)
#define EXHAUSTED rust_stack_exhausted@PLT
#else
#define EXHAUSTED rust_stack_exhausted
#endif
#if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__) || defined(__Bitrig__)
.hidden MORESTACK
#else
#if defined(__APPLE__)
.private_extern MORESTACK
#endif
#endif
#ifdef __ELF__
.type MORESTACK,@function
#endif
.globl MORESTACK
MORESTACK:
.cfi_startproc
pushq %rbp
// The CFA is 24 bytes above the register that it will
// be associated with for this frame (%rbp). That is 8
// bytes greater than a normal frame, to allow the unwinder
// to skip the partial frame of the original function.
.cfi_def_cfa_offset 24
#if defined(__APPLE__)
// The pattern of the return address being saved twice to the same location
// tells the OS X linker that it should not attempt to convert the DWARF
// unwind information to the compact format.
.cfi_offset %rip, -8
.cfi_offset %rip, -8
#endif
// %rbp is -24 bytes from the CFA
.cfi_offset %rbp, -24
movq %rsp, %rbp
// Calculate the CFA as on offset from %ebp
.cfi_def_cfa_register %rbp
// re-align the stack
subq $8, %rsp
// kill this program
call EXHAUSTED
// the exhaustion function guarantees that it can't return
.cfi_endproc
// Mark stack as non-executable
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", @progbits
#endif
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
......@@ -7,18 +7,3 @@
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_type="lib"]
extern {
// Prevents optimizing away the stack buffer.
// This symbol is undefined, but the code doesn't need to pass
// the linker.
fn black_box(ptr: *const u8);
}
pub unsafe fn foo() {
// Make sure we use the stack
let x: [u8; 50] = [0; 50];
black_box(x.as_ptr());
}
......@@ -16,15 +16,6 @@ using namespace llvm;
using namespace llvm::sys;
using namespace llvm::object;
// libmorestack is not used on other platforms
#if defined(__linux__) || defined(__APPLE__)
extern "C" void __morestack(void);
static void* morestack_addr() {
return reinterpret_cast<void*>(__morestack);
}
#endif
class RustJITMemoryManager : public SectionMemoryManager
{
typedef SectionMemoryManager Base;
......@@ -35,13 +26,6 @@ class RustJITMemoryManager : public SectionMemoryManager
uint64_t getSymbolAddress(const std::string &Name) override
{
#if defined(__linux__) || defined(__APPLE__)
if (Name == "__morestack" || Name == "___morestack")
return reinterpret_cast<uint64_t>(__morestack);
if (Name == "__morestack_addr" || Name == "___morestack_addr")
return reinterpret_cast<uint64_t>(morestack_addr);
#endif
return Base::getSymbolAddress(Name);
}
};
......
......@@ -15,9 +15,6 @@
extern crate core;
extern crate libc;
#[lang = "stack_exhausted"]
extern fn stack_exhausted() {}
#[lang = "eh_personality"]
extern fn eh_personality() {}
......
......@@ -19,7 +19,6 @@ fn main() {
let x = box 1i32;
}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "eh_unwind_resume"] extern fn eh_unwind_resume() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
......@@ -10,7 +10,6 @@
// aux-build:weak-lang-items.rs
// error-pattern: language item required, but not found: `panic_fmt`
// error-pattern: language item required, but not found: `stack_exhausted`
// error-pattern: language item required, but not found: `eh_personality`
#![feature(no_std)]
......
......@@ -17,7 +17,6 @@
#[no_mangle]
pub extern fn bar() {}
#[lang = "stack_exhausted"] fn stack_exhausted() {}
#[lang = "eh_personality"] fn eh_personality() {}
#[lang = "eh_unwind_resume"] fn eh_unwind_resume() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
......@@ -17,7 +17,6 @@
#[no_mangle]
pub extern fn foo() {}
#[lang = "stack_exhausted"] fn stack_exhausted() {}
#[lang = "eh_personality"] fn eh_personality() {}
#[lang = "eh_unwind_resume"] fn eh_unwind_resume() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
......
-include ../tools.mk
# Verify that the compiler emits (or suppresses) segmented-stack morestack
# calls correctly: attribute-annotated code must not reference morestack,
# flag-enabled code must, and -C no-stack-check must suppress it again.
ifndef IS_WINDOWS
# Platforms on which segmented stacks are disabled entirely.
SKIP_OS := 'OpenBSD Bitrig'
ifneq ($(UNAME),$(findstring $(UNAME),$(SKIP_OS)))
all:
$(RUSTC) -O --emit asm attr.rs
! grep -q morestack $(TMPDIR)/attr.s
$(RUSTC) -O --emit asm flag.rs
grep -q morestack $(TMPDIR)/flag.s
$(RUSTC) -O --emit asm -C no-stack-check flag.rs
! grep -q morestack $(TMPDIR)/flag.s
else
# On Bitrig/OpenBSD, morestack isn't used as the segmented stacks are disabled
all:
endif
else
# On Windows we use __chkstk and it only appears in functions with large allocations,
# so this test wouldn't be reliable.
all:
endif
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_type="lib"]
extern {
// Prevents optimizing away the stack buffer.
// This symbol is undefined, but the code doesn't need to pass
// the linker.
fn black_box(ptr: *const u8);
}
/// Compiled with segmented-stack checks disabled via `#[no_stack_check]`,
/// while still forcing real stack usage so the emitted assembly can be
/// inspected for the absence of morestack calls.
#[no_stack_check]
pub unsafe fn foo() {
    // Local array keeps a genuine stack allocation in the frame; passing
    // its pointer to the opaque `black_box` prevents optimizing it away.
    let stack_buf: [u8; 50] = [0u8; 50];
    black_box(stack_buf.as_ptr());
}
......@@ -6,6 +6,3 @@ all:
$(RUSTC) foo.rs --target=my-incomplete-platform.json 2>&1 | grep 'Field llvm-target'
RUST_TARGET_PATH=. $(RUSTC) foo.rs --target=my-awesome-platform --crate-type=lib --emit=asm
RUST_TARGET_PATH=. $(RUSTC) foo.rs --target=x86_64-unknown-linux-gnu --crate-type=lib --emit=asm
# The built-in target *should* override the one we have here, and thus we
# should have morestack
grep -q morestack < $(TMPDIR)/foo.s
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android
// ignore-freebsd
// ignore-ios
// ignore-dragonfly
// ignore-bitrig
// ignore-musl
#![feature(asm)]
use std::process::Command;
use std::env;
use std::thread;
// lifted from the test module
// Inlining to avoid llvm turning the recursive functions into tail calls,
// which doesn't consume stack.
// Optimization barrier: an empty asm! statement that "reads" &dummy makes
// LLVM treat the value as used, so stack buffers passed in cannot be
// optimized away and recursive callers cannot be turned into tail calls.
#[inline(always)]
#[no_stack_check]
pub fn black_box<T>(dummy: T) { unsafe { asm!("" : : "r"(&dummy)) } }
// Recurse without bound, consuming a small fixed amount of stack per frame,
// until the guard page is hit and the runtime reports a stack overflow.
// `black_box` keeps the buffer live so LLVM cannot tail-call-optimize this.
#[no_stack_check]
fn recurse() {
    let frame = [0; 10];
    black_box(frame);
    recurse();
}
/// Test driver: run with the argument "recurse" it acts as the child and
/// overflows the stack of a spawned thread; run with no argument it acts
/// as the parent, re-spawns itself in child mode, and asserts the child
/// died with the stack-overflow diagnostic on stderr.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() > 1 && args[1] == "recurse" {
        // Child mode: overflow on a spawned thread (not the main thread,
        // whose stack may behave differently on some platforms).
        thread::spawn(recurse).join();
    } else {
        // Parent mode: re-run ourselves in child mode and inspect stderr.
        let recurse = Command::new(&args[0]).arg("recurse").output().unwrap();
        assert!(!recurse.status.success());
        let error = String::from_utf8_lossy(&recurse.stderr);
        // Echo the child's stderr so failures are diagnosable in CI logs.
        // (Removed a stray debug `println!("wut")` leftover.)
        println!("`{}`", error);
        assert!(error.contains("has overflowed its stack"));
    }
}
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android
// ignore-linux
// ignore-freebsd
// ignore-ios
// ignore-dragonfly
// ignore-bitrig
#![feature(asm)]
use std::process::Command;
use std::env;
// lifted from the test module
// Inlining to avoid llvm turning the recursive functions into tail calls,
// which doesn't consume stack.
// Optimization barrier: an empty asm! statement that "reads" &dummy makes
// LLVM treat the value as used, so stack buffers passed in cannot be
// optimized away and recursive callers cannot be turned into tail calls.
#[inline(always)]
#[no_stack_check]
pub fn black_box<T>(dummy: T) { unsafe { asm!("" : : "r"(&dummy)) } }
// Consume the stack one small frame at a time until the guard page is hit
// and the runtime emits its stack-overflow message. The `black_box` call
// keeps each frame's buffer live, preventing tail-call optimization.
#[no_stack_check]
fn recurse() {
    let frame = [0; 10];
    black_box(frame);
    recurse();
}
/// Test driver: with the argument "recurse" this process overflows its
/// own stack; with no argument it re-runs itself in that mode and asserts
/// the child failed with the stack-overflow diagnostic on stderr.
fn main() {
    let args: Vec<String> = env::args().collect();
    let is_child = args.len() > 1 && args[1] == "recurse";
    if is_child {
        recurse();
    } else {
        let output = Command::new(&args[0]).arg("recurse").output().unwrap();
        assert!(!output.status.success());
        let stderr = String::from_utf8_lossy(&output.stderr);
        assert!(stderr.contains("has overflowed its stack"));
    }
}
......@@ -13,8 +13,9 @@
#![feature(asm)]
use std::process::Command;
use std::env;
use std::process::Command;
use std::thread;
// lifted from the test module
// Inlining to avoid llvm turning the recursive functions into tail calls,
......@@ -23,7 +24,7 @@
pub fn black_box<T>(dummy: T) { unsafe { asm!("" : : "r"(&dummy)) } }
fn silent_recurse() {
let buf = [0; 1000];
let buf = [0u8; 1000];
black_box(buf);
silent_recurse();
}
......@@ -40,15 +41,31 @@ fn main() {
silent_recurse();
} else if args.len() > 1 && args[1] == "loud" {
loud_recurse();
} else if args.len() > 1 && args[1] == "silent-thread" {
thread::spawn(silent_recurse).join();
} else if args.len() > 1 && args[1] == "loud-thread" {
thread::spawn(loud_recurse).join();
} else {
let silent = Command::new(&args[0]).arg("silent").output().unwrap();
assert!(!silent.status.success());
let error = String::from_utf8_lossy(&silent.stderr);
assert!(error.contains("has overflowed its stack"));
let loud = Command::new(&args[0]).arg("loud").output().unwrap();
assert!(!loud.status.success());
let error = String::from_utf8_lossy(&silent.stderr);
assert!(error.contains("has overflowed its stack"));
let mut modes = vec![
"silent-thread",
"loud-thread",
];
// On linux it looks like the main thread can sometimes grow its stack
// basically without bounds, so we only test the child thread cases
// there.
if !cfg!(target_os = "linux") {
modes.push("silent");
modes.push("loud");
}
for mode in modes {
println!("testing: {}", mode);
let silent = Command::new(&args[0]).arg(mode).output().unwrap();
assert!(!silent.status.success());
let error = String::from_utf8_lossy(&silent.stderr);
assert!(error.contains("has overflowed its stack"),
"missing overflow message: {}", error);
}
}
}
......@@ -8,8 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(libc)]
use std::process::Command;
extern crate libc;
use std::process::{Command, ExitStatus};
use std::env;
fn main() {
......@@ -18,8 +21,12 @@ fn main() {
unsafe { *(0 as *mut isize) = 1 }; // trigger a segfault
} else {
let segfault = Command::new(&args[0]).arg("segfault").output().unwrap();
let stderr = String::from_utf8_lossy(&segfault.stderr);
let stdout = String::from_utf8_lossy(&segfault.stdout);
println!("stdout: {}", stdout);
println!("stderr: {}", stderr);
println!("status: {}", segfault.status);
assert!(!segfault.status.success());
let error = String::from_utf8_lossy(&segfault.stderr);
assert!(!error.contains("has overflowed its stack"));
assert!(!stderr.contains("has overflowed its stack"));
}
}
......@@ -20,13 +20,11 @@
extern { fn puts(s: *const u8); }
extern "rust-intrinsic" { fn transmute<T, U>(t: T) -> U; }
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "eh_unwind_resume"] extern fn eh_unwind_resume() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
#[start]
#[no_stack_check]
fn main(_: isize, _: *const *const u8) -> isize {
unsafe {
let (ptr, _): (*const u8, usize) = transmute("Hello!\0");
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册