Commit 0b64903a authored by R robm

Merge

......@@ -98,7 +98,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
}
pop(rax);
#endif
reset_last_Java_frame(thread, true, align_stack);
reset_last_Java_frame(thread, true);
// discard thread and arguments
NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
......@@ -882,7 +882,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
}
__ pop(rax);
#endif
__ reset_last_Java_frame(thread, true, false);
__ reset_last_Java_frame(thread, true);
#ifndef _LP64
__ pop(rcx); // discard thread arg
__ pop(rcx); // discard dummy
......
......@@ -370,13 +370,16 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
assert(!entry_frame_is_first(), "next Java fp must be non zero");
assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
// Since we are walking the stack now this nested anchor is obviously walkable
// even if it wasn't when it was stacked.
if (!jfa->walkable()) {
// Capture _last_Java_pc (if needed) and mark anchor walkable.
jfa->capture_last_Java_pc();
}
map->clear();
assert(map->include_argument_oops(), "should be set by clear");
if (jfa->last_Java_pc() != NULL ) {
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
return fr;
}
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp());
assert(jfa->last_Java_pc() != NULL, "not walkable");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
return fr;
}
......@@ -714,3 +717,21 @@ frame::frame(void* sp, void* fp, void* pc) {
init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
#endif
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
// last frame set?
if (last_Java_sp() == NULL) return;
// already walkable?
if (walkable()) return;
assert(Thread::current() == (Thread*)thread, "not current thread");
assert(last_Java_sp() != NULL, "not called from Java code?");
assert(last_Java_pc() == NULL, "already walkable");
capture_last_Java_pc();
assert(walkable(), "something went wrong");
}
void JavaFrameAnchor::capture_last_Java_pc() {
assert(_last_Java_sp != NULL, "no last frame set");
assert(_last_Java_pc == NULL, "already walkable");
_last_Java_pc = (address)_last_Java_sp[-1];
}
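Why reading _last_Java_sp[-1] yields the pc: on x86 the CALL that leaves Java pushes the return address, so the stack word just below the recorded last_Java_sp is the caller's pc, and capturing it is exactly what makes the anchor walkable. A minimal stand-alone sketch of that invariant (ordinary C++, not HotSpot code; Anchor and fake_return_site are illustrative names):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Anchor {
  intptr_t* last_Java_sp;  // sp recorded on the transition out of Java
  uint8_t*  last_Java_pc;  // NULL until captured

  bool walkable() const { return last_Java_sp != NULL && last_Java_pc != NULL; }

  void capture_last_Java_pc() {
    assert(last_Java_sp != NULL);               // a last frame must be set
    assert(last_Java_pc == NULL);               // not already walkable
    last_Java_pc = (uint8_t*)last_Java_sp[-1];  // the word a CALL pushed
  }
};

int main() {
  uint8_t fake_return_site = 0;            // stands in for a code address
  intptr_t stack[4] = {0, 0, 0, 0};
  stack[1] = (intptr_t)&fake_return_site;  // what the CALL would have pushed
  Anchor a = { &stack[2], NULL };          // sp points just above that word
  assert(!a.walkable());
  a.capture_last_Java_pc();                // mirrors the code above
  assert(a.walkable() && a.last_Java_pc == &fake_return_site);
  return 0;
}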
......@@ -96,6 +96,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// call a specialized frame constructor instead of this one.
// Then we could use the assert below. However this assert is of somewhat dubious
// value.
// UPDATE: this constructor is only used by trace_method_handle_stub() now.
// assert(_pc != NULL, "no pc?");
_cb = CodeCache::find_blob(_pc);
......
......@@ -62,10 +62,9 @@ public:
_last_Java_sp = src->_last_Java_sp;
}
// Always walkable
bool walkable(void) { return true; }
// Never any thing to do since we are always walkable and can find address of return addresses
void make_walkable(JavaThread* thread) { }
bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
void make_walkable(JavaThread* thread);
void capture_last_Java_pc(void);
intptr_t* last_Java_sp(void) const { return _last_Java_sp; }
......
......@@ -748,8 +748,7 @@ void MacroAssembler::pushptr(AddressLiteral src) {
}
}
void MacroAssembler::reset_last_Java_frame(bool clear_fp,
bool clear_pc) {
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
// we must set sp to zero to clear frame
movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
// must clear fp, so that compiled frames are not confused; it is
......@@ -758,9 +757,8 @@ void MacroAssembler::reset_last_Java_frame(bool clear_fp,
movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
}
if (clear_pc) {
movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
}
// Always clear the pc because it could have been set by make_walkable()
movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
}
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
......@@ -2561,7 +2559,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
}
// reset last Java frame
// Only interpreter should have to clear fp
reset_last_Java_frame(java_thread, true, false);
reset_last_Java_frame(java_thread, true);
#ifndef CC_INTERP
// C++ interp handles this in the interpreter
......@@ -3808,7 +3806,7 @@ void MacroAssembler::push_IU_state() {
pusha();
}
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) {
// determine java_thread register
if (!java_thread->is_valid()) {
java_thread = rdi;
......@@ -3820,8 +3818,8 @@ void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp,
movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
}
if (clear_pc)
movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
// Always clear the pc because it could have been set by make_walkable()
movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
}
......
......@@ -289,10 +289,10 @@ class MacroAssembler: public Assembler {
Register last_java_fp,
address last_java_pc);
void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc);
void reset_last_Java_frame(Register thread, bool clear_fp);
// thread in the default location (r15_thread on 64bit)
void reset_last_Java_frame(bool clear_fp, bool clear_pc);
void reset_last_Java_frame(bool clear_fp);
// Stores
void store_check(Register obj); // store check for obj - register is destroyed afterwards
......
......@@ -116,7 +116,7 @@ void OptoRuntime::generate_exception_blob() {
// No registers to map, rbp is known implicitly
oop_maps->add_gc_map( __ pc() - start, new OopMap( framesize, 0 ));
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false, false);
__ reset_last_Java_frame(rcx, false);
// Restore callee-saved registers
__ movptr(rbp, Address(rsp, rbp_off * wordSize));
......
......@@ -1333,7 +1333,7 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
__ increment(rsp, wordSize);
__ get_thread(thread);
__ reset_last_Java_frame(thread, false, true);
__ reset_last_Java_frame(thread, false);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
......@@ -2251,7 +2251,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// We can finally stop using that last_Java_frame we setup ages ago
__ reset_last_Java_frame(thread, false, true);
__ reset_last_Java_frame(thread, false);
// Unpack oop result
if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
......@@ -2951,7 +2951,7 @@ void SharedRuntime::generate_deopt_blob() {
__ pop(rcx);
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false, false);
__ reset_last_Java_frame(rcx, false);
// Load UnrollBlock into EDI
__ mov(rdi, rax);
......@@ -3117,7 +3117,7 @@ void SharedRuntime::generate_deopt_blob() {
__ push(rax);
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false, false);
__ reset_last_Java_frame(rcx, false);
// Collect return values
__ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
......@@ -3219,7 +3219,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false, false);
__ reset_last_Java_frame(rcx, false);
// Load UnrollBlock into EDI
__ movptr(rdi, rax);
......@@ -3331,7 +3331,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
__ get_thread(rdi);
__ reset_last_Java_frame(rdi, true, false);
__ reset_last_Java_frame(rdi, true);
// Pop self-frame.
__ leave(); // Epilog!
......@@ -3426,7 +3426,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// Clear last_Java_sp again
__ get_thread(java_thread);
__ reset_last_Java_frame(java_thread, false, false);
__ reset_last_Java_frame(java_thread, false);
__ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, noException);
......@@ -3501,7 +3501,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
__ addptr(rsp, wordSize);
// clear last_Java_sp
__ reset_last_Java_frame(thread, true, false);
__ reset_last_Java_frame(thread, true);
// check for pending exceptions
Label pending;
__ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
......
......@@ -1388,7 +1388,7 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
__ reset_last_Java_frame(false, true);
__ reset_last_Java_frame(false);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
......@@ -2497,7 +2497,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
restore_native_result(masm, ret_type, stack_slots);
}
__ reset_last_Java_frame(false, true);
__ reset_last_Java_frame(false);
// Unpack oop result
if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
......@@ -3435,7 +3435,7 @@ void SharedRuntime::generate_deopt_blob() {
// find any register it might need.
oop_maps->add_gc_map(__ pc() - start, map);
__ reset_last_Java_frame(false, false);
__ reset_last_Java_frame(false);
// Load UnrollBlock* into rdi
__ mov(rdi, rax);
......@@ -3592,7 +3592,7 @@ void SharedRuntime::generate_deopt_blob() {
new OopMap( frame_size_in_words, 0 ));
// Clear fp AND pc
__ reset_last_Java_frame(true, true);
__ reset_last_Java_frame(true);
// Collect return values
__ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
......@@ -3662,7 +3662,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
oop_maps->add_gc_map(__ pc() - start, map);
__ reset_last_Java_frame(false, false);
__ reset_last_Java_frame(false);
// Load UnrollBlock* into rdi
__ mov(rdi, rax);
......@@ -3775,7 +3775,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
// Clear fp AND pc
__ reset_last_Java_frame(true, true);
__ reset_last_Java_frame(true);
// Pop self-frame.
__ leave(); // Epilog
......@@ -3858,7 +3858,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
Label noException;
__ reset_last_Java_frame(false, false);
__ reset_last_Java_frame(false);
__ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, noException);
......@@ -3928,7 +3928,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
// rax contains the address we are going to jump to assuming no exception got installed
// clear last_Java_sp
__ reset_last_Java_frame(false, false);
__ reset_last_Java_frame(false);
// check for pending exceptions
Label pending;
__ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
......@@ -4309,7 +4309,7 @@ void OptoRuntime::generate_exception_blob() {
oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
__ reset_last_Java_frame(false, true);
__ reset_last_Java_frame(false);
// Restore callee-saved registers
......
......@@ -2901,7 +2901,7 @@ class StubGenerator: public StubCodeGenerator {
// however can use the register value directly if it is callee saved.
__ get_thread(java_thread);
__ reset_last_Java_frame(java_thread, true, false);
__ reset_last_Java_frame(java_thread, true);
__ leave(); // required for proper stackwalking of RuntimeStub frame
......
......@@ -3923,7 +3923,7 @@ class StubGenerator: public StubCodeGenerator {
oop_maps->add_gc_map(the_pc - start, map);
__ reset_last_Java_frame(true, true);
__ reset_last_Java_frame(true);
__ leave(); // required for proper stackwalking of RuntimeStub frame
......
......@@ -1289,7 +1289,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// change thread state
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
__ reset_last_Java_frame(thread, true, true);
__ reset_last_Java_frame(thread, true);
// reset handle block
__ movptr(t, Address(thread, JavaThread::active_handles_offset()));
......@@ -1819,7 +1819,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ set_last_Java_frame(thread, noreg, rbp, __ pc());
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
__ get_thread(thread);
__ reset_last_Java_frame(thread, true, true);
__ reset_last_Java_frame(thread, true);
// Restore the last_sp and null it out
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
......
......@@ -1262,7 +1262,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
// reset_last_Java_frame
__ reset_last_Java_frame(true, true);
__ reset_last_Java_frame(r15_thread, true);
// reset handle block
__ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
......@@ -1837,7 +1837,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// PC must point into interpreter here
__ set_last_Java_frame(noreg, rbp, __ pc());
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
__ reset_last_Java_frame(true, true);
__ reset_last_Java_frame(r15_thread, true);
// Restore the last_sp and null it out
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
......
......@@ -2819,11 +2819,8 @@ extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
extern "C" JNIEXPORT void numa_error(char *where) { }
extern "C" JNIEXPORT int fork1() { return fork(); }
// If we are running with libnuma version > 2, then we should
// be trying to use symbols with versions 1.1
// If we are running with earlier version, which did not have symbol versions,
// we should use the base version.
// Handle request to load libnuma symbol version 1.1 (API v1). If it fails,
// load the symbol from the base version instead.
void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
void *f = dlvsym(handle, name, "libnuma_1.1");
if (f == NULL) {
......@@ -2832,6 +2829,12 @@ void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
return f;
}
// Handle request to load libnuma symbol version 1.2 (API v2) only.
// Return NULL if the symbol is not defined in this particular version.
void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {
return dlvsym(handle, name, "libnuma_1.2");
}
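Both helpers above wrap glibc's dlvsym(), which resolves a symbol at a specific version tag instead of the default one. A minimal stand-alone sketch of the same lookup pattern (not HotSpot code; assumes a glibc system with libnuma installed, build with g++ sketch.cpp -ldl):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE              // dlvsym() is a GNU extension
#endif
#include <dlfcn.h>
#include <cstdio>

int main() {
  void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
  if (handle == NULL) {
    fprintf(stderr, "dlopen: %s\n", dlerror());
    return 1;                    // no libnuma on this machine
  }
  // libnuma_dlsym() pattern: prefer the 1.1 (API v1) version tag,
  // fall back to the unversioned base symbol if the tag is absent.
  void* f = dlvsym(handle, "numa_interleave_memory", "libnuma_1.1");
  if (f == NULL) {
    f = dlsym(handle, "numa_interleave_memory");
  }
  // libnuma_v2_dlsym() pattern: 1.2 (API v2) only; NULL simply means
  // this libnuma build exports no v2 variant of the symbol.
  void* f2 = dlvsym(handle, "numa_interleave_memory", "libnuma_1.2");
  printf("v1 symbol: %p, v2 symbol: %p\n", f, f2);
  dlclose(handle);
  return 0;
}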
bool os::Linux::libnuma_init() {
// sched_getcpu() should be in libc.
set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
......@@ -2856,6 +2859,8 @@ bool os::Linux::libnuma_init() {
libnuma_dlsym(handle, "numa_tonode_memory")));
set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
libnuma_dlsym(handle, "numa_interleave_memory")));
set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
libnuma_v2_dlsym(handle, "numa_interleave_memory")));
set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
libnuma_dlsym(handle, "numa_set_bind_policy")));
set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
......@@ -2975,6 +2980,7 @@ os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_node
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
......
......@@ -190,6 +190,8 @@ class Linux {
static void libpthread_init();
static bool libnuma_init();
static void* libnuma_dlsym(void* handle, const char* name);
// libnuma v2 (libnuma_1.2) symbols
static void* libnuma_v2_dlsym(void* handle, const char* name);
// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)
static size_t min_stack_allowed;
......@@ -250,6 +252,8 @@ private:
typedef int (*numa_available_func_t)(void);
typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
typedef void (*numa_set_bind_policy_func_t)(int policy);
typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
typedef int (*numa_distance_func_t)(int node1, int node2);
......@@ -261,6 +265,7 @@ private:
static numa_available_func_t _numa_available;
static numa_tonode_memory_func_t _numa_tonode_memory;
static numa_interleave_memory_func_t _numa_interleave_memory;
static numa_interleave_memory_v2_func_t _numa_interleave_memory_v2;
static numa_set_bind_policy_func_t _numa_set_bind_policy;
static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset;
static numa_distance_func_t _numa_distance;
......@@ -275,6 +280,7 @@ private:
static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
static void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; }
static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; }
static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
......@@ -296,7 +302,10 @@ public:
return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
}
static void numa_interleave_memory(void *start, size_t size) {
if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
// Use v2 api if available
if (_numa_interleave_memory_v2 != NULL && _numa_all_nodes_ptr != NULL) {
_numa_interleave_memory_v2(start, size, _numa_all_nodes_ptr);
} else if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
_numa_interleave_memory(start, size, _numa_all_nodes);
}
}
......
......@@ -44,7 +44,7 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
// If we have a last_Java_frame, then we should use it even if
// isInJava == true. It should be more reliable than ucontext info.
if (jt->has_last_Java_frame()) {
if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
*fr_addr = jt->pd_last_frame();
return true;
}
......
......@@ -32,12 +32,8 @@
frame pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
if (_anchor.last_Java_pc() != NULL) {
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
} else {
// This will pick up pc from sp
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp());
}
assert(_anchor.last_Java_pc() != NULL, "not walkable");
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
}
public:
......
......@@ -45,7 +45,7 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
// If we have a last_Java_frame, then we should use it even if
// isInJava == true. It should be more reliable than ucontext info.
if (jt->has_last_Java_frame()) {
if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
*fr_addr = jt->pd_last_frame();
return true;
}
......
......@@ -32,12 +32,8 @@
frame pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
if (_anchor.last_Java_pc() != NULL) {
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
} else {
// This will pick up pc from sp
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp());
}
assert(_anchor.last_Java_pc() != NULL, "not walkable");
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
}
public:
......
......@@ -442,7 +442,7 @@ int VM_Version::platform_features(int features) {
// is available to us as well
Sysinfo cpu_info(SI_CPUBRAND);
bool use_solaris_12_api = cpu_info.valid();
const char* impl;
const char* impl = "unknown";
int impl_m = 0;
if (use_solaris_12_api) {
impl = cpu_info.value();
......@@ -477,7 +477,7 @@ int VM_Version::platform_features(int features) {
kstat_close(kc);
}
}
assert(impl_m != 0, err_msg("Unknown CPU implementation %s", impl));
assert(impl_m != 0, err_msg("Unrecognized CPU implementation %s", impl));
features |= impl_m;
bool is_sun4v = (features & sun4v_m) != 0;
......
......@@ -44,9 +44,8 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr,
assert(this->is_Java_thread(), "must be JavaThread");
JavaThread* jt = (JavaThread *)this;
// last_Java_frame is always walkable and safe use it if we have it
if (jt->has_last_Java_frame()) {
// There is a small window where last_Java_frame is not walkable or safe
if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
*fr_addr = jt->pd_last_frame();
return true;
}
......
......@@ -30,12 +30,8 @@
frame pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
if (_anchor.last_Java_pc() != NULL) {
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
} else {
// This will pick up pc from sp
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp());
}
assert(_anchor.last_Java_pc() != NULL, "not walkable");
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
}
public:
......
......@@ -47,7 +47,7 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
// If we have a last_Java_frame, then we should use it even if
// isInJava == true. It should be more reliable than CONTEXT info.
if (jt->has_last_Java_frame()) {
if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
*fr_addr = jt->pd_last_frame();
return true;
}
......
......@@ -32,12 +32,8 @@
frame pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
if (_anchor.last_Java_pc() != NULL) {
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
} else {
// This will pick up pc from sp
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp());
}
assert(_anchor.last_Java_pc() != NULL, "not walkable");
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
}
public:
......
......@@ -1530,7 +1530,7 @@ void GraphBuilder::method_return(Value x) {
ciMethod* caller = state()->scope()->method();
ciMethodData* md = caller->method_data_or_null();
ciProfileData* data = md->bci_to_data(invoke_bci);
if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
// May not be true in case of an inlined call through a method handle intrinsic.
if (has_return) {
......@@ -1747,7 +1747,7 @@ Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool
start = has_receiver ? 1 : 0;
if (profile_arguments()) {
ciProfileData* data = method()->method_data()->bci_to_data(bci());
if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
}
}
......@@ -4465,7 +4465,7 @@ void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m,
}
ciMethodData* md = m->method_data_or_null();
ciProfileData* data = md->bci_to_data(invoke_bci);
if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
append(new ProfileReturnType(m , invoke_bci, callee, ret));
}
}
......
......@@ -3185,50 +3185,52 @@ void LIRGenerator::profile_arguments(ProfileCall* x) {
int bci = x->bci_of_invoke();
ciMethodData* md = x->method()->method_data_or_null();
ciProfileData* data = md->bci_to_data(bci);
if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
(data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
int base_offset = md->byte_offset_of_slot(data, extra);
LIR_Opr mdp = LIR_OprFact::illegalOpr;
ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
int start = 0;
int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
// first argument is not profiled at call (method handle invoke)
assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
start = 1;
}
ciSignature* callee_signature = x->callee()->signature();
// method handle call to virtual method
bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
bool ignored_will_link;
ciSignature* signature_at_call = NULL;
x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
ciSignatureStream signature_at_call_stream(signature_at_call);
// if called through method handle invoke, some arguments may have been popped
for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
ciKlass* exact = profile_type(md, base_offset, off,
args->type(i), x->profiled_arg_at(i+start), mdp,
!x->arg_needs_null_check(i+start),
signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
if (exact != NULL) {
md->set_argument_type(bci, i, exact);
if (data != NULL) {
if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
(data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
int base_offset = md->byte_offset_of_slot(data, extra);
LIR_Opr mdp = LIR_OprFact::illegalOpr;
ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
int start = 0;
int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
// first argument is not profiled at call (method handle invoke)
assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
start = 1;
}
}
} else {
ciSignature* callee_signature = x->callee()->signature();
// method handle call to virtual method
bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
bool ignored_will_link;
ciSignature* signature_at_call = NULL;
x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
ciSignatureStream signature_at_call_stream(signature_at_call);
// if called through method handle invoke, some arguments may have been popped
for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
ciKlass* exact = profile_type(md, base_offset, off,
args->type(i), x->profiled_arg_at(i+start), mdp,
!x->arg_needs_null_check(i+start),
signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
if (exact != NULL) {
md->set_argument_type(bci, i, exact);
}
}
} else {
#ifdef ASSERT
Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
int n = x->nb_profiled_args();
assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
(x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
"only at JSR292 bytecodes");
Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
int n = x->nb_profiled_args();
assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
(x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
"only at JSR292 bytecodes");
#endif
}
}
}
}
......@@ -3319,24 +3321,26 @@ void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
int bci = x->bci_of_invoke();
ciMethodData* md = x->method()->method_data_or_null();
ciProfileData* data = md->bci_to_data(bci);
assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
LIR_Opr mdp = LIR_OprFact::illegalOpr;
bool ignored_will_link;
ciSignature* signature_at_call = NULL;
x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
// The offset within the MDO of the entry to update may be too large
// to be used in load/store instructions on some platforms. So have
// profile_type() compute the address of the profile in a register.
ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
ret->type(), x->ret(), mdp,
!x->needs_null_check(),
signature_at_call->return_type()->as_klass(),
x->callee()->signature()->return_type()->as_klass());
if (exact != NULL) {
md->set_return_type(bci, exact);
if (data != NULL) {
assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
LIR_Opr mdp = LIR_OprFact::illegalOpr;
bool ignored_will_link;
ciSignature* signature_at_call = NULL;
x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
// The offset within the MDO of the entry to update may be too large
// to be used in load/store instructions on some platforms. So have
// profile_type() compute the address of the profile in a register.
ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
ret->type(), x->ret(), mdp,
!x->needs_null_check(),
signature_at_call->return_type()->as_klass(),
x->callee()->signature()->return_type()->as_klass());
if (exact != NULL) {
md->set_return_type(bci, exact);
}
}
}
......
......@@ -391,11 +391,13 @@ void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) {
MethodData* mdo = get_MethodData();
if (mdo != NULL) {
ProfileData* data = mdo->bci_to_data(bci);
if (data->is_CallTypeData()) {
data->as_CallTypeData()->set_argument_type(i, k->get_Klass());
} else {
assert(data->is_VirtualCallTypeData(), "no arguments!");
data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass());
if (data != NULL) {
if (data->is_CallTypeData()) {
data->as_CallTypeData()->set_argument_type(i, k->get_Klass());
} else {
assert(data->is_VirtualCallTypeData(), "no arguments!");
data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass());
}
}
}
}
......@@ -413,11 +415,13 @@ void ciMethodData::set_return_type(int bci, ciKlass* k) {
MethodData* mdo = get_MethodData();
if (mdo != NULL) {
ProfileData* data = mdo->bci_to_data(bci);
if (data->is_CallTypeData()) {
data->as_CallTypeData()->set_return_type(k->get_Klass());
} else {
assert(data->is_VirtualCallTypeData(), "no arguments!");
data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
if (data != NULL) {
if (data->is_CallTypeData()) {
data->as_CallTypeData()->set_return_type(k->get_Klass());
} else {
assert(data->is_VirtualCallTypeData(), "no arguments!");
data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
}
}
}
}
......
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -55,6 +55,15 @@ const TypePtr *MemNode::adr_type() const {
return calculate_adr_type(adr->bottom_type(), cross_check);
}
bool MemNode::check_if_adr_maybe_raw(Node* adr) {
if (adr != NULL) {
if (adr->bottom_type()->base() == Type::RawPtr || adr->bottom_type()->base() == Type::AnyPtr) {
return true;
}
}
return false;
}
#ifndef PRODUCT
void MemNode::dump_spec(outputStream *st) const {
if (in(Address) == NULL) return; // node is dead
......@@ -503,6 +512,7 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) {
if (offset == Type::OffsetBot)
return NULL; // cannot unalias unless there are precise offsets
const bool adr_maybe_raw = check_if_adr_maybe_raw(adr);
const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr();
intptr_t size_in_bytes = memory_size();
......@@ -519,6 +529,13 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) {
Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
if (st_base == NULL)
break; // inscrutable pointer
// For raw accesses it's not enough to prove that constant offsets don't intersect.
// We need the bases to be equal in order for the offset check to make sense.
if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) {
break;
}
if (st_offset != offset && st_offset != Type::OffsetBot) {
const int MAX_STORE = BytesPerLong;
if (st_offset >= offset + size_in_bytes ||
......
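The base check added above matters because two raw addresses with different bases and different constant offsets can still fold to the same location; that is exactly what the new TestRawAliasing test at the end of this commit exercises. A minimal stand-alone illustration (plain C++, not HotSpot code):

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  char* mem = (char*)malloc(128);
  // Two distinct "bases" carved out of one allocation, as in the test:
  uintptr_t base_x = (uintptr_t)mem + 50;   // like base + OFFSET_X
  uintptr_t base_y = (uintptr_t)mem + 100;  // like base + OFFSET_Y
  // Different bases, different constant offsets, same raw address:
  assert((char*)(base_x - 50) == (char*)(base_y - 100));
  // So for raw pointers, disjoint constant offsets prove nothing unless
  // the bases are provably the same -- the check find_previous_store now makes.
  free(mem);
  return 0;
}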
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -75,6 +75,8 @@ protected:
debug_only(_adr_type=at; adr_type();)
}
static bool check_if_adr_maybe_raw(Node* adr);
public:
// Helpers for the optimizer. Documented in memnode.cpp.
static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
......
......@@ -812,6 +812,9 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t
ciMethodData* methodData = method()->method_data();
if (!methodData->is_mature()) return PROB_UNKNOWN;
ciProfileData* data = methodData->bci_to_data(bci());
if (data == NULL) {
return PROB_UNKNOWN;
}
if (!data->is_JumpData()) return PROB_UNKNOWN;
// get taken and not taken values
......@@ -903,8 +906,8 @@ float Parse::branch_prediction(float& cnt,
// of the OSR-ed method, and we want to deopt to gather more stats.
// If you have ANY counts, then this loop is simply 'cold' relative
// to the OSR loop.
if (data->as_BranchData()->taken() +
data->as_BranchData()->not_taken() == 0 ) {
if (data == NULL ||
(data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) {
// This is the only way to return PROB_UNKNOWN:
return PROB_UNKNOWN;
}
......
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -298,19 +298,41 @@ void SensorInfo::trigger(int count, TRAPS) {
Klass* k = Management::sun_management_Sensor_klass(CHECK);
instanceKlassHandle sensorKlass (THREAD, k);
Handle sensor_h(THREAD, _sensor_obj);
Handle usage_h = MemoryService::create_MemoryUsage_obj(_usage, CHECK);
Symbol* trigger_method_signature;
JavaValue result(T_VOID);
JavaCallArguments args(sensor_h);
args.push_int((int) count);
args.push_oop(usage_h);
Handle usage_h = MemoryService::create_MemoryUsage_obj(_usage, THREAD);
// Call Sensor::trigger(int, MemoryUsage) to send notification to listeners.
// When OOME occurs and fails to allocate MemoryUsage object, call
// Sensor::trigger(int) instead. The pending request will be processed
// but no notification will be sent.
if (HAS_PENDING_EXCEPTION) {
assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOME here");
CLEAR_PENDING_EXCEPTION;
trigger_method_signature = vmSymbols::int_void_signature();
} else {
trigger_method_signature = vmSymbols::trigger_method_signature();
args.push_oop(usage_h);
}
JavaCalls::call_virtual(&result,
sensorKlass,
vmSymbols::trigger_name(),
vmSymbols::trigger_method_signature(),
trigger_method_signature,
&args,
CHECK);
THREAD);
if (HAS_PENDING_EXCEPTION) {
// We just clear the OOM pending exception that we might have encountered
// in Java's triggerAction(), and continue with updating the counters since
// the Java counters have been updated too.
assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOME here");
CLEAR_PENDING_EXCEPTION;
}
}
{
......
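The subtle part of the hunk above is the switch from CHECK to THREAD: CHECK returns to the caller as soon as a pending exception is set, while THREAD leaves the pending-exception flag for the caller to inspect, clear, and recover from. A toy stand-alone analogue of that control flow (plain C++, not HotSpot code; pending_exception stands in for the thread's pending-exception slot):

#include <cassert>

static bool pending_exception = false;  // stands in for HAS_PENDING_EXCEPTION

static int create_usage_obj(bool simulate_oom) {
  if (simulate_oom) { pending_exception = true; return 0; }  // allocation failed
  return 42;                                                 // MemoryUsage stand-in
}

static void trigger(bool simulate_oom) {
  // THREAD-style call: no early return, inspect the flag ourselves.
  int usage = create_usage_obj(simulate_oom);
  if (pending_exception) {
    pending_exception = false;  // CLEAR_PENDING_EXCEPTION
    assert(usage == 0);         // degrade: notify without the usage payload
  } else {
    assert(usage == 42);        // full notification with the payload
  }
  // Either way the pending request is still processed; with CHECK-style
  // propagation the OOM path would have unwound before reaching here.
}

int main() {
  trigger(false);
  trigger(true);
  return 0;
}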
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8178047
* @run main/othervm -XX:CompileCommand=exclude,*.main -XX:-TieredCompilation -XX:-BackgroundCompilation compiler.unsafe.TestRawAliasing
*/
package compiler.unsafe;
import java.lang.reflect.Field;
public class TestRawAliasing {
static private final sun.misc.Unsafe UNSAFE;
static {
try {
Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
UNSAFE = (sun.misc.Unsafe) f.get(null);
} catch (Exception e) {
throw new RuntimeException("Unable to get Unsafe instance.", e);
}
}
static private final int OFFSET_X = 50;
static private final int OFFSET_Y = 100;
private static int test(long base_plus_offset_x, long base_plus_offset_y, int magic_value) {
// write 0 to a location
UNSAFE.putByte(base_plus_offset_x - OFFSET_X, (byte)0);
// write an unfoldable value to the same location through a different base
UNSAFE.putByte(base_plus_offset_y - OFFSET_Y, (byte)magic_value);
// read the value back; it should equal the unfoldable magic_value
return UNSAFE.getByte(base_plus_offset_x - OFFSET_X);
}
private static final int OFF_HEAP_AREA_SIZE = 128;
private static final byte MAGIC = 123;
// main is excluded from compilation since we don't want the test method to be inlined, which would let the base values constant-fold
public static void main(String... args) {
long base = UNSAFE.allocateMemory(OFF_HEAP_AREA_SIZE);
for (int i = 0; i < 100_000; i++) {
if (test(base + OFFSET_X, base + OFFSET_Y, MAGIC) != MAGIC) {
throw new RuntimeException("Unexpected magic value");
}
}
}
}