Commit cbec2694 authored by S sgoldman

6603919: Stackwalking crash on x86 -server with Sun Studio's collect -j on

Summary: Rewrite frame::safe_for_sender and friends to be safe for collector/analyzer
Reviewed-by: dcubed, kvn
Parent 5c23285a
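As orientation for the diffs below: the rewritten frame::safe_for_sender layers explicit bounds checks on sp, unextended_sp, and fp before dereferencing anything, and only then tries to build and sanity-check a sender frame. A minimal sketch of that bounds-check shape (illustrative only, not code from the patch; only the JavaThread accessors stack_base()/stack_size() are real, the helper itself is made up):

// Illustrative sketch of the bounds checks the new safe_for_sender performs.
// Not part of the patch; the helper name and out-parameter are hypothetical.
static bool sketch_stack_bounds_ok(JavaThread* thread,
                                   address sp, address unextended_sp, address fp,
                                   bool* fp_safe_out) {
  address base  = thread->stack_base();
  address limit = base - thread->stack_size();
  if (sp > base || sp < limit)                    return false; // sp must lie within the stack
  if (unextended_sp > base || unextended_sp < sp) return false; // and at or above sp
  // fp may legitimately be garbage for compiled frames, so it is only recorded
  // here; callers insist on fp <= base && fp > sp only when they need to follow it.
  *fp_safe_out = (fp <= base) && (fp > sp);
  return true;
}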
......@@ -157,22 +157,158 @@ void RegisterMap::shift_individual_registers() {
check_location_valid();
}
bool frame::safe_for_sender(JavaThread *thread) {
address sp = (address)_sp;
if (sp != NULL &&
(sp <= thread->stack_base() && sp >= thread->stack_base() - thread->stack_size())) {
// Unfortunately we can only check frame complete for runtime stubs and nmethod
// other generic buffer blobs are more problematic so we just assume they are
// ok. adapter blobs never have a frame complete and are never ok.
if (_cb != NULL && !_cb->is_frame_complete_at(_pc)) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
}
address _SP = (address) sp();
address _FP = (address) fp();
address _UNEXTENDED_SP = (address) unextended_sp();
// sp must be within the stack
bool sp_safe = (_SP <= thread->stack_base()) &&
(_SP >= thread->stack_base() - thread->stack_size());
if (!sp_safe) {
return false;
}
// unextended sp must be within the stack and above or equal sp
bool unextended_sp_safe = (_UNEXTENDED_SP <= thread->stack_base()) &&
(_UNEXTENDED_SP >= _SP);
if (!unextended_sp_safe) return false;
// an fp must be within the stack and above (but not equal) sp
bool fp_safe = (_FP <= thread->stack_base()) &&
(_FP > _SP);
// We know sp/unextended_sp are safe; only fp is questionable here
// If the current frame is known to the code cache then we can attempt to
// construct the sender and do some validation of it. This goes a long way
// toward eliminating issues when we get into frame construction code
if (_cb != NULL ) {
// First check if frame is complete and tester is reliable
// Unfortunately we can only check frame complete for runtime stubs and nmethod
// other generic buffer blobs are more problematic so we just assume they are
// ok. adapter blobs never have a frame complete and are never ok.
if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
}
}
// Entry frame checks
if (is_entry_frame()) {
// an entry frame must have a valid fp.
if (!fp_safe) {
return false;
}
return true;
// Validate the JavaCallWrapper an entry frame must have
address jcw = (address)entry_frame_call_wrapper();
bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > _FP);
return jcw_safe;
}
intptr_t* younger_sp = sp();
intptr_t* _SENDER_SP = sender_sp(); // sender is actually just _FP
bool adjusted_stack = is_interpreted_frame();
address sender_pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
// We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) {
return false;
}
// It should be safe to construct the sender though it might not be valid
frame sender(_SENDER_SP, younger_sp, adjusted_stack);
// Do we have a valid fp?
address sender_fp = (address) sender.fp();
// an fp must be within the stack and above (but not equal) current frame's _FP
bool sender_fp_safe = (sender_fp <= thread->stack_base()) &&
(sender_fp > _FP);
if (!sender_fp_safe) {
return false;
}
// If the potential sender is the interpreter then we can do some more checking
if (Interpreter::contains(sender_pc)) {
return sender.is_interpreted_frame_valid(thread);
}
// Could just be some random pointer within the codeBlob
if (!sender.cb()->instructions_contains(sender_pc)) return false;
// We should never be able to see an adapter if the current frame is something from code cache
if ( sender_blob->is_adapter_blob()) {
return false;
}
if( sender.is_entry_frame()) {
// Validate the JavaCallWrapper an entry frame must have
address jcw = (address)sender.entry_frame_call_wrapper();
bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > sender_fp);
return jcw_safe;
}
// If the frame size is 0 something is bad because every nmethod has a non-zero frame size
// because you must allocate window space
if (sender_blob->frame_size() == 0) {
assert(!sender_blob->is_nmethod(), "should count return address at least");
return false;
}
// The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
// This happens because at a save instruction the O7 we get is a leftover from an earlier
// window use. So if a runtime stub creates two frames (common in fastdebug/jvmg) then we see the
// stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
// the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
// that initial frame and retrying.
if (!sender_blob->is_nmethod()) {
return false;
}
// Could put some more validation for the potential non-interpreted sender
// frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
// One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb
// We've validated the potential sender that would be created
return true;
}
return false;
// Must be native-compiled frame. Since sender will try and use fp to find
// linkages it must be safe
if (!fp_safe) return false;
// could try and do some more potential verification of native frame if we could think of some...
return true;
}
// constructors
......@@ -450,7 +586,7 @@ void frame::pd_gc_epilog() {
}
bool frame::is_interpreted_frame_valid() const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
#ifdef CC_INTERP
// Is there anything to do?
#else
......@@ -462,6 +598,7 @@ bool frame::is_interpreted_frame_valid() const {
if (sp() == 0 || (intptr_t(sp()) & (2*wordSize-1)) != 0) {
return false;
}
const intptr_t interpreter_frame_initial_sp_offset = interpreter_frame_vm_local_words;
if (fp() + interpreter_frame_initial_sp_offset < sp()) {
return false;
......@@ -471,9 +608,43 @@ bool frame::is_interpreted_frame_valid() const {
if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
return false;
}
if (fp() - sp() > 4096) { // stack frames shouldn't be large.
// do some validation of frame elements
// first the method
methodOop m = *interpreter_frame_method_addr();
// validate the method we'd find in this potential sender
if (!Universe::heap()->is_valid_method(m)) return false;
// stack frames shouldn't be much larger than max_stack elements
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
return false;
}
// validate bci/bcx
intptr_t bcx = interpreter_frame_bcx();
if (m->validate_bci_from_bcx(bcx) < 0) {
return false;
}
// validate constantPoolCacheOop
constantPoolCacheOop cp = *interpreter_frame_cache_addr();
if (cp == NULL ||
!Space::is_aligned(cp) ||
!Universe::heap()->is_permanent((void*)cp)) return false;
// validate locals
address locals = (address) *interpreter_frame_locals_addr();
if (locals > thread->stack_base() || locals < (address) fp()) return false;
// We'd have to be pretty unlucky to be misled at this point
#endif /* CC_INTERP */
return true;
}
......
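The sparc code above and the matching x86 code below expose the same contract to callers: probe with safe_for_sender before trusting any linkage, and additionally vet interpreter frames with the now thread-aware is_interpreted_frame_valid. A hedged sketch of that calling pattern (the helper is hypothetical; the frame/JavaThread methods are the ones changed in this commit):

// Hypothetical caller-side helper, for illustration only.
static bool sketch_try_walk_one(JavaThread* jt, frame fr, RegisterMap* map, frame* caller) {
  if (!fr.safe_for_sender(jt)) {
    return false;                                  // drop this sample instead of crashing
  }
  if (fr.is_interpreted_frame() && !fr.is_interpreted_frame_valid(jt)) {
    return false;                                  // interpreter frame not fully set up yet
  }
  *caller = fr.sender(map);                        // now reasonably safe to attempt
  return true;
}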
......@@ -37,39 +37,181 @@ bool frame::safe_for_sender(JavaThread *thread) {
address sp = (address)_sp;
address fp = (address)_fp;
address unextended_sp = (address)_unextended_sp;
bool sp_safe = (sp != NULL &&
(sp <= thread->stack_base()) &&
(sp >= thread->stack_base() - thread->stack_size()));
bool unextended_sp_safe = (unextended_sp != NULL &&
(unextended_sp <= thread->stack_base()) &&
(unextended_sp >= thread->stack_base() - thread->stack_size()));
bool fp_safe = (fp != NULL &&
(fp <= thread->stack_base()) &&
(fp >= thread->stack_base() - thread->stack_size()));
if (sp_safe && unextended_sp_safe && fp_safe) {
// sp must be within the stack
bool sp_safe = (sp <= thread->stack_base()) &&
(sp >= thread->stack_base() - thread->stack_size());
if (!sp_safe) {
return false;
}
// unextended sp must be within the stack and above or equal sp
bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) &&
(unextended_sp >= sp);
if (!unextended_sp_safe) {
return false;
}
// an fp must be within the stack and above (but not equal) sp
bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);
// We know sp/unextended_sp are safe; only fp is questionable here
// If the current frame is known to the code cache then we can attempt to
// construct the sender and do some validation of it. This goes a long way
// toward eliminating issues when we get into frame construction code
if (_cb != NULL ) {
// First check if frame is complete and tester is reliable
// Unfortunately we can only check frame complete for runtime stubs and nmethod
// other generic buffer blobs are more problematic so we just assume they are
// ok. adapter blobs never have a frame complete and are never ok.
if (_cb != NULL && !_cb->is_frame_complete_at(_pc)) {
if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
}
}
// Entry frame checks
if (is_entry_frame()) {
// an entry frame must have a valid fp.
if (!fp_safe) return false;
// Validate the JavaCallWrapper an entry frame must have
address jcw = (address)entry_frame_call_wrapper();
bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp);
return jcw_safe;
}
intptr_t* sender_sp = NULL;
address sender_pc = NULL;
if (is_interpreted_frame()) {
// fp must be safe
if (!fp_safe) {
return false;
}
sender_pc = (address) this->fp()[return_addr_offset];
sender_sp = (intptr_t*) addr_at(sender_sp_offset);
} else {
// must be some sort of compiled/runtime frame
// fp does not have to be safe (although it could be checked for c1?)
sender_sp = _unextended_sp + _cb->frame_size();
// On Intel the return_address is always the word on the stack
sender_pc = (address) *(sender_sp-1);
}
// We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) {
return false;
}
// If the potential sender is the interpreter then we can do some more checking
if (Interpreter::contains(sender_pc)) {
// ebp is always saved in a recognizable place in any code we generate. However
// only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved ebp
// is really a frame pointer.
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
if (!saved_fp_safe) {
return false;
}
// construct the potential sender
frame sender(sender_sp, saved_fp, sender_pc);
return sender.is_interpreted_frame_valid(thread);
}
// Could just be some random pointer within the codeBlob
if (!sender_blob->instructions_contains(sender_pc)) return false;
// We should never be able to see an adapter if the current frame is something from code cache
if ( sender_blob->is_adapter_blob()) {
return false;
}
// Could be the call_stub
if (StubRoutines::returns_to_call_stub(sender_pc)) {
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
if (!saved_fp_safe) {
return false;
}
// construct the potential sender
frame sender(sender_sp, saved_fp, sender_pc);
// Validate the JavaCallWrapper an entry frame must have
address jcw = (address)sender.entry_frame_call_wrapper();
bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp());
return jcw_safe;
}
// If the frame size is 0 something is bad because every nmethod has a non-zero frame size
// because the return address counts against the callee's frame.
if (sender_blob->frame_size() == 0) {
assert(!sender_blob->is_nmethod(), "should count return address at least");
return false;
}
// We should never be able to see anything here except an nmethod. If something in the
// code cache (current frame) is called by an entity within the code cache that entity
// should not be anything but the call stub (already covered), the interpreter (already covered)
// or an nmethod.
assert(sender_blob->is_nmethod(), "Impossible call chain");
// Could put some more validation for the potential non-interpreted sender
// frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
// One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb
// We've validated the potential sender that would be created
return true;
}
// Note: fp == NULL is not really a prerequisite for this to be safe to
// walk for c2. However we've modified the code such that if we get
// a failure with fp != NULL we then try with FP == NULL.
// This is basically to mimic what a last_frame would look like if
// c2 had generated it.
if (sp_safe && unextended_sp_safe && fp == NULL) {
// frame must be complete if fp == NULL as fp == NULL is only sensible
// if we are looking at an nmethod and frame complete assures us of that.
if (_cb != NULL && _cb->is_frame_complete_at(_pc) && _cb->is_compiled_by_c2()) {
return true;
}
// Must be native-compiled frame. Since sender will try and use fp to find
// linkages it must be safe
if (!fp_safe) {
return false;
}
return false;
// Will the pc we fetch be non-zero (which we'll find at the oldest frame)
if ( (address) this->fp()[return_addr_offset] == NULL) return false;
// could try and do some more potential verification of native frame if we could think of some...
return true;
}
......@@ -292,7 +434,7 @@ void frame::pd_gc_epilog() {
// nothing done here now
}
bool frame::is_interpreted_frame_valid() const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// QQQ
#ifdef CC_INTERP
#else
......@@ -312,9 +454,45 @@ bool frame::is_interpreted_frame_valid() const {
if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
return false;
}
if (fp() - sp() > 4096) { // stack frames shouldn't be large.
// do some validation of frame elements
// first the method
methodOop m = *interpreter_frame_method_addr();
// validate the method we'd find in this potential sender
if (!Universe::heap()->is_valid_method(m)) return false;
// stack frames shouldn't be much larger than max_stack elements
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
return false;
}
// validate bci/bcx
intptr_t bcx = interpreter_frame_bcx();
if (m->validate_bci_from_bcx(bcx) < 0) {
return false;
}
// validate constantPoolCacheOop
constantPoolCacheOop cp = *interpreter_frame_cache_addr();
if (cp == NULL ||
!Space::is_aligned(cp) ||
!Universe::heap()->is_permanent((void*)cp)) return false;
// validate locals
address locals = (address) *interpreter_frame_locals_addr();
if (locals > thread->stack_base() || locals < (address) fp()) return false;
// We'd have to be pretty unlucky to be misled at this point
#endif // CC_INTERP
return true;
}
......
......@@ -72,15 +72,20 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
_unextended_sp = sp;
_fp = fp;
_pc = (address)(sp[-1]);
assert(_pc != NULL, "no pc?");
// Here's a sticky one. This constructor can be called via AsyncGetCallTrace
// when last_Java_sp is non-null but the pc fetched is junk. If we are truly
// unlucky the junk value could point into a zombied method and we'll die on the
// find_blob call. This is also why we can have no asserts on the validity
// of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
// -> pd_last_frame should use a specialized version of pd_last_frame which could
// call a specialized frame constructor instead of this one.
// Then we could use the assert below. However this assert is of somewhat dubious
// value.
// assert(_pc != NULL, "no pc?");
_cb = CodeCache::find_blob(_pc);
// In case of native stubs, the pc retrieved here might be
// wrong. (the _last_native_pc will have the right value)
// So do not add any asserts on the _pc here.
// QQQ The above comment is wrong and has been wrong for years. This constructor
// should (and MUST) not be called in that situation. In the native situation
// the pc should be supplied to the constructor.
_deopt_state = not_deoptimized;
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
_pc = (((nmethod*)_cb)->get_original_pc(this));
......
......@@ -1632,7 +1632,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// We need to prepare to execute the OSR method. First we must
// migrate the locals and monitors off of the stack.
__ movl(rsi, rax); // save the nmethod
__ movl(rbx, rax); // save the nmethod
const Register thread = rcx;
__ get_thread(thread);
......@@ -1688,7 +1688,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ pushl(rdi);
// and begin the OSR nmethod
__ jmp(Address(rsi, nmethod::osr_entry_point_offset()));
__ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
}
}
}
......
......@@ -50,17 +50,6 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
// even if isInJava == true. It should be more reliable than
// ucontext info.
if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
#if 0
// This sanity check may not be needed with the new frame
// walking code. Remove it for now.
if (!jt->frame_anchor()->post_Java_state_is_pc()
&& frame::next_younger_sp_or_null(last_Java_sp(),
jt->frame_anchor()->post_Java_sp()) == NULL) {
// the anchor contains an SP, but the frame is not walkable
// because post_Java_sp isn't valid relative to last_Java_sp
return false;
}
#endif
*fr_addr = jt->pd_last_frame();
return true;
}
......@@ -77,23 +66,59 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
return false;
}
frame ret_frame(ret_sp, frame::unpatchable, addr.pc());
// we were running Java code when SIGPROF came in
if (isInJava) {
// If the frame we got is safe then it is most certainly valid
if (ret_frame.safe_for_sender(jt)) {
*fr_addr = ret_frame;
return true;
}
// If it isn't safe then we can try several things to get
// a good starting point.
//
// On sparc the frames are almost certainly walkable in the sense
// of sp/fp linkages. However, because of register window recycling, if
// a piece of code does multiple saves where the initial save creates
// a real frame with a return pc and the succeeding saves are used
// simply to get free registers and have no real pc, then the pc linkage on these
// "inner" temporary frames will be bogus.
// Since such nesting is in general only one level deep,
// we'll try to unwind such an "inner" frame
// here ourselves and see if it makes sense
frame unwind_frame(ret_frame.fp(), frame::unpatchable, addr.pc());
if (unwind_frame.safe_for_sender(jt)) {
*fr_addr = unwind_frame;
return true;
}
// Well that didn't work. Most likely we're toast on this tick.
// The previous code would try this. I think it is dubious in light
// of changes to safe_for_sender and the unwind trick above, but
// if it gets us a safe frame who wants to argue.
// If we have a last_Java_sp, then the SIGPROF signal caught us
// right when we were transitioning from _thread_in_Java to a new
// JavaThreadState. We use last_Java_sp instead of the sp from
// the ucontext since it should be more reliable.
if (jt->has_last_Java_frame()) {
ret_sp = jt->last_Java_sp();
frame ret_frame2(ret_sp, frame::unpatchable, addr.pc());
if (ret_frame2.safe_for_sender(jt)) {
*fr_addr = ret_frame2;
return true;
}
}
// Implied else: we don't have a last_Java_sp so we use what we
// got from the ucontext.
frame ret_frame(ret_sp, frame::unpatchable, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {
// nothing else to try if the frame isn't good
return false;
}
// This is the best we can do. We will only be able to decode the top frame
*fr_addr = ret_frame;
return true;
}
......@@ -105,17 +130,13 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
if (jt->has_last_Java_frame()) {
assert(!jt->frame_anchor()->walkable(), "case covered above");
if (jt->thread_state() == _thread_in_native) {
frame ret_frame(jt->last_Java_sp(), frame::unpatchable, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {
// nothing else to try if the frame isn't good
return false;
}
*fr_addr = ret_frame;
return true;
}
frame ret_frame(jt->last_Java_sp(), frame::unpatchable, addr.pc());
*fr_addr = ret_frame;
return true;
}
// nothing else to try
return false;
// nothing else to try but what we found initially
*fr_addr = ret_frame;
return true;
}
......@@ -212,7 +212,8 @@ frame os::current_frame() {
CAST_FROM_FN_PTR(address, os::current_frame));
if (os::is_first_C_frame(&myframe)) {
// stack is not walkable
return frame(NULL, NULL, NULL);
frame ret; // This will be a null useless frame
return ret;
} else {
return os::get_sender_for_C_frame(&myframe);
}
......
......@@ -32,49 +32,53 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
assert(Thread::current() == this, "caller must be current thread");
assert(this->is_Java_thread(), "must be JavaThread");
JavaThread* jt = (JavaThread *)this;
// If we have a last_Java_frame, then we should use it even if
// isInJava == true. It should be more reliable than ucontext info.
// last_Java_frame is always walkable and safe; use it if we have it
if (jt->has_last_Java_frame()) {
*fr_addr = jt->pd_last_frame();
return true;
}
// At this point, we don't have a last_Java_frame, so
// we try to glean some information out of the ucontext
// if we were running Java code when SIGPROF came in.
if (isInJava) {
ucontext_t* uc = (ucontext_t*) ucontext;
intptr_t* ret_fp;
intptr_t* ret_sp;
ExtendedPC addr = os::Solaris::fetch_frame_from_ucontext(this, uc,
&ret_sp, &ret_fp);
if (addr.pc() == NULL || ret_sp == NULL ) {
// ucontext wasn't useful
return false;
}
ucontext_t* uc = (ucontext_t*) ucontext;
// We always want to use the initial frame we create from the ucontext as
// it certainly signals where we currently are. However that frame may not
// be safe for calling sender. In that case if we have a last_Java_frame
// then the forte walker will switch to that frame as the virtual sender
// for the frame we create here which is not sender safe.
frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {
#ifdef COMPILER2
frame ret_frame2(ret_sp, NULL, addr.pc());
if (!ret_frame2.safe_for_sender(jt)) {
// nothing else to try if the frame isn't good
return false;
}
ret_frame = ret_frame2;
#else
// nothing else to try if the frame isn't good
return false;
#endif /* COMPILER2 */
intptr_t* ret_fp;
intptr_t* ret_sp;
ExtendedPC addr = os::Solaris::fetch_frame_from_ucontext(this, uc, &ret_sp, &ret_fp);
// Something would really have to be screwed up to get a NULL pc
if (addr.pc() == NULL ) {
assert(false, "NULL pc from signal handler!");
return false;
}
// If sp and fp are nonsense just leave them out
if ((address)ret_sp >= jt->stack_base() ||
(address)ret_sp < jt->stack_base() - jt->stack_size() ) {
ret_sp = NULL;
ret_fp = NULL;
} else {
// sp is reasonable; is fp reasonable?
if ( (address)ret_fp >= jt->stack_base() || ret_fp < ret_sp) {
ret_fp = NULL;
}
*fr_addr = ret_frame;
return true;
}
// nothing else to try
return false;
frame ret_frame(ret_sp, ret_fp, addr.pc());
*fr_addr = ret_frame;
return true;
}
......@@ -71,7 +71,22 @@ class CodeCache : AllStatic {
// what you are doing)
static CodeBlob* find_blob_unsafe(void* start) {
CodeBlob* result = (CodeBlob*)_heap->find_start(start);
assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
// this assert is too strong because the heap code will return the
// heapblock containing start. That block can often be larger than
// the codeBlob itself. If you look up an address that is within
// the heapblock but not in the codeBlob you will assert.
//
// Most things will not look up such bad addresses. However
// AsyncGetCallTrace can see intermediate frames and get that kind
// of invalid address and so can a developer using hsfind.
//
// The more correct answer is to return NULL if blob_contains() returns
// false.
// assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
if (result != NULL && !result->blob_contains((address)start)) {
result = NULL;
}
return result;
}
......
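Because find_blob_unsafe now degrades to NULL for interior or stale addresses instead of tripping the old assert, callers probing arbitrary pcs can simply test the result. A hedged sketch of such a probe (the helper is hypothetical; find_blob_unsafe and is_nmethod are the real CodeCache/CodeBlob methods):

// Hypothetical probe, for illustration only: safe to feed a possibly-bogus pc.
static bool sketch_pc_looks_like_compiled_java(address pc) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(pc);  // NULL unless pc lies inside a real blob
  return cb != NULL && cb->is_nmethod();
}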
This diff is collapsed.
......@@ -924,29 +924,23 @@ void FlatProfilerTask::task() {
FlatProfiler::record_thread_ticks();
}
void ThreadProfiler::record_interpreted_tick(frame fr, TickPosition where, int* ticks) {
void ThreadProfiler::record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks) {
FlatProfiler::all_int_ticks++;
if (!FlatProfiler::full_profile()) {
return;
}
if (!fr.is_interpreted_frame_valid()) {
if (!fr.is_interpreted_frame_valid(thread)) {
// tick came at a bad time
interpreter_ticks += 1;
FlatProfiler::interpreter_ticks += 1;
return;
}
methodOop method = NULL;
if (fr.fp() != NULL) {
method = *fr.interpreter_frame_method_addr();
}
if (!Universe::heap()->is_valid_method(method)) {
// tick came at a bad time, stack frame not initialized correctly
interpreter_ticks += 1;
FlatProfiler::interpreter_ticks += 1;
return;
}
// The frame has been fully validated so we can trust the method and bci
methodOop method = *fr.interpreter_frame_method_addr();
interpreted_update(method, where);
// update byte code table
......@@ -997,7 +991,7 @@ void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr)
// The tick happened in real code -> non VM code
if (fr.is_interpreted_frame()) {
interval_data_ref()->inc_interpreted();
record_interpreted_tick(fr, tp_code, FlatProfiler::bytecode_ticks);
record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks);
return;
}
......@@ -1028,7 +1022,7 @@ void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr)
// The tick happened in VM code
interval_data_ref()->inc_native();
if (fr.is_interpreted_frame()) {
record_interpreted_tick(fr, tp_native, FlatProfiler::bytecode_ticks_stub);
record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub);
return;
}
if (CodeCache::contains(fr.pc())) {
......
......@@ -135,7 +135,7 @@ private:
ProfilerNode** table;
private:
void record_interpreted_tick(frame fr, TickPosition where, int* ticks);
void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
void record_compiled_tick (JavaThread* thread, frame fr, TickPosition where);
void interpreted_update(methodOop method, TickPosition where);
void compiled_update (methodOop method, TickPosition where);
......
......@@ -108,7 +108,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
bool is_first_frame() const; // oldest frame? (has no sender)
bool is_first_java_frame() const; // same for Java frame
bool is_interpreted_frame_valid() const; // performs sanity checks on interpreted frames.
bool is_interpreted_frame_valid(JavaThread* thread) const; // performs sanity checks on interpreted frames.
// tells whether this frame is marked for deoptimization
bool should_be_deoptimized() const;
......
......@@ -416,6 +416,48 @@ inline bool vframeStreamCommon::fill_from_frame() {
int decode_offset;
if (pc_desc == NULL) {
// Should not happen, but let fill_from_compiled_frame handle it.
// If we are trying to walk the stack of a thread that is not
// at a safepoint (like AsyncGetCallTrace would do) then this is an
// acceptable result. [ This is assuming that safe_for_sender
// is so bullet proof that we can trust the frames it produced. ]
//
// So if we see that the thread is not safepoint safe
// then simply produce the method and a bci of zero
// and skip the possibility of decoding any inlining that
// may be present. That is far better than simply stopping (or
// asserting). If, however, the thread is safepoint safe, this
// is the sign of a compiler bug and we'll let
// fill_from_compiled_frame handle it.
JavaThreadState state = _thread->thread_state();
// in_Java should be good enough to test safepoint safety
// if state were say in_Java_trans then we'd expect that
// the pc would have already been slightly adjusted to
// one that would produce a pcDesc since the trans state
// would be one that might in fact anticipate a safepoint
if (state == _thread_in_Java ) {
// This will get a method, a zero bci, and no inlining.
// Might be nice to have a unique bci to signify this
// particular case but for now zero will do.
fill_from_compiled_native_frame();
// There is something to be said for setting the mode to
// at_end_mode to prevent trying to walk further up the
// stack. There is evidence that if we walk any further
// we could produce a bad stack chain. However, until
// we see evidence that allowing this causes us to find
// frames bad enough to cause segv's or assertion failures,
// we don't do it: while we may get a bad call chain, the
// probability is much higher (by several orders of magnitude) that we
// get good data.
return true;
}
decode_offset = DebugInformationRecorder::serialized_null;
} else {
decode_offset = pc_desc->scope_decode_offset();
......