Commit 203bc3b0 authored by D dcubed

Merge

......@@ -2465,7 +2465,10 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
// InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
// SharedRuntime::dtrace_method_entry(method, reciever);
// SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
// SharedRuntime::rc_trace_method_entry(method, receiver);
// }
void InterpreterMacroAssembler::notify_method_entry() {
......@@ -2497,6 +2500,13 @@ void InterpreterMacroAssembler::notify_method_entry() {
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
G2_thread, Lmethod);
}
// RedefineClasses() tracing support for obsolete method entry
if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
call_VM_leaf(noreg,
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
G2_thread, Lmethod);
}
}
......
......@@ -2161,6 +2161,18 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ restore();
}
// RedefineClasses() tracing support for obsolete method entry
if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
// create inner frame
__ save_frame(0);
__ mov(G2_thread, L7_thread_cache);
__ set_oop_constant(JNIHandles::make_local(method()), O1);
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
G2_thread, O1);
__ restore();
}
// We are in the jni frame unless saved_frame is true in which case
// we are in one frame deeper (the "inner" frame). If we are in the
// "inner" frames the args are in the Iregs and if the jni frame then
......
......@@ -1512,6 +1512,15 @@ void InterpreterMacroAssembler::notify_method_entry() {
call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), rcx, rbx);
}
// RedefineClasses() tracing support for obsolete method entry
if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
get_thread(rcx);
get_method(rbx);
call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
rcx, rbx);
}
}
......
......@@ -1593,6 +1593,14 @@ void InterpreterMacroAssembler::notify_method_entry() {
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
r15_thread, c_rarg1);
}
// RedefineClasses() tracing support for obsolete method entry
if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
get_method(c_rarg1);
call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
r15_thread, c_rarg1);
}
}
......
......@@ -1534,6 +1534,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
thread, rax);
}
// RedefineClasses() tracing support for obsolete method entry
if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
__ movoop(rax, JNIHandles::make_local(method()));
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
thread, rax);
}
// These are register definitions we need for locking/unlocking
const Register swap_reg = rax; // Must use rax, for cmpxchg instruction
......
......@@ -1508,6 +1508,17 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
restore_args(masm, total_c_args, c_arg, out_regs);
}
// RedefineClasses() tracing support for obsolete method entry
if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
// protect the args we've loaded
save_args(masm, total_c_args, c_arg, out_regs);
__ movoop(c_rarg1, JNIHandles::make_local(method()));
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
r15_thread, c_rarg1);
restore_args(masm, total_c_args, c_arg, out_regs);
}
// Lock a synchronized method
// Register definitions used by locking and unlocking
......
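The save_args()/restore_args() pair around the new call matters because the native wrapper has already placed the outgoing C arguments in their registers; a call into the VM is free to clobber them, so they are spilled before the tracing call and reloaded afterwards ("protect the args we've loaded"). A stand-alone sketch of that spill-around-call idea, using illustrative names rather than HotSpot's actual MacroAssembler API:

// Hypothetical sketch only: the struct and functions stand in for argument
// registers and for HotSpot's save_args()/restore_args() helpers.
#include <cstdio>

struct ArgRegs { long rdi, rsi, rdx; };      // stand-ins for outgoing C args

static void vm_leaf_call(ArgRegs& regs) {
  // A call into the VM may reuse the argument registers for its own work.
  regs.rdi = regs.rsi = regs.rdx = 0;
}

int main() {
  ArgRegs args = {1, 2, 3};   // args already loaded for the native call
  ArgRegs saved = args;       // save_args(): spill them somewhere safe
  vm_leaf_call(args);         // tracing call may clobber them
  args = saved;               // restore_args(): reload before the real call
  std::printf("%ld %ld %ld\n", args.rdi, args.rsi, args.rdx);  // prints: 1 2 3
  return 0;
}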
......@@ -2094,6 +2094,7 @@ interp_masm_<arch_model>.cpp interp_masm_<arch_model>.hpp
interp_masm_<arch_model>.cpp interpreterRuntime.hpp
interp_masm_<arch_model>.cpp interpreter.hpp
interp_masm_<arch_model>.cpp jvmtiExport.hpp
interp_masm_<arch_model>.cpp jvmtiRedefineClassesTrace.hpp
interp_masm_<arch_model>.cpp jvmtiThreadState.hpp
interp_masm_<arch_model>.cpp markOop.hpp
interp_masm_<arch_model>.cpp methodDataOop.hpp
......@@ -3670,6 +3671,7 @@ sharedRuntime.cpp interpreterRuntime.hpp
sharedRuntime.cpp interpreter.hpp
sharedRuntime.cpp javaCalls.hpp
sharedRuntime.cpp jvmtiExport.hpp
sharedRuntime.cpp jvmtiRedefineClassesTrace.hpp
sharedRuntime.cpp nativeInst_<arch>.hpp
sharedRuntime.cpp nativeLookup.hpp
sharedRuntime.cpp oop.inline.hpp
......@@ -3699,6 +3701,7 @@ sharedRuntime_<arch_model>.cpp compiledICHolderOop.hpp
sharedRuntime_<arch_model>.cpp debugInfoRec.hpp
sharedRuntime_<arch_model>.cpp icBuffer.hpp
sharedRuntime_<arch_model>.cpp interpreter.hpp
sharedRuntime_<arch_model>.cpp jvmtiRedefineClassesTrace.hpp
sharedRuntime_<arch_model>.cpp sharedRuntime.hpp
sharedRuntime_<arch_model>.cpp vframeArray.hpp
sharedRuntime_<arch_model>.cpp vmreg_<arch>.inline.hpp
......
......@@ -992,6 +992,10 @@ void klassItable::adjust_method_entries(methodOop* old_methods, methodOop* new_m
methodOop new_method = new_methods[j];
itableMethodEntry* ime = method_entry(0);
// The itable can describe more than one interface and the same
// method signature can be specified by more than one interface.
// This means we have to do an exhaustive search to find all the
// old_method references.
for (int i = 0; i < _size_method_table; i++) {
if (ime->method() == old_method) {
ime->initialize(new_method);
......@@ -1008,7 +1012,6 @@ void klassItable::adjust_method_entries(methodOop* old_methods, methodOop* new_m
new_method->name()->as_C_string(),
new_method->signature()->as_C_string()));
}
break;
}
ime++;
}
......
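The removed break is the point of this hunk: as the new comment explains, the same method signature can be reachable through more than one interface, so a single itable may contain several entries that still point at the old methodOop, and the scan must visit every slot. A simplified, self-contained sketch of that exhaustive update (the Entry and Method types are stand-ins, not HotSpot's itableMethodEntry/methodOop):

// Simplified sketch of the exhaustive itable fix-up; all types are hypothetical.
#include <vector>

struct Method {};                      // stand-in for methodOop
struct Entry { Method* method; };      // stand-in for itableMethodEntry

// Replace every entry that still refers to old_m; do NOT stop at the first
// match, because several interfaces can map to the same implementation.
static int adjust_entries(std::vector<Entry>& itable,
                          Method* old_m, Method* new_m) {
  int updated = 0;
  for (Entry& e : itable) {
    if (e.method == old_m) {
      e.method = new_m;
      ++updated;                       // keep scanning: no early break
    }
  }
  return updated;
}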
......@@ -99,6 +99,9 @@ JvmtiEnv::SetThreadLocalStorage(JavaThread* java_thread, const void* data) {
}
// otherwise, create the state
state = JvmtiThreadState::state_for(java_thread);
if (state == NULL) {
return JVMTI_ERROR_THREAD_NOT_ALIVE;
}
}
state->env_thread_state(this)->set_agent_thread_local_storage_data((void*)data);
return JVMTI_ERROR_NONE;
......@@ -1308,6 +1311,9 @@ JvmtiEnv::GetFrameCount(JavaThread* java_thread, jint* count_ptr) {
// retrieve or create JvmtiThreadState.
JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
if (state == NULL) {
return JVMTI_ERROR_THREAD_NOT_ALIVE;
}
uint32_t debug_bits = 0;
if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
err = get_frame_count(state, count_ptr);
......@@ -1329,6 +1335,12 @@ JvmtiEnv::PopFrame(JavaThread* java_thread) {
HandleMark hm(current_thread);
uint32_t debug_bits = 0;
// retrieve or create the state
JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
if (state == NULL) {
return JVMTI_ERROR_THREAD_NOT_ALIVE;
}
// Check if java_thread is fully suspended
if (!is_thread_fully_suspended(java_thread, true /* wait for suspend completion */, &debug_bits)) {
return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
......@@ -1399,9 +1411,6 @@ JvmtiEnv::PopFrame(JavaThread* java_thread) {
// It's fine to update the thread state here because no JVMTI events
// shall be posted for this PopFrame.
// retreive or create the state
JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
state->update_for_pop_top_frame();
java_thread->set_popframe_condition(JavaThread::popframe_pending_bit);
// Set pending step flag for this popframe and it is cleared when next
......@@ -1445,6 +1454,11 @@ JvmtiEnv::NotifyFramePop(JavaThread* java_thread, jint depth) {
ResourceMark rm;
uint32_t debug_bits = 0;
JvmtiThreadState *state = JvmtiThreadState::state_for(java_thread);
if (state == NULL) {
return JVMTI_ERROR_THREAD_NOT_ALIVE;
}
if (!JvmtiEnv::is_thread_fully_suspended(java_thread, true, &debug_bits)) {
return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
}
......@@ -1464,7 +1478,6 @@ JvmtiEnv::NotifyFramePop(JavaThread* java_thread, jint depth) {
assert(vf->frame_pointer() != NULL, "frame pointer mustn't be NULL");
JvmtiThreadState *state = JvmtiThreadState::state_for(java_thread);
int frame_number = state->count_frames() - depth;
state->env_thread_state(this)->set_frame_pop(frame_number);
......
......@@ -94,6 +94,35 @@ JvmtiEnvBase::initialize() {
}
bool
JvmtiEnvBase::is_valid() {
jint value = 0;
// This object might not be a JvmtiEnvBase so we can't assume
// the _magic field is properly aligned. Get the value in a safe
// way and then check against JVMTI_MAGIC.
switch (sizeof(_magic)) {
case 2:
value = Bytes::get_native_u2((address)&_magic);
break;
case 4:
value = Bytes::get_native_u4((address)&_magic);
break;
case 8:
value = Bytes::get_native_u8((address)&_magic);
break;
default:
guarantee(false, "_magic field is an unexpected size");
}
return value == JVMTI_MAGIC;
}
JvmtiEnvBase::JvmtiEnvBase() : _env_event_enable() {
_env_local_storage = NULL;
_tag_map = NULL;
......@@ -1322,6 +1351,12 @@ JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState
HandleMark hm(current_thread);
uint32_t debug_bits = 0;
// retrieve or create the state
JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
if (state == NULL) {
return JVMTI_ERROR_THREAD_NOT_ALIVE;
}
// Check if java_thread is fully suspended
if (!is_thread_fully_suspended(java_thread,
true /* wait for suspend completion */,
......@@ -1329,9 +1364,6 @@ JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState
return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
}
// retreive or create the state
JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
// Check to see if a ForceEarlyReturn was already in progress
if (state->is_earlyret_pending()) {
// Probably possible for JVMTI clients to trigger this, but the
......
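The switch over sizeof(_magic) exists because is_valid() can be handed a pointer that is not really a JvmtiEnvBase (or JvmtiRawMonitor, below), so the _magic field cannot be assumed to be naturally aligned and a plain load could fault on strict-alignment platforms. A minimal sketch of the same alignment-safe read, using memcpy in place of HotSpot's Bytes::get_native_uX() helpers:

// Sketch only; the real code uses Bytes::get_native_u2/u4/u8 and compares
// against JVMTI_MAGIC or JVMTI_RM_MAGIC. Names here are illustrative.
#include <cstdint>
#include <cstring>

static std::uint32_t read_u4_unaligned(const void* p) {
  std::uint32_t v;
  std::memcpy(&v, p, sizeof(v));   // memcpy is legal for any alignment
  return v;
}

bool looks_valid(const void* candidate, std::size_t magic_offset,
                 std::uint32_t expected_magic) {
  // candidate may not point at a real object; read the magic word safely
  // and only then compare it against the expected constant.
  const char* base = static_cast<const char*>(candidate);
  return read_u4_unaligned(base + magic_offset) == expected_magic;
}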
......@@ -120,7 +120,7 @@ class JvmtiEnvBase : public CHeapObj {
public:
bool is_valid() { return _magic == JVMTI_MAGIC; }
bool is_valid();
bool is_retransformable() { return _is_retransformable; }
......
......@@ -478,6 +478,11 @@ JvmtiEventControllerPrivate::recompute_env_thread_enabled(JvmtiEnvThreadState* e
// set external state accordingly. Only thread-filtered events are included.
jlong
JvmtiEventControllerPrivate::recompute_thread_enabled(JvmtiThreadState *state) {
if (state == NULL) {
// associated JavaThread is exiting
return (jlong)0;
}
jlong was_any_env_enabled = state->thread_event_enable()->_event_enabled.get_bits();
jlong any_env_enabled = 0;
......@@ -553,6 +558,7 @@ JvmtiEventControllerPrivate::recompute_enabled() {
{
MutexLocker mu(Threads_lock); //hold the Threads_lock for the iteration
for (JavaThread *tp = Threads::first(); tp != NULL; tp = tp->next()) {
// state_for_while_locked() makes tp->is_exiting() check
JvmtiThreadState::state_for_while_locked(tp); // create the thread state if missing
}
}// release Threads_lock
......
......@@ -1872,6 +1872,9 @@ void JvmtiExport::post_dynamic_code_generated_while_holding_locks(const char* na
{
// register the stub with the current dynamic code event collector
JvmtiThreadState* state = JvmtiThreadState::state_for(JavaThread::current());
// state can only be NULL if the current thread is exiting which
// should not happen since we're trying to post an event
guarantee(state != NULL, "attempt to register stub via an exiting thread");
JvmtiDynamicCodeEventCollector* collector = state->get_dynamic_code_event_collector();
guarantee(collector != NULL, "attempt to register stub without event collector");
collector->register_stub(name, code_begin, code_end);
......@@ -2253,6 +2256,9 @@ void JvmtiExport::cms_ref_processing_epilogue() {
void JvmtiEventCollector::setup_jvmti_thread_state() {
// set this event collector to be the current one.
JvmtiThreadState* state = JvmtiThreadState::state_for(JavaThread::current());
// state can only be NULL if the current thread is exiting which
// should not happen since we're trying to configure for event collection
guarantee(state != NULL, "exiting thread called setup_jvmti_thread_state");
if (is_vm_object_alloc_event()) {
_prev = state->get_vm_object_alloc_event_collector();
state->set_vm_object_alloc_event_collector((JvmtiVMObjectAllocEventCollector *)this);
......
......@@ -238,6 +238,35 @@ JvmtiRawMonitor::~JvmtiRawMonitor() {
}
bool
JvmtiRawMonitor::is_valid() {
int value = 0;
// This object might not be a JvmtiRawMonitor so we can't assume
// the _magic field is properly aligned. Get the value in a safe
// way and then check against JVMTI_RM_MAGIC.
switch (sizeof(_magic)) {
case 2:
value = Bytes::get_native_u2((address)&_magic);
break;
case 4:
value = Bytes::get_native_u4((address)&_magic);
break;
case 8:
value = Bytes::get_native_u8((address)&_magic);
break;
default:
guarantee(false, "_magic field is an unexpected size");
}
return value == JVMTI_RM_MAGIC;
}
//
// class JvmtiBreakpoint
//
......
......@@ -349,7 +349,7 @@ public:
~JvmtiRawMonitor();
int magic() { return _magic; }
const char *get_name() { return _name; }
bool is_valid() { return _magic == JVMTI_RM_MAGIC; }
bool is_valid();
};
// Onload pending raw monitors
......
......@@ -831,6 +831,9 @@ jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) {
ResourceMark rm(THREAD);
JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current());
// state can only be NULL if the current thread is exiting which
// should not happen since we're trying to do a RedefineClasses
guarantee(state != NULL, "exiting thread calling load_new_class_versions");
for (int i = 0; i < _class_count; i++) {
oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass);
// classes for primitives cannot be redefined
......
......@@ -49,8 +49,8 @@
// 0x00000400 | 1024 - previous class weak reference mgmt during
// add previous ops (GC)
// 0x00000800 | 2048 - previous class breakpoint mgmt
// 0x00001000 | 4096 - unused
// 0x00002000 | 8192 - unused
// 0x00001000 | 4096 - detect calls to obsolete methods
// 0x00002000 | 8192 - fail a guarantee() in addition to detection
// 0x00004000 | 16384 - unused
// 0x00008000 | 32768 - old/new method matching/add/delete
// 0x00010000 | 65536 - impl details: CP size info
......
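These two newly documented bits are what the RC_TRACE_IN_RANGE(0x00001000, 0x00002000) guards added above test for: 0x00001000 (4096) enables detection and tracing of calls to obsolete methods, and 0x00002000 (8192) additionally fails a guarantee() when such a call is seen, so -XX:TraceRedefineClasses=4096 traces and 12288 (4096 + 8192) traces and aborts. A plausible sketch of a bit-range test over such a flag word follows; the real macro lives in jvmtiRedefineClassesTrace.hpp and may differ in detail:

// Hedged sketch: is any tracing bit in [low, high] set in the flag word?
// The function name and exact mask arithmetic are assumptions.
#include <cstdio>

static unsigned long TraceRedefineClasses = 0x00003000UL;  // 4096 + 8192

static bool rc_trace_in_range(unsigned long low, unsigned long high) {
  // keep only the bits from 'low' up to and including 'high'
  unsigned long mask = (high | (high - 1)) & ~(low - 1);
  return (TraceRedefineClasses & mask) != 0;
}

int main() {
  std::printf("%d\n", rc_trace_in_range(0x00001000, 0x00002000) ? 1 : 0);  // 1
  return 0;
}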
......@@ -314,17 +314,24 @@ class JvmtiThreadState : public CHeapObj {
void update_for_pop_top_frame();
// already holding JvmtiThreadState_lock - retrieve or create JvmtiThreadState
// Can return NULL if JavaThread is exiting.
inline static JvmtiThreadState *state_for_while_locked(JavaThread *thread) {
assert(JvmtiThreadState_lock->is_locked(), "sanity check");
JvmtiThreadState *state = thread->jvmti_thread_state();
if (state == NULL) {
if (thread->is_exiting()) {
// don't add a JvmtiThreadState to a thread that is exiting
return NULL;
}
state = new JvmtiThreadState(thread);
}
return state;
}
// retrieve or create JvmtiThreadState
// Can return NULL if JavaThread is exiting.
inline static JvmtiThreadState *state_for(JavaThread *thread) {
JvmtiThreadState *state = thread->jvmti_thread_state();
if (state == NULL) {
......
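With state_for() and state_for_while_locked() now able to return NULL for an exiting thread, every caller in this change follows one of two patterns: JVMTI entry points map NULL to JVMTI_ERROR_THREAD_NOT_ALIVE, while internal paths that should never run on an exiting thread turn NULL into a guarantee() failure. A compressed, stand-alone sketch of the two patterns; all types here are simplified stand-ins for the HotSpot originals:

// Sketch of the two NULL-handling patterns around state_for(); the enum,
// structs and functions are illustrative stand-ins, not HotSpot code.
#include <cassert>
#include <cstddef>

enum jvmtiError { JVMTI_ERROR_NONE, JVMTI_ERROR_THREAD_NOT_ALIVE };

struct JvmtiThreadState { /* lazily allocated per-thread JVMTI state */ };

struct JavaThread {
  bool exiting;
  JvmtiThreadState* state;
  // stand-in for JvmtiThreadState::state_for(): create lazily, but never
  // for a thread that is already on its way out.
  JvmtiThreadState* state_for() {
    if (state == NULL && !exiting) state = new JvmtiThreadState();
    return state;                    // NULL means "thread is exiting"
  }
};

// Pattern 1: a JVMTI entry point surfaces the condition to the agent.
jvmtiError some_jvmti_function(JavaThread* t) {
  JvmtiThreadState* s = t->state_for();
  if (s == NULL) return JVMTI_ERROR_THREAD_NOT_ALIVE;
  /* ... use s ... */
  return JVMTI_ERROR_NONE;
}

// Pattern 2: an internal path that must be on a live thread treats NULL as a bug.
void some_internal_setup(JavaThread* current) {
  JvmtiThreadState* s = current->state_for();
  assert(s != NULL && "exiting thread cannot set up event collection");
  /* ... use s ... */
}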
......@@ -377,6 +377,32 @@ void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop
throw_and_post_jvmti_exception(thread, h_exception);
}
// The interpreter code to call this tracing function is only
// called/generated when TraceRedefineClasses has the right bits
// set. Since obsolete methods are never compiled, we don't have
// to modify the compilers to generate calls to this function.
//
JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
JavaThread* thread, methodOopDesc* method))
assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call");
if (method->is_obsolete()) {
// We are calling an obsolete method, but this is not necessarily
// an error. Our method could have been redefined just after we
// fetched the methodOop from the constant pool.
// RC_TRACE macro has an embedded ResourceMark
RC_TRACE_WITH_THREAD(0x00001000, thread,
("calling obsolete method '%s'",
method->name_and_sig_as_C_string()));
if (RC_TRACE_ENABLED(0x00002000)) {
// this option is provided to debug calls to obsolete methods
guarantee(false, "faulting at call to an obsolete method.");
}
}
return 0;
JRT_END
// ret_pc points into caller; we are returning caller's exception handler
// for given exception
address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
......
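As the comment above rc_trace_method_entry() notes, the RC_TRACE_IN_RANGE guards in the interpreter and native-wrapper hunks are evaluated in C++ while those stubs are generated, not on every method entry, so the extra leaf call exists in the emitted code only when tracing was requested at VM startup; and because obsolete methods are never compiled, the JIT compilers need no matching change. A schematic sketch of that split, with a hypothetical emitter type in place of MacroAssembler:

// Schematic only: the 'if' runs once at code-generation time; the emitted
// method-entry path contains the tracing call only when the bits were set.
struct CodeEmitter {
  void emit_call(const char* /*target*/) { /* append a call instruction */ }
};

static bool rc_trace_requested = false;  // derived from -XX:TraceRedefineClasses

void generate_method_entry(CodeEmitter* masm) {
  // ... normal method-entry code generation ...
  if (rc_trace_requested) {                        // generation-time check
    masm->emit_call("SharedRuntime::rc_trace_method_entry");
  }
  // With tracing off nothing is emitted, so the entry path is unchanged;
  // obsolete methods stay interpreted, so the compilers are untouched.
}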
......@@ -166,6 +166,9 @@ class SharedRuntime: AllStatic {
static void throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception);
static void throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message = NULL);
// RedefineClasses() tracing support for obsolete method entry
static int rc_trace_method_entry(JavaThread* thread, methodOopDesc* m);
// To be used as the entry point for unresolved native methods.
static address native_method_throw_unsatisfied_link_error_entry();
......
......@@ -1345,6 +1345,13 @@ public:
public:
// Thread local information maintained by JVMTI.
void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; }
// A JvmtiThreadState is lazily allocated. This jvmti_thread_state()
// getter is used to get this JavaThread's JvmtiThreadState if it has
// one which means NULL can be returned. JvmtiThreadState::state_for()
// is used to get the specified JavaThread's JvmtiThreadState if it has
// one or it allocates a new JvmtiThreadState for the JavaThread and
// returns it. JvmtiThreadState::state_for() will return NULL only if
// the specified JavaThread is exiting.
JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; }
static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); }
void set_jvmti_get_loaded_classes_closure(JvmtiGetLoadedClassesClosure* value) { _jvmti_get_loaded_classes_closure = value; }
......