From 348f2314050539fdf93cf5a6d9b79e1c0d7f5b6c Mon Sep 17 00:00:00 2001
From: neugens
Date: Fri, 14 Feb 2020 17:13:16 +0100
Subject: [PATCH] 8238589: Necessary code cleanup in JFR for JDK8u

Reviewed-by: shade, apetushkov
---
 src/os/linux/vm/perfMemory_linux.cpp          |  1 -
 src/share/vm/classfile/systemDictionary.cpp   | 31 ++++----
 src/share/vm/compiler/compileBroker.cpp       |  2 +-
 .../gc_implementation/shared/gcTraceSend.cpp  | 76 ------------------------
 src/share/vm/opto/superword.hpp               | 52 ++++++-------
 src/share/vm/runtime/biasedLocking.cpp        |  8 ++
 src/share/vm/runtime/mutexLocker.cpp          |  2 +-
 src/share/vm/runtime/mutexLocker.hpp          |  2 +-
 src/share/vm/runtime/synchronizer.cpp         |  2 -
 9 files changed, 52 insertions(+), 124 deletions(-)

diff --git a/src/os/linux/vm/perfMemory_linux.cpp b/src/os/linux/vm/perfMemory_linux.cpp
index 173f2bf6c..8293b7168 100644
--- a/src/os/linux/vm/perfMemory_linux.cpp
+++ b/src/os/linux/vm/perfMemory_linux.cpp
@@ -34,7 +34,6 @@
 #include "utilities/exceptions.hpp"
 
 // put OS-includes here
-#include
 # include
 # include
 # include
diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
index 4ce6a3d96..05b196aeb 100644
--- a/src/share/vm/classfile/systemDictionary.cpp
+++ b/src/share/vm/classfile/systemDictionary.cpp
@@ -617,7 +617,7 @@ static void post_class_load_event(EventClassLoad &event,
                                     (ClassLoaderData*)NULL);
     event.commit();
   }
-#endif // INCLUDE_JFR
+#endif
 }
 
 Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
@@ -1337,24 +1337,23 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha
     if (!k.is_null()) {
       k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
     }
+
 #if INCLUDE_JFR
-    else {
+    if (k.is_null() && (class_name == jfr_event_handler_proxy)) {
       assert(jfr_event_handler_proxy != NULL, "invariant");
-      if (class_name == jfr_event_handler_proxy) {
-        // EventHandlerProxy class is generated dynamically in
-        // EventHandlerProxyCreator::makeEventHandlerProxyClass
-        // method, so we generate a Java call from here.
-        //
-        // EventHandlerProxy class will finally be defined in
-        // SystemDictionary::resolve_from_stream method, down
-        // the call stack. Bootstrap classloader is parallel-capable,
-        // so no concurrency issues are expected.
-        CLEAR_PENDING_EXCEPTION;
-        k = JfrUpcalls::load_event_handler_proxy_class(THREAD);
-        assert(!k.is_null(), "invariant");
-      }
+      // EventHandlerProxy class is generated dynamically in
+      // EventHandlerProxyCreator::makeEventHandlerProxyClass
+      // method, so we generate a Java call from here.
+      //
+      // EventHandlerProxy class will finally be defined in
+      // SystemDictionary::resolve_from_stream method, down
+      // the call stack. Bootstrap classloader is parallel-capable,
+      // so no concurrency issues are expected.
+      CLEAR_PENDING_EXCEPTION;
+      k = JfrUpcalls::load_event_handler_proxy_class(THREAD);
+      assert(!k.is_null(), "invariant");
     }
-#endif // INCLUDE_JFR
+#endif
     return k;
   } else {
diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp
index 89836911e..dc9a9fcf9 100644
--- a/src/share/vm/compiler/compileBroker.cpp
+++ b/src/share/vm/compiler/compileBroker.cpp
@@ -2023,7 +2023,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
     compilable = ci_env.compilable();
 
     if (ci_env.failing()) {
-      const char *failure_reason = ci_env.failure_reason();
+      const char* failure_reason = ci_env.failure_reason();
       const char* retry_message = ci_env.retry_message();
       task->set_failure_reason(failure_reason);
       if (_compilation_log != NULL) {
diff --git a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
index f877e3bea..d74e95aa5 100644
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
@@ -233,82 +233,6 @@ void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_in
   }
 }
 
-// XXX
-//static JfrStructG1EvacuationStatistics
-//create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
-//  JfrStructG1EvacuationStatistics s;
-//  s.set_gcId(gcid);
-//  s.set_allocated(summary.allocated() * HeapWordSize);
-//  s.set_wasted(summary.wasted() * HeapWordSize);
-//  s.set_used(summary.used() * HeapWordSize);
-//  s.set_undoWaste(summary.undo_wasted() * HeapWordSize);
-//  s.set_regionEndWaste(summary.region_end_waste() * HeapWordSize);
-//  s.set_regionsRefilled(summary.regions_filled());
-//  s.set_directAllocated(summary.direct_allocated() * HeapWordSize);
-//  s.set_failureUsed(summary.failure_used() * HeapWordSize);
-//  s.set_failureWaste(summary.failure_waste() * HeapWordSize);
-//  return s;
-//}
-//
-//void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
-//  EventG1EvacuationYoungStatistics surv_evt;
-//  if (surv_evt.should_commit()) {
-//    surv_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
-//    surv_evt.commit();
-//  }
-//}
-//
-//void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
-//  EventG1EvacuationOldStatistics old_evt;
-//  if (old_evt.should_commit()) {
-//    old_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
-//    old_evt.commit();
-//  }
-//}
-//
-//void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
-//                                             size_t target_occupancy,
-//                                             size_t current_occupancy,
-//                                             size_t last_allocation_size,
-//                                             double last_allocation_duration,
-//                                             double last_marking_length) {
-//  EventG1BasicIHOP evt;
-//  if (evt.should_commit()) {
-//    evt.set_gcId(_shared_gc_info.gc_id().id());
-//    evt.set_threshold(threshold);
-//    evt.set_targetOccupancy(target_occupancy);
-//    evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
-//    evt.set_currentOccupancy(current_occupancy);
-//    evt.set_recentMutatorAllocationSize(last_allocation_size);
-//    evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
-//    evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
-//    evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
-//    evt.commit();
-//  }
-//}
-//
-//void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
-//                                                size_t internal_target_occupancy,
-//                                                size_t current_occupancy,
-//                                                size_t additional_buffer_size,
-//                                                double predicted_allocation_rate,
-//                                                double predicted_marking_length,
-//                                                bool prediction_active) {
-//  EventG1AdaptiveIHOP evt;
-//  if (evt.should_commit()) {
-//    evt.set_gcId(_shared_gc_info.gc_id().id());
-//    evt.set_threshold(threshold);
-//    evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
-//    evt.set_ihopTargetOccupancy(internal_target_occupancy);
-//    evt.set_currentOccupancy(current_occupancy);
-//    evt.set_additionalBufferSize(additional_buffer_size);
-//    evt.set_predictedAllocationRate(predicted_allocation_rate);
-//    evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
-//    evt.set_predictionActive(prediction_active);
-//    evt.commit();
-//  }
-//}
-
 #endif // INCLUDE_ALL_GCS
 
 static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) {
diff --git a/src/share/vm/opto/superword.hpp b/src/share/vm/opto/superword.hpp
index 45b767cea..138d65990 100644
--- a/src/share/vm/opto/superword.hpp
+++ b/src/share/vm/opto/superword.hpp
@@ -201,32 +201,6 @@ class SWNodeInfo VALUE_OBJ_CLASS_SPEC {
 
   static const SWNodeInfo initial;
 };
-
-// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
-//------------------------------OrderedPair---------------------------
-// Ordered pair of Node*.
-class OrderedPair VALUE_OBJ_CLASS_SPEC {
- protected:
-  Node* _p1;
-  Node* _p2;
- public:
-  OrderedPair() : _p1(NULL), _p2(NULL) {}
-  OrderedPair(Node* p1, Node* p2) {
-    if (p1->_idx < p2->_idx) {
-      _p1 = p1; _p2 = p2;
-    } else {
-      _p1 = p2; _p2 = p1;
-    }
-  }
-
-  bool operator==(const OrderedPair &rhs) {
-    return _p1 == rhs._p1 && _p2 == rhs._p2;
-  }
-  void print() { tty->print(" (%d, %d)", _p1->_idx, _p2->_idx); }
-
-  static const OrderedPair initial;
-};
-
 // -----------------------------SuperWord---------------------------------
 // Transforms scalar operations into packed (superword) operations.
 class SuperWord : public ResourceObj {
@@ -450,6 +424,7 @@ class SuperWord : public ResourceObj {
 };
 
 
+
 //------------------------------SWPointer---------------------------
 // Information about an address for dependence checking and vector alignment
 class SWPointer VALUE_OBJ_CLASS_SPEC {
@@ -531,4 +506,29 @@ class SWPointer VALUE_OBJ_CLASS_SPEC {
   void print();
 };
 
+
+//------------------------------OrderedPair---------------------------
+// Ordered pair of Node*.
+class OrderedPair VALUE_OBJ_CLASS_SPEC {
+ protected:
+  Node* _p1;
+  Node* _p2;
+ public:
+  OrderedPair() : _p1(NULL), _p2(NULL) {}
+  OrderedPair(Node* p1, Node* p2) {
+    if (p1->_idx < p2->_idx) {
+      _p1 = p1; _p2 = p2;
+    } else {
+      _p1 = p2; _p2 = p1;
+    }
+  }
+
+  bool operator==(const OrderedPair &rhs) {
+    return _p1 == rhs._p1 && _p2 == rhs._p2;
+  }
+  void print() { tty->print(" (%d, %d)", _p1->_idx, _p2->_idx); }
+
+  static const OrderedPair initial;
+};
+
 #endif // SHARE_VM_OPTO_SUPERWORD_HPP
diff --git a/src/share/vm/runtime/biasedLocking.cpp b/src/share/vm/runtime/biasedLocking.cpp
index 4a2a8ab1f..0dbce6d15 100644
--- a/src/share/vm/runtime/biasedLocking.cpp
+++ b/src/share/vm/runtime/biasedLocking.cpp
@@ -256,10 +256,12 @@ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_
     }
   }
 
+#if INCLUDE_JFR
   // If requested, return information on which thread held the bias
   if (biased_locker != NULL) {
     *biased_locker = biased_thread;
   }
+#endif // INCLUDE_JFR
 
   return BiasedLocking::BIAS_REVOKED;
 }
@@ -497,11 +499,15 @@ public:
       if (TraceBiasedLocking) {
         tty->print_cr("Revoking bias with potentially per-thread safepoint:");
      }
+
      JavaThread* biased_locker = NULL;
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
+#if INCLUDE_JFR
      if (biased_locker != NULL) {
        _biased_locker_id = JFR_THREAD_ID(biased_locker);
      }
+#endif // INCLUDE_JFR
+
      clean_up_cached_monitor_info();
      return;
    } else {
@@ -516,9 +522,11 @@ public:
    return _status_code;
  }
 
+#if INCLUDE_JFR
  traceid biased_locker() const {
    return _biased_locker_id;
  }
+#endif // INCLUDE_JFR
 };
 
 
diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp
index 65dc0b070..320564df8 100644
--- a/src/share/vm/runtime/mutexLocker.cpp
+++ b/src/share/vm/runtime/mutexLocker.cpp
@@ -284,7 +284,7 @@ void mutex_init() {
   def(CompileThread_lock          , Monitor, nonleaf+5,   false );
   def(PeriodicTask_lock           , Monitor, nonleaf+5,   true);
 
-#ifdef INCLUDE_JFR
+#if INCLUDE_JFR
   def(JfrMsg_lock                 , Monitor, leaf,        true);
   def(JfrBuffer_lock              , Mutex,   leaf,        true);
   def(JfrThreadGroups_lock        , Mutex,   leaf,        true);
diff --git a/src/share/vm/runtime/mutexLocker.hpp b/src/share/vm/runtime/mutexLocker.hpp
index c431735b9..3875493b2 100644
--- a/src/share/vm/runtime/mutexLocker.hpp
+++ b/src/share/vm/runtime/mutexLocker.hpp
@@ -142,7 +142,7 @@ extern Mutex* Management_lock; // a lock used to serialize JVM
 extern Monitor* Service_lock;                    // a lock used for service thread operation
 extern Monitor* PeriodicTask_lock;               // protects the periodic task structure
 
-#ifdef INCLUDE_JFR
+#if INCLUDE_JFR
 extern Mutex*   JfrStacktrace_lock;              // used to guard access to the JFR stacktrace table
 extern Monitor* JfrMsg_lock;                     // protects JFR messaging
 extern Mutex*   JfrBuffer_lock;                  // protects JFR buffer operations
diff --git a/src/share/vm/runtime/synchronizer.cpp b/src/share/vm/runtime/synchronizer.cpp
index 1bb6bb13d..befe7ccf8 100644
--- a/src/share/vm/runtime/synchronizer.cpp
+++ b/src/share/vm/runtime/synchronizer.cpp
@@ -1185,8 +1185,6 @@ static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
   assert(event->should_commit(), "invariant");
   event->set_monitorClass(obj->klass());
   event->set_address((uintptr_t)(void*)obj);
-  // XXX no such counters. implement?
-//  event->set_cause((u1)cause);
   event->commit();
 }
 
-- 
GitLab
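
Note on the "#ifdef INCLUDE_JFR" -> "#if INCLUDE_JFR" changes in mutexLocker.cpp and mutexLocker.hpp: HotSpot's INCLUDE_* feature macros are conventionally always defined, to either 0 or 1, so an #ifdef test passes even when the feature is disabled; only #if honors the 0/1 value. The snippet below is a minimal standalone sketch of that distinction, not HotSpot code, and the hard-coded INCLUDE_JFR value is hypothetical.

  // Standalone illustration: #ifdef vs. #if for a feature macro defined as 0 or 1.
  #include <cstdio>

  #define INCLUDE_JFR 0   // hypothetical build with the feature disabled

  int main() {
  #ifdef INCLUDE_JFR
    // Compiled in even though the feature is off: the macro is defined (as 0).
    std::printf("#ifdef INCLUDE_JFR: branch included\n");
  #endif
  #if INCLUDE_JFR
    std::printf("#if INCLUDE_JFR: branch included\n");
  #else
    // Correctly selected when INCLUDE_JFR expands to 0.
    std::printf("#if INCLUDE_JFR: branch excluded\n");
  #endif
    return 0;
  }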