diff --git a/src/os/linux/vm/perfMemory_linux.cpp b/src/os/linux/vm/perfMemory_linux.cpp
index 173f2bf6cd360fd099b5eafc6ac1efb360b91173..8293b71685ad2065aa48fca5490a7dcce45d1ca7 100644
--- a/src/os/linux/vm/perfMemory_linux.cpp
+++ b/src/os/linux/vm/perfMemory_linux.cpp
@@ -34,7 +34,6 @@
 #include "utilities/exceptions.hpp"

 // put OS-includes here
-#include 
 # include 
 # include 
 # include 
diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
index 4ce6a3d9697fad7872000ccf8ec0efe7c4c3265b..05b196aebcafdfc5ae25f33945d3a3ee6bbf413e 100644
--- a/src/share/vm/classfile/systemDictionary.cpp
+++ b/src/share/vm/classfile/systemDictionary.cpp
@@ -617,7 +617,7 @@ static void post_class_load_event(EventClassLoad &event,
                                     (ClassLoaderData*)NULL);
     event.commit();
   }
-#endif // INCLUDE_JFR
+#endif
 }

 Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
@@ -1337,24 +1337,23 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha
     if (!k.is_null()) {
       k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
     }
+
 #if INCLUDE_JFR
-    else {
+    if (k.is_null() && (class_name == jfr_event_handler_proxy)) {
       assert(jfr_event_handler_proxy != NULL, "invariant");
-      if (class_name == jfr_event_handler_proxy) {
-        // EventHandlerProxy class is generated dynamically in
-        // EventHandlerProxyCreator::makeEventHandlerProxyClass
-        // method, so we generate a Java call from here.
-        //
-        // EventHandlerProxy class will finally be defined in
-        // SystemDictionary::resolve_from_stream method, down
-        // the call stack. Bootstrap classloader is parallel-capable,
-        // so no concurrency issues are expected.
-        CLEAR_PENDING_EXCEPTION;
-        k = JfrUpcalls::load_event_handler_proxy_class(THREAD);
-        assert(!k.is_null(), "invariant");
-      }
+      // EventHandlerProxy class is generated dynamically in
+      // EventHandlerProxyCreator::makeEventHandlerProxyClass
+      // method, so we generate a Java call from here.
+      //
+      // EventHandlerProxy class will finally be defined in
+      // SystemDictionary::resolve_from_stream method, down
+      // the call stack. Bootstrap classloader is parallel-capable,
+      // so no concurrency issues are expected.
+      CLEAR_PENDING_EXCEPTION;
+      k = JfrUpcalls::load_event_handler_proxy_class(THREAD);
+      assert(!k.is_null(), "invariant");
     }
-#endif // INCLUDE_JFR
+#endif

     return k;
   } else {
diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp
index 89836911e9eab5311d3782f50f0239e6443bdbd4..dc9a9fcf97c456efdf977b9b2d4e3b5593846464 100644
--- a/src/share/vm/compiler/compileBroker.cpp
+++ b/src/share/vm/compiler/compileBroker.cpp
@@ -2023,7 +2023,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
     compilable = ci_env.compilable();

     if (ci_env.failing()) {
-      const char *failure_reason = ci_env.failure_reason();
+      const char* failure_reason = ci_env.failure_reason();
       const char* retry_message = ci_env.retry_message();
       task->set_failure_reason(failure_reason);
       if (_compilation_log != NULL) {
diff --git a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
index f877e3beaa23ae329a82606e7fe61cd3aefe90ce..d74e95aa5962c71bde6ac3bbf1575ae0ac5076d1 100644
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
@@ -233,82 +233,6 @@ void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_in
   }
 }

-// XXX
-//static JfrStructG1EvacuationStatistics
-//create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
-//  JfrStructG1EvacuationStatistics s;
-//  s.set_gcId(gcid);
-//  s.set_allocated(summary.allocated() * HeapWordSize);
-//  s.set_wasted(summary.wasted() * HeapWordSize);
-//  s.set_used(summary.used() * HeapWordSize);
-//  s.set_undoWaste(summary.undo_wasted() * HeapWordSize);
-//  s.set_regionEndWaste(summary.region_end_waste() * HeapWordSize);
-//  s.set_regionsRefilled(summary.regions_filled());
-//  s.set_directAllocated(summary.direct_allocated() * HeapWordSize);
-//  s.set_failureUsed(summary.failure_used() * HeapWordSize);
-//  s.set_failureWaste(summary.failure_waste() * HeapWordSize);
-//  return s;
-//}
-//
-//void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
-//  EventG1EvacuationYoungStatistics surv_evt;
-//  if (surv_evt.should_commit()) {
-//    surv_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
-//    surv_evt.commit();
-//  }
-//}
-//
-//void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
-//  EventG1EvacuationOldStatistics old_evt;
-//  if (old_evt.should_commit()) {
-//    old_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
-//    old_evt.commit();
-//  }
-//}
-//
-//void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
-//                                             size_t target_occupancy,
-//                                             size_t current_occupancy,
-//                                             size_t last_allocation_size,
-//                                             double last_allocation_duration,
-//                                             double last_marking_length) {
-//  EventG1BasicIHOP evt;
-//  if (evt.should_commit()) {
-//    evt.set_gcId(_shared_gc_info.gc_id().id());
-//    evt.set_threshold(threshold);
-//    evt.set_targetOccupancy(target_occupancy);
-//    evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
-//    evt.set_currentOccupancy(current_occupancy);
-//    evt.set_recentMutatorAllocationSize(last_allocation_size);
-//    evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
-//    evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
-//    evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
-//    evt.commit();
-//  }
-//}
-//
-//void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
-//                                                size_t internal_target_occupancy,
-//                                                size_t current_occupancy,
-//                                                size_t additional_buffer_size,
-//                                                double predicted_allocation_rate,
-//                                                double predicted_marking_length,
-//                                                bool prediction_active) {
-//  EventG1AdaptiveIHOP evt;
-//  if (evt.should_commit()) {
-//    evt.set_gcId(_shared_gc_info.gc_id().id());
-//    evt.set_threshold(threshold);
-//    evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
-//    evt.set_ihopTargetOccupancy(internal_target_occupancy);
-//    evt.set_currentOccupancy(current_occupancy);
-//    evt.set_additionalBufferSize(additional_buffer_size);
-//    evt.set_predictedAllocationRate(predicted_allocation_rate);
-//    evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
-//    evt.set_predictionActive(prediction_active);
-//    evt.commit();
-//  }
-//}
-
 #endif // INCLUDE_ALL_GCS

 static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) {
diff --git a/src/share/vm/opto/superword.hpp b/src/share/vm/opto/superword.hpp
index 45b767cea887e7be5ae91695a2a4e047fbc2c240..138d6599077f3b0dfc9e925f6a67b18c2a1c9083 100644
--- a/src/share/vm/opto/superword.hpp
+++ b/src/share/vm/opto/superword.hpp
@@ -201,32 +201,6 @@ class SWNodeInfo VALUE_OBJ_CLASS_SPEC {

   static const SWNodeInfo initial;
 };
-
-// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
-//------------------------------OrderedPair---------------------------
-// Ordered pair of Node*.
-class OrderedPair VALUE_OBJ_CLASS_SPEC {
- protected:
-  Node* _p1;
-  Node* _p2;
- public:
-  OrderedPair() : _p1(NULL), _p2(NULL) {}
-  OrderedPair(Node* p1, Node* p2) {
-    if (p1->_idx < p2->_idx) {
-      _p1 = p1; _p2 = p2;
-    } else {
-      _p1 = p2; _p2 = p1;
-    }
-  }
-
-  bool operator==(const OrderedPair &rhs) {
-    return _p1 == rhs._p1 && _p2 == rhs._p2;
-  }
-  void print() { tty->print(" (%d, %d)", _p1->_idx, _p2->_idx); }
-
-  static const OrderedPair initial;
-};
-
 // -----------------------------SuperWord---------------------------------
 // Transforms scalar operations into packed (superword) operations.
 class SuperWord : public ResourceObj {
@@ -450,6 +424,7 @@ class SuperWord : public ResourceObj {

 };

+
 //------------------------------SWPointer---------------------------
 // Information about an address for dependence checking and vector alignment
 class SWPointer VALUE_OBJ_CLASS_SPEC {
@@ -531,4 +506,29 @@ class SWPointer VALUE_OBJ_CLASS_SPEC {
   void print();
 };

+
+//------------------------------OrderedPair---------------------------
+// Ordered pair of Node*.
+class OrderedPair VALUE_OBJ_CLASS_SPEC {
+ protected:
+  Node* _p1;
+  Node* _p2;
+ public:
+  OrderedPair() : _p1(NULL), _p2(NULL) {}
+  OrderedPair(Node* p1, Node* p2) {
+    if (p1->_idx < p2->_idx) {
+      _p1 = p1; _p2 = p2;
+    } else {
+      _p1 = p2; _p2 = p1;
+    }
+  }
+
+  bool operator==(const OrderedPair &rhs) {
+    return _p1 == rhs._p1 && _p2 == rhs._p2;
+  }
+  void print() { tty->print(" (%d, %d)", _p1->_idx, _p2->_idx); }
+
+  static const OrderedPair initial;
+};
+
 #endif // SHARE_VM_OPTO_SUPERWORD_HPP
diff --git a/src/share/vm/runtime/biasedLocking.cpp b/src/share/vm/runtime/biasedLocking.cpp
index 4a2a8ab1f727e73f9f9c91b7abb62c7fae0a79ce..0dbce6d15dfbe36aed109bffe0bcc3da2f0b5e82 100644
--- a/src/share/vm/runtime/biasedLocking.cpp
+++ b/src/share/vm/runtime/biasedLocking.cpp
@@ -256,10 +256,12 @@ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_
     }
   }

+#if INCLUDE_JFR
   // If requested, return information on which thread held the bias
   if (biased_locker != NULL) {
     *biased_locker = biased_thread;
   }
+#endif // INCLUDE_JFR

   return BiasedLocking::BIAS_REVOKED;
 }
@@ -497,11 +499,15 @@ public:
       if (TraceBiasedLocking) {
         tty->print_cr("Revoking bias with potentially per-thread safepoint:");
       }
+
       JavaThread* biased_locker = NULL;
       _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
+#if INCLUDE_JFR
       if (biased_locker != NULL) {
         _biased_locker_id = JFR_THREAD_ID(biased_locker);
       }
+#endif // INCLUDE_JFR
+
       clean_up_cached_monitor_info();
       return;
     } else {
@@ -516,9 +522,11 @@ public:
     return _status_code;
   }

+#if INCLUDE_JFR
   traceid biased_locker() const {
     return _biased_locker_id;
   }
+#endif // INCLUDE_JFR
 };


diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp
index 65dc0b070c2541c35f863d5c28bccac3f3ade9b9..320564df86fd2ecf320a5595601257eebd2af378 100644
--- a/src/share/vm/runtime/mutexLocker.cpp
+++ b/src/share/vm/runtime/mutexLocker.cpp
@@ -284,7 +284,7 @@ void mutex_init() {
   def(CompileThread_lock , Monitor, nonleaf+5, false );
   def(PeriodicTask_lock , Monitor, nonleaf+5, true);

-#ifdef INCLUDE_JFR
+#if INCLUDE_JFR
   def(JfrMsg_lock , Monitor, leaf, true);
   def(JfrBuffer_lock , Mutex, leaf, true);
   def(JfrThreadGroups_lock , Mutex, leaf, true);
diff --git a/src/share/vm/runtime/mutexLocker.hpp b/src/share/vm/runtime/mutexLocker.hpp
index c431735b9cc8d96fca1d8d93f934921046d651a8..3875493b2d91bb4739250918b4697e861a778a87 100644
--- a/src/share/vm/runtime/mutexLocker.hpp
+++ b/src/share/vm/runtime/mutexLocker.hpp
@@ -142,7 +142,7 @@ extern Mutex* Management_lock; // a lock used to serialize JVM
 extern Monitor* Service_lock; // a lock used for service thread operation
 extern Monitor* PeriodicTask_lock; // protects the periodic task structure

-#ifdef INCLUDE_JFR
+#if INCLUDE_JFR
 extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table
 extern Monitor* JfrMsg_lock; // protects JFR messaging
 extern Mutex* JfrBuffer_lock; // protects JFR buffer operations
diff --git a/src/share/vm/runtime/synchronizer.cpp b/src/share/vm/runtime/synchronizer.cpp
index 1bb6bb13d57ce393d15bb957f3266793f829a320..befe7ccf8f9f3c2a38e4462ee36b68823cbee1cd 100644
--- a/src/share/vm/runtime/synchronizer.cpp
+++ b/src/share/vm/runtime/synchronizer.cpp
@@ -1185,8 +1185,6 @@ static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
   assert(event->should_commit(), "invariant");
   event->set_monitorClass(obj->klass());
   event->set_address((uintptr_t)(void*)obj);
-  // XXX no such counters. implement?
-//  event->set_cause((u1)cause);
   event->commit();
 }
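
Note on the "#ifdef INCLUDE_JFR" -> "#if INCLUDE_JFR" changes in mutexLocker.cpp and mutexLocker.hpp: HotSpot's INCLUDE_* feature macros are always defined, to either 0 or 1 (normally via utilities/macros.hpp), so testing them with #ifdef succeeds even in a build where the feature is turned off and the guarded code is compiled in anyway. A minimal standalone sketch of the difference follows; the "#define INCLUDE_JFR 0" fallback stands in for a JFR-disabled build and is an assumption for illustration, not a line from this patch.

  // Illustrative sketch only, mirroring the INCLUDE_* convention.
  #ifndef INCLUDE_JFR
  #define INCLUDE_JFR 0   // assumed: a JFR-disabled build defines the macro to 0 rather than leaving it undefined
  #endif

  #ifdef INCLUDE_JFR
  // Compiled even when INCLUDE_JFR is 0, because the macro is defined either way;
  // an #ifdef guard therefore would not have excluded the Jfr* locks in a JFR-disabled build.
  #endif

  #if INCLUDE_JFR
  // Compiled only when INCLUDE_JFR expands to 1, which is what the corrected
  // guards around the Jfr* lock definitions and declarations rely on.
  #endif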