Commit 348f2314 authored by neugens

8238589: Necessary code cleanup in JFR for JDK8u

Reviewed-by: shade, apetushkov
Parent 9dd0bdd6
@@ -34,7 +34,6 @@
#include "utilities/exceptions.hpp"
// put OS-includes here
#include <dirent.h>
# include <sys/types.h>
# include <sys/mman.h>
# include <errno.h>
......
@@ -617,7 +617,7 @@ static void post_class_load_event(EventClassLoad &event,
(ClassLoaderData*)NULL);
event.commit();
}
#endif // INCLUDE_JFR
#endif
}
Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
@@ -1337,10 +1337,10 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha
if (!k.is_null()) {
k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
}
#if INCLUDE_JFR
else {
if (k.is_null() && (class_name == jfr_event_handler_proxy)) {
assert(jfr_event_handler_proxy != NULL, "invariant");
if (class_name == jfr_event_handler_proxy) {
// EventHandlerProxy class is generated dynamically in
// EventHandlerProxyCreator::makeEventHandlerProxyClass
// method, so we generate a Java call from here.
@@ -1353,8 +1353,7 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha
k = JfrUpcalls::load_event_handler_proxy_class(THREAD);
assert(!k.is_null(), "invariant");
}
}
#endif // INCLUDE_JFR
#endif
return k;
} else {
......
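Taken together, the two systemDictionary.cpp hunks rework the JFR fallback in SystemDictionary::load_instance_class: the lookup of the dynamically generated EventHandlerProxy class becomes an else branch of the find_or_define_instance_class path, guarded by an explicit assert that the proxy symbol is initialized, and the short #endif loses its trailing comment. A sketch of the resulting control flow, pieced together from the lines shown rather than copied from the file:

    if (!k.is_null()) {
      k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
    }
    #if INCLUDE_JFR
    else {
      assert(jfr_event_handler_proxy != NULL, "invariant");
      if (class_name == jfr_event_handler_proxy) {
        // EventHandlerProxy is generated at runtime by
        // EventHandlerProxyCreator::makeEventHandlerProxyClass, so this
        // resolves through an upcall into Java, not a class-path search.
        k = JfrUpcalls::load_event_handler_proxy_class(THREAD);
        assert(!k.is_null(), "invariant");
      }
    }
    #endif
    return k;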
@@ -2023,7 +2023,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
compilable = ci_env.compilable();
if (ci_env.failing()) {
const char *failure_reason = ci_env.failure_reason();
const char* failure_reason = ci_env.failure_reason();
const char* retry_message = ci_env.retry_message();
task->set_failure_reason(failure_reason);
if (_compilation_log != NULL) {
......
@@ -233,82 +233,6 @@ void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_in
}
}
// XXX
//static JfrStructG1EvacuationStatistics
//create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
// JfrStructG1EvacuationStatistics s;
// s.set_gcId(gcid);
// s.set_allocated(summary.allocated() * HeapWordSize);
// s.set_wasted(summary.wasted() * HeapWordSize);
// s.set_used(summary.used() * HeapWordSize);
// s.set_undoWaste(summary.undo_wasted() * HeapWordSize);
// s.set_regionEndWaste(summary.region_end_waste() * HeapWordSize);
// s.set_regionsRefilled(summary.regions_filled());
// s.set_directAllocated(summary.direct_allocated() * HeapWordSize);
// s.set_failureUsed(summary.failure_used() * HeapWordSize);
// s.set_failureWaste(summary.failure_waste() * HeapWordSize);
// return s;
//}
//
//void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
// EventG1EvacuationYoungStatistics surv_evt;
// if (surv_evt.should_commit()) {
// surv_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
// surv_evt.commit();
// }
//}
//
//void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
// EventG1EvacuationOldStatistics old_evt;
// if (old_evt.should_commit()) {
// old_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
// old_evt.commit();
// }
//}
//
//void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
// size_t target_occupancy,
// size_t current_occupancy,
// size_t last_allocation_size,
// double last_allocation_duration,
// double last_marking_length) {
// EventG1BasicIHOP evt;
// if (evt.should_commit()) {
// evt.set_gcId(_shared_gc_info.gc_id().id());
// evt.set_threshold(threshold);
// evt.set_targetOccupancy(target_occupancy);
// evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
// evt.set_currentOccupancy(current_occupancy);
// evt.set_recentMutatorAllocationSize(last_allocation_size);
// evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
// evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
// evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
// evt.commit();
// }
//}
//
//void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
// size_t internal_target_occupancy,
// size_t current_occupancy,
// size_t additional_buffer_size,
// double predicted_allocation_rate,
// double predicted_marking_length,
// bool prediction_active) {
// EventG1AdaptiveIHOP evt;
// if (evt.should_commit()) {
// evt.set_gcId(_shared_gc_info.gc_id().id());
// evt.set_threshold(threshold);
// evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
// evt.set_ihopTargetOccupancy(internal_target_occupancy);
// evt.set_currentOccupancy(current_occupancy);
// evt.set_additionalBufferSize(additional_buffer_size);
// evt.set_predictedAllocationRate(predicted_allocation_rate);
// evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
// evt.set_predictionActive(prediction_active);
// evt.commit();
// }
//}
#endif // INCLUDE_ALL_GCS
static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) {
......
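Everything deleted above was dead code: the G1 evacuation-statistics and IHOP senders depend on G1 plumbing (G1EvacSummary's undo-waste accounting, IHOP occupancy tracking) that 8u's G1 does not have, so they could only ever live here as comments. The senders that remain in this file keep the standard JFR shape, as in the send_evacuation_failed_event method named in the hunk header (a sketch: the signature comes from that header, the body is paraphrased, not verbatim):

    void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
      EventEvacuationFailed e;
      if (e.should_commit()) {  // cheap gate: false unless a running recording enables the event
        // ... copy counts and sizes from ef_info into the event's fields ...
        e.commit();             // stamps the event and writes it to the thread's JFR buffer
      }
    }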
@@ -201,32 +201,6 @@ class SWNodeInfo VALUE_OBJ_CLASS_SPEC {
static const SWNodeInfo initial;
};
// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
//------------------------------OrderedPair---------------------------
// Ordered pair of Node*.
class OrderedPair VALUE_OBJ_CLASS_SPEC {
protected:
Node* _p1;
Node* _p2;
public:
OrderedPair() : _p1(NULL), _p2(NULL) {}
OrderedPair(Node* p1, Node* p2) {
if (p1->_idx < p2->_idx) {
_p1 = p1; _p2 = p2;
} else {
_p1 = p2; _p2 = p1;
}
}
bool operator==(const OrderedPair &rhs) {
return _p1 == rhs._p1 && _p2 == rhs._p2;
}
void print() { tty->print(" (%d, %d)", _p1->_idx, _p2->_idx); }
static const OrderedPair initial;
};
// -----------------------------SuperWord---------------------------------
// Transforms scalar operations into packed (superword) operations.
class SuperWord : public ResourceObj {
@@ -450,6 +424,7 @@ class SuperWord : public ResourceObj {
};
//------------------------------SWPointer---------------------------
// Information about an address for dependence checking and vector alignment
class SWPointer VALUE_OBJ_CLASS_SPEC {
@@ -531,4 +506,29 @@ class SWPointer VALUE_OBJ_CLASS_SPEC {
void print();
};
//------------------------------OrderedPair---------------------------
// Ordered pair of Node*.
class OrderedPair VALUE_OBJ_CLASS_SPEC {
protected:
Node* _p1;
Node* _p2;
public:
OrderedPair() : _p1(NULL), _p2(NULL) {}
OrderedPair(Node* p1, Node* p2) {
if (p1->_idx < p2->_idx) {
_p1 = p1; _p2 = p2;
} else {
_p1 = p2; _p2 = p1;
}
}
bool operator==(const OrderedPair &rhs) {
return _p1 == rhs._p1 && _p2 == rhs._p2;
}
void print() { tty->print(" (%d, %d)", _p1->_idx, _p2->_idx); }
static const OrderedPair initial;
};
#endif // SHARE_VM_OPTO_SUPERWORD_HPP
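The superword.hpp hunks undo a workaround rather than delete anything: the removed comment notes that OrderedPair had been hoisted above SuperWord only "to deal with compilation issues on Windows" in JVMCI builds, which the 8u tree does not carry, so the class returns to its upstream position at the end of the header. That position is legal because the header's only use of OrderedPair, a GrowableArray field in SuperWord, holds its elements through a pointer; declaration order only bites when a type is embedded by value (an illustrative snippet, not from the patch):

    class B;         // forward declaration: B is incomplete here
    class A {
      B* ptr;        // fine: a pointer to an incomplete type is allowed
      // B val;      // error: embedding B by value needs its full definition
    };
    class B { int x; };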
@@ -256,10 +256,12 @@ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_
}
}
#if INCLUDE_JFR
// If requested, return information on which thread held the bias
if (biased_locker != NULL) {
*biased_locker = biased_thread;
}
#endif // INCLUDE_JFR
return BiasedLocking::BIAS_REVOKED;
}
@@ -497,11 +499,15 @@ public:
if (TraceBiasedLocking) {
tty->print_cr("Revoking bias with potentially per-thread safepoint:");
}
JavaThread* biased_locker = NULL;
_status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
#if INCLUDE_JFR
if (biased_locker != NULL) {
_biased_locker_id = JFR_THREAD_ID(biased_locker);
}
#endif // INCLUDE_JFR
clean_up_cached_monitor_info();
return;
} else {
@@ -516,9 +522,11 @@ public:
return _status_code;
}
#if INCLUDE_JFR
traceid biased_locker() const {
return _biased_locker_id;
}
#endif // INCLUDE_JFR
};
......
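The biasedLocking.cpp hunks route one new fact to JFR: revoke_bias can now report, through an optional out-parameter, which JavaThread held the bias, and the revocation VM operation stores that thread's trace id behind the new biased_locker() accessor. The out-parameter itself stays unconditional, so revoke_bias keeps a single signature in both build flavors; only the JFR bookkeeping is fenced. The consuming end looks roughly like this (a sketch: the event type, its setters, and the surrounding call site are assumed from the JDK 11 version of this code, not shown in this diff):

    EventBiasedLockRevocation event;                   // assumed event type
    VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);  // the VM op patched above
    VMThread::execute(&revoke);
    if (event.should_commit() && revoke.status_code() != BiasedLocking::NOT_BIASED) {
      event.set_lockClass(k);                           // Klass* of the locked object
      event.set_previousOwner(revoke.biased_locker());  // traceid captured in the hunk above
      event.commit();
    }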
@@ -284,7 +284,7 @@ void mutex_init() {
def(CompileThread_lock , Monitor, nonleaf+5, false );
def(PeriodicTask_lock , Monitor, nonleaf+5, true);
#ifdef INCLUDE_JFR
#if INCLUDE_JFR
def(JfrMsg_lock , Monitor, leaf, true);
def(JfrBuffer_lock , Mutex, leaf, true);
def(JfrThreadGroups_lock , Mutex, leaf, true);
......
@@ -142,7 +142,7 @@ extern Mutex* Management_lock; // a lock used to serialize JVM
extern Monitor* Service_lock; // a lock used for service thread operation
extern Monitor* PeriodicTask_lock; // protects the periodic task structure
#ifdef INCLUDE_JFR
#if INCLUDE_JFR
extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table
extern Monitor* JfrMsg_lock; // protects JFR messaging
extern Mutex* JfrBuffer_lock; // protects JFR buffer operations
......
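The one-word change in the two mutexLocker hunks is the substantive fix in this commit. HotSpot's INCLUDE_* feature switches are always defined, to either 0 or 1 (utilities/macros.hpp), so #ifdef INCLUDE_JFR is true even in a build configured without JFR and would have created the JFR locks there regardless; #if INCLUDE_JFR tests the value instead. In miniature:

    // The HotSpot convention from utilities/macros.hpp, sketched:
    #ifndef INCLUDE_JFR
    #define INCLUDE_JFR 1   // a JFR-less configuration defines it as 0 instead
    #endif

    #ifdef INCLUDE_JFR      // wrong: INCLUDE_JFR is defined in every build
      // compiled even when INCLUDE_JFR == 0
    #endif

    #if INCLUDE_JFR         // right: compiled only when the value is 1
      // JFR-only lock declarations
    #endif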
@@ -1185,8 +1185,6 @@ static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
assert(event->should_commit(), "invariant");
event->set_monitorClass(obj->klass());
event->set_address((uintptr_t)(void*)obj);
// XXX no such counters. implement?
// event->set_cause((u1)cause);
event->commit();
}
......