Commit 8b06765f authored by kamg

6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent

Summary: Make JvmtiGCMark safe to run non-safepoint and instrument CMS
Reviewed-by: ysr, dcubed
Parent: ec7cdf05
@@ -3478,6 +3478,7 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState);
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
@@ -5940,11 +5941,6 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   }
   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
-  // JVMTI object tagging is based on JNI weak refs. If any of these
-  // refs were cleared then JVMTI needs to update its maps and
-  // maybe post ObjectFrees to agents.
-  JvmtiExport::cms_ref_processing_epilogue();
 }
 #ifndef PRODUCT
@@ -6305,6 +6301,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsInitial(true);       // asynch
       if (PrintGC) {
         _cmsGen->printOccupancy("initial-mark");
@@ -6312,6 +6309,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
       break;
     }
     case CMS_op_checkpointRootsFinal: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsFinal(true,    // asynch
                            false,   // !clear_all_soft_refs
                            false);  // !init_mark_was_synchronous
......
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/vmGCOperations.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -1142,6 +1143,8 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
     return;
   }
+  SvcGCMarker sgcm(SvcGCMarker::OTHER);
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
......
@@ -1192,7 +1192,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     return false;
   }
-  DTraceGCProbeMarker gc_probe_marker(true /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
   if (PrintHeapAtGC) {
@@ -3214,7 +3214,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     return false;
   }
-  DTraceGCProbeMarker gc_probe_marker(false /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
   if (PrintHeapAtGC) {
......
@@ -38,7 +38,6 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(
 }
 void VM_G1CollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
@@ -46,7 +45,6 @@ void VM_G1CollectForAllocation::doit() {
 }
 void VM_G1CollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   g1h->do_full_collection(false /* clear_all_soft_refs */);
@@ -72,7 +70,6 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
 }
 void VM_G1IncCollectionPause::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
          ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
......
@@ -42,8 +42,7 @@ VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
 }
 void VM_ParallelGCFailedAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -54,8 +53,6 @@ void VM_ParallelGCFailedAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(size_t size,
@@ -67,8 +64,7 @@ VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(s
 }
 void VM_ParallelGCFailedPermanentAllocation::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -78,7 +74,6 @@ void VM_ParallelGCFailedPermanentAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 // Only used for System.gc() calls
@@ -91,8 +86,7 @@ VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
 }
 void VM_ParallelGCSystemGC::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
@@ -106,5 +100,4 @@ void VM_ParallelGCSystemGC::doit() {
   } else {
     heap->invoke_full_gc(false);
   }
-  notify_gc_end();
 }
@@ -31,7 +31,6 @@
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
-#include "prims/jvmtiExport.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -40,6 +39,7 @@
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #endif
 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
 HS_DTRACE_PROBE_DECL(hotspot, gc__end);
@@ -158,8 +158,7 @@ void VM_GC_HeapInspection::doit() {
 void VM_GenCollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
@@ -169,22 +168,19 @@ void VM_GenCollectForAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 void VM_GenCollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
-  notify_gc_end();
 }
 void VM_GenCollectForPermanentAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   SharedHeap* heap = (SharedHeap*)Universe::heap();
   GCCauseSetter gccs(heap, _gc_cause);
   switch (heap->kind()) {
@@ -209,5 +205,4 @@ void VM_GenCollectForPermanentAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
@@ -30,6 +30,7 @@
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/vm_operations.hpp"
+#include "prims/jvmtiExport.hpp"
 // The following class hierarchy represents
 // a set of operations (VM_Operation) related to GC.
@@ -209,13 +210,17 @@ class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
   HeapWord* result() const       { return _res; }
 };
-class DTraceGCProbeMarker : public StackObj {
-public:
-  DTraceGCProbeMarker(bool full) {
-    VM_GC_Operation::notify_gc_begin(full);
+class SvcGCMarker : public StackObj {
+ private:
+  JvmtiGCMarker _jgcm;
+ public:
+  typedef enum { MINOR, FULL, OTHER } reason_type;
+  SvcGCMarker(reason_type reason ) {
+    VM_GC_Operation::notify_gc_begin(reason == FULL);
   }
-  ~DTraceGCProbeMarker() {
+  ~SvcGCMarker() {
     VM_GC_Operation::notify_gc_end();
   }
 };
......
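For reference, the SvcGCMarker defined in the hunk above replaces both the removed DTraceGCProbeMarker and the JvmtiGCForAllocationMarker/JvmtiGCFullMarker pair: it is stack-allocated in each GC VM operation's doit(), fires the hotspot gc__begin/gc__end DTrace probes through VM_GC_Operation::notify_gc_begin/notify_gc_end, and its embedded JvmtiGCMarker posts the JVMTI GarbageCollectionStart/Finish events. A minimal usage sketch, taken from the new VM_GenCollectFull::doit() elsewhere in this commit:

// Usage pattern after this change (as in vmGCOperations.cpp):
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);   // gc__begin probe + JVMTI GC-start event

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}  // sgcm destructor: gc__end probe + JVMTI GC-finish event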
@@ -13048,8 +13048,8 @@ myInit() {
   <event label="Garbage Collection Start"
          id="GarbageCollectionStart" const="JVMTI_EVENT_GARBAGE_COLLECTION_START" num="81">
     <description>
-      A Garbage Collection Start event is sent when a full cycle
-      garbage collection begins.
+      A Garbage Collection Start event is sent when a
+      garbage collection pause begins.
       Only stop-the-world collections are reported--that is, collections during
       which all threads cease to modify the state of the Java virtual machine.
       This means that some collectors will never generate these events.
@@ -13075,8 +13075,8 @@ myInit() {
   <event label="Garbage Collection Finish"
          id="GarbageCollectionFinish" const="JVMTI_EVENT_GARBAGE_COLLECTION_FINISH" num="82">
     <description>
-      A Garbage Collection Finish event is sent when a full
-      garbage collection cycle ends.
+      A Garbage Collection Finish event is sent when a
+      garbage collection pause ends.
       This event is sent while the VM is still stopped, thus
       the event handler must not use JNI functions and
       must not use <jvmti/> functions except those which
......
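The specification text above describes the two JVMTI events whose wording this commit adjusts. As a point of reference (not part of this changeset), a minimal agent that consumes them looks roughly like the sketch below; it uses only the standard jvmti.h API (GetEnv, AddCapabilities, SetEventCallbacks, SetEventNotificationMode) and requests the can_generate_garbage_collection_events capability. Because the callbacks run while the VM is stopped, they avoid JNI and restricted JVMTI calls.

// Illustrative JVMTI agent sketch (not part of this commit).
#include <string.h>
#include <jvmti.h>

static volatile int gc_pauses = 0;

static void JNICALL on_gc_start(jvmtiEnv* jvmti)  { /* a GC pause began */ }
static void JNICALL on_gc_finish(jvmtiEnv* jvmti) { gc_pauses++; /* pause ended */ }

JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
  jvmtiEnv* jvmti = NULL;
  if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_0) != JNI_OK) {
    return JNI_ERR;
  }

  // The GC events require this capability.
  jvmtiCapabilities caps;
  memset(&caps, 0, sizeof(caps));
  caps.can_generate_garbage_collection_events = 1;
  jvmti->AddCapabilities(&caps);

  // Register the two callbacks and enable the events globally.
  jvmtiEventCallbacks callbacks;
  memset(&callbacks, 0, sizeof(callbacks));
  callbacks.GarbageCollectionStart  = &on_gc_start;
  callbacks.GarbageCollectionFinish = &on_gc_finish;
  jvmti->SetEventCallbacks(&callbacks, (jint)sizeof(callbacks));

  jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_GARBAGE_COLLECTION_START,  NULL);
  jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_GARBAGE_COLLECTION_FINISH, NULL);
  return JNI_OK;
}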
@@ -2358,15 +2358,6 @@ jint JvmtiExport::load_agent_library(AttachOperation* op, outputStream* st) {
 }
 #endif // SERVICES_KERNEL
-// CMS has completed referencing processing so may need to update
-// tag maps.
-void JvmtiExport::cms_ref_processing_epilogue() {
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiTagMap::cms_ref_processing_epilogue();
-  }
-}
 ////////////////////////////////////////////////////////////////////////////////////////////////
 // Setup current current thread for event collection.
@@ -2536,36 +2527,20 @@ NoJvmtiVMObjectAllocMark::~NoJvmtiVMObjectAllocMark() {
   }
 };
-JvmtiGCMarker::JvmtiGCMarker(bool full) : _full(full), _invocation_count(0) {
-  assert(Thread::current()->is_VM_thread(), "wrong thread");
+JvmtiGCMarker::JvmtiGCMarker() {
   // if there aren't any JVMTI environments then nothing to do
   if (!JvmtiEnv::environments_might_exist()) {
     return;
   }
-  if (ForceFullGCJVMTIEpilogues) {
-    // force 'Full GC' was done semantics for JVMTI GC epilogues
-    _full = true;
-  }
-  // GarbageCollectionStart event posted from VM thread - okay because
-  // JVMTI is clear that the "world is stopped" and callback shouldn't
-  // try to call into the VM.
   if (JvmtiExport::should_post_garbage_collection_start()) {
     JvmtiExport::post_garbage_collection_start();
   }
-  // if "full" is false it probably means this is a scavenge of the young
-  // generation. However it could turn out that a "full" GC is required
-  // so we record the number of collections so that it can be checked in
-  // the destructor.
-  if (!_full) {
-    _invocation_count = Universe::heap()->total_full_collections();
-  }
-  // Do clean up tasks that need to be done at a safepoint
-  JvmtiEnvBase::check_for_periodic_clean_up();
+  if (SafepointSynchronize::is_at_safepoint()) {
+    // Do clean up tasks that need to be done at a safepoint
+    JvmtiEnvBase::check_for_periodic_clean_up();
+  }
 }
 JvmtiGCMarker::~JvmtiGCMarker() {
@@ -2578,21 +2553,5 @@ JvmtiGCMarker::~JvmtiGCMarker() {
   if (JvmtiExport::should_post_garbage_collection_finish()) {
     JvmtiExport::post_garbage_collection_finish();
   }
-  // we might have initially started out doing a scavenge of the young
-  // generation but could have ended up doing a "full" GC - check the
-  // GC count to see.
-  if (!_full) {
-    _full = (_invocation_count != Universe::heap()->total_full_collections());
-  }
-  // Full collection probably means the perm generation has been GC'ed
-  // so we clear the breakpoint cache.
-  if (_full) {
-    JvmtiCurrentBreakpoints::gc_epilogue();
-  }
-  // Notify heap/object tagging support
-  JvmtiTagMap::gc_epilogue(_full);
 }
 #endif // JVMTI_KERNEL
@@ -356,9 +356,6 @@ class JvmtiExport : public AllStatic {
   // SetNativeMethodPrefix support
   static char** get_all_native_method_prefixes(int* count_ptr);
-  // call after CMS has completed referencing processing
-  static void cms_ref_processing_epilogue() KERNEL_RETURN;
 };
 // Support class used by JvmtiDynamicCodeEventCollector and others. It
@@ -492,55 +489,11 @@ class NoJvmtiVMObjectAllocMark : public StackObj {
 // Base class for reporting GC events to JVMTI.
 class JvmtiGCMarker : public StackObj {
- private:
-  bool _full;                        // marks a "full" GC
-  unsigned int _invocation_count;    // GC invocation count
- protected:
-  JvmtiGCMarker(bool full) KERNEL_RETURN;    // protected
-  ~JvmtiGCMarker() KERNEL_RETURN;            // protected
-};
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a stop-the-world GC for failed allocation.
-//
-// Usage :-
-//
-// void VM_GenCollectForAllocation::doit() {
-//   JvmtiGCForAllocationMarker jgcm;
-//   :
-// }
-//
-// If jvmti is not enabled the constructor and destructor is essentially
-// a no-op (no overhead).
-//
-class JvmtiGCForAllocationMarker : public JvmtiGCMarker {
  public:
-  JvmtiGCForAllocationMarker() : JvmtiGCMarker(false) {
-  }
-};
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a "full" stop-the-world GC. This class differs
-// from JvmtiGCForAllocationMarker in that this class assumes that a
-// "full" GC will happen.
-//
-// Usage :-
-//
-// void VM_GenCollectFull::doit() {
-//   JvmtiGCFullMarker jgcm;
-//   :
-// }
-//
-class JvmtiGCFullMarker : public JvmtiGCMarker {
- public:
-  JvmtiGCFullMarker() : JvmtiGCMarker(true) {
-  }
+  JvmtiGCMarker() KERNEL_RETURN;
+  ~JvmtiGCMarker() KERNEL_RETURN;
 };
 // JvmtiHideSingleStepping is a helper class for hiding
 // internal single step events.
 class JvmtiHideSingleStepping : public StackObj {
......
@@ -212,14 +212,7 @@ void GrowableCache::oops_do(OopClosure* f) {
   for (int i=0; i<len; i++) {
     GrowableElement *e = _elements->at(i);
     e->oops_do(f);
-  }
-}
-void GrowableCache::gc_epilogue() {
-  int len = _elements->length();
-  // recompute the new cache value after GC
-  for (int i=0; i<len; i++) {
-    _cache[i] = _elements->at(i)->getCacheValue();
+    _cache[i] = e->getCacheValue();
   }
 }
@@ -401,10 +394,6 @@ void JvmtiBreakpoints::oops_do(OopClosure* f) {
   _bps.oops_do(f);
 }
-void JvmtiBreakpoints::gc_epilogue() {
-  _bps.gc_epilogue();
-}
 void JvmtiBreakpoints::print() {
 #ifndef PRODUCT
   ResourceMark rm;
@@ -534,13 +523,6 @@ void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
   }
 }
-void JvmtiCurrentBreakpoints::gc_epilogue() {
-  if (_jvmti_breakpoints != NULL) {
-    _jvmti_breakpoints->gc_epilogue();
-  }
-}
 ///////////////////////////////////////////////////////////////
 //
 // class VM_GetOrSetLocal
......
@@ -117,7 +117,6 @@ public:
   void clear();
   // apply f to every element and update the cache
   void oops_do(OopClosure* f);
-  void gc_epilogue();
 };
@@ -149,7 +148,6 @@ public:
   void remove (int index) { _cache.remove(index); }
   void clear()            { _cache.clear(); }
   void oops_do(OopClosure* f) { _cache.oops_do(f); }
-  void gc_epilogue()      { _cache.gc_epilogue(); }
 };
@@ -278,7 +276,6 @@ public:
   int length();
   void oops_do(OopClosure* f);
-  void gc_epilogue();
   void print();
   int set(JvmtiBreakpoint& bp);
@@ -328,7 +325,6 @@ public:
   static inline bool is_breakpoint(address bcp);
   static void oops_do(OopClosure* f);
-  static void gc_epilogue();
 };
 // quickly test whether the bcp matches a cached breakpoint in the list
......
This diff has been collapsed.
@@ -45,17 +45,12 @@ class JvmtiTagMap : public CHeapObj {
  private:
   enum{
-    n_hashmaps = 2,                 // encapsulates 2 hashmaps
-    max_free_entries = 4096         // maximum number of free entries per env
+    max_free_entries = 4096         // maximum number of free entries per env
   };
-  // memory region for young generation
-  static MemRegion _young_gen;
-  static void get_young_generation();
   JvmtiEnv*             _env;                     // the jvmti environment
   Mutex                 _lock;                    // lock for this tag map
-  JvmtiTagHashmap*      _hashmap[n_hashmaps];     // the hashmaps
+  JvmtiTagHashmap*      _hashmap;                 // the hashmap
   JvmtiTagHashmapEntry* _free_entries;            // free list for this environment
   int _free_entries_count;                        // number of entries on the free list
@@ -67,11 +62,7 @@ class JvmtiTagMap : public CHeapObj {
   inline Mutex* lock()         { return &_lock; }
   inline JvmtiEnv* env() const { return _env; }
-  // rehash tags maps for generation start to end
-  void rehash(int start, int end);
-  // indicates if the object is in the young generation
-  static bool is_in_young(oop o);
+  void do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);
   // iterate over all entries in this tag map
   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
@@ -81,11 +72,10 @@ class JvmtiTagMap : public CHeapObj {
   // indicates if this tag map is locked
   bool is_locked() { return lock()->is_locked(); }
-  // return the appropriate hashmap for a given object
-  JvmtiTagHashmap* hashmap_for(oop o);
+  JvmtiTagHashmap* hashmap() { return _hashmap; }
   // create/destroy entries
-  JvmtiTagHashmapEntry* create_entry(jweak ref, jlong tag);
+  JvmtiTagHashmapEntry* create_entry(oop ref, jlong tag);
   void destroy_entry(JvmtiTagHashmapEntry* entry);
   // returns true if the hashmaps are empty
@@ -134,11 +124,8 @@ class JvmtiTagMap : public CHeapObj {
                               jint* count_ptr, jobject** object_result_ptr,
                               jlong** tag_result_ptr);
-  // call post-GC to rehash the tag maps.
-  static void gc_epilogue(bool full);
-  // call after referencing processing has completed (CMS)
-  static void cms_ref_processing_epilogue();
+  static void weak_oops_do(
+      BoolObjectClosure* is_alive, OopClosure* f) KERNEL_RETURN;
 };
 #endif // SHARE_VM_PRIMS_JVMTITAGMAP_HPP
@@ -1198,9 +1198,6 @@ class CommandLineFlags {
   product(ccstr, TraceJVMTI, NULL,                                          \
           "Trace flags for JVMTI functions and events")                     \
                                                                             \
-  product(bool, ForceFullGCJVMTIEpilogues, false,                           \
-          "Force 'Full GC' was done semantics for JVMTI GC epilogues")      \
-                                                                            \
   /* This option can change an EMCP method into an obsolete method. */     \
   /* This can affect tests that except specific methods to be EMCP. */     \
   /* This option should be used with caution. */                           \
......
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
+#include "prims/jvmtiTagMap.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -428,6 +429,12 @@ void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
       break;
     }
   }
+  /*
+   * JvmtiTagMap may also contain weak oops. The iteration of it is placed
+   * here so that we don't need to add it to each of the collectors.
+   */
+  JvmtiTagMap::weak_oops_do(is_alive, f);
 }
......
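The comment in the hunk above explains the placement: every collector already walks the JNI weak handles during reference processing, so routing JvmtiTagMap::weak_oops_do through JNIHandleBlock::weak_oops_do means no per-collector call is needed. A sketch of the resulting call path, assuming the collector reaches the block walk via JNIHandles::weak_oops_do (the function name example_process_weak_jni_handles is illustrative only):

// Illustrative collector-side call path, not a change made by this commit.
void example_process_weak_jni_handles(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
  // Walks every JNIHandleBlock; with this commit the same walk also visits
  // the JVMTI tag map, so dead tagged objects are cleaned up here as well.
  JNIHandles::weak_oops_do(is_alive, keep_alive);
}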