Commit 94595835 authored by kamg

6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent

Summary: Make JvmtiGCMarker safe to run outside a safepoint and instrument CMS
Reviewed-by: ysr, dcubed
Parent adaa3c1f
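In outline: each stop-the-world GC VM operation previously paired a JvmtiGC*Marker with hand-balanced notify_gc_begin()/notify_gc_end() DTrace calls, and JVMTI had to guess up front whether a collection would be "full". Both jobs now fall to a single stack-allocated SvcGCMarker that embeds the reworked, reason-free JvmtiGCMarker. A sketch of the resulting pattern, with stub types standing in for the HotSpot classes (not the patch itself):

```cpp
// Stub types sketching the before/after pattern; not the HotSpot classes.
struct JvmtiGCMarkerStub {     // models the new marker: no "full" flag to get wrong
  JvmtiGCMarkerStub()  { /* post GarbageCollectionStart if agents ask */ }
  ~JvmtiGCMarkerStub() { /* post GarbageCollectionFinish */ }
};

class SvcGCMarkerStub {        // models SvcGCMarker, defined later in this diff
  JvmtiGCMarkerStub _jgcm;     // JVMTI events now ride along automatically
 public:
  enum reason_type { MINOR, FULL, OTHER };
  explicit SvcGCMarkerStub(reason_type r) { (void)r; /* notify_gc_begin(r == FULL) */ }
  ~SvcGCMarkerStub()                      { /* notify_gc_end() */ }
};

// Before: JvmtiGCFullMarker jgcm; notify_gc_begin(true); ... notify_gc_end();
// After:
void doit_sketch() {
  SvcGCMarkerStub sgcm(SvcGCMarkerStub::FULL);
  // ... collect; DTrace probes and JVMTI events unwind together ...
}
```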
@@ -3478,6 +3478,7 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState);
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
@@ -5940,11 +5941,6 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   }
   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
-  // JVMTI object tagging is based on JNI weak refs. If any of these
-  // refs were cleared then JVMTI needs to update its maps and
-  // maybe post ObjectFrees to agents.
-  JvmtiExport::cms_ref_processing_epilogue();
 }
 #ifndef PRODUCT
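The deleted call was the CMS-only epilogue that let JVMTI refresh its tag maps after reference processing. It becomes unnecessary because tag-map entries are now swept as weak oops from JNIHandleBlock::weak_oops_do (see the hunk at the end of this change), a path every collector already takes. A self-contained model of that sweep, with invented types standing in for the tag-map internals:

```cpp
#include <cstdio>
#include <vector>

// Invented stand-ins: a tag-map entry and an is-alive predicate.
struct Entry { const void* obj; long tag; };
typedef bool (*IsAliveFn)(const void*);

// Walk the map once per GC: keep entries whose referent survived,
// drop dead ones (this is where ObjectFree would be posted to agents).
static void weak_entries_do(std::vector<Entry>& map, IsAliveFn is_alive) {
  size_t kept = 0;
  for (size_t i = 0; i < map.size(); i++) {
    if (is_alive(map[i].obj)) {
      map[kept++] = map[i];                             // survivor: keep entry
    } else {
      std::printf("ObjectFree tag=%ld\n", map[i].tag);  // dead referent
    }
  }
  map.resize(kept);
}
```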
@@ -6305,6 +6301,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsInitial(true);       // asynch
       if (PrintGC) {
         _cmsGen->printOccupancy("initial-mark");
@@ -6312,6 +6309,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
       break;
     }
     case CMS_op_checkpointRootsFinal: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsFinal(true,    // asynch
                            false,   // !clear_all_soft_refs
                            false);  // !init_mark_was_synchronous
......
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/vmGCOperations.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -1142,6 +1143,8 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
     return;
   }
+  SvcGCMarker sgcm(SvcGCMarker::OTHER);
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
......
@@ -1192,7 +1192,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     return false;
   }
-  DTraceGCProbeMarker gc_probe_marker(true /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
   if (PrintHeapAtGC) {
@@ -3214,7 +3214,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     return false;
   }
-  DTraceGCProbeMarker gc_probe_marker(false /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
   if (PrintHeapAtGC) {
......
@@ -38,7 +38,6 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(
 }
 void VM_G1CollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
@@ -46,7 +45,6 @@ void VM_G1CollectForAllocation::doit() {
 }
 void VM_G1CollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   g1h->do_full_collection(false /* clear_all_soft_refs */);
@@ -72,7 +70,6 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
 }
 void VM_G1IncCollectionPause::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
     ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
......
@@ -42,8 +42,7 @@ VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
 }
 void VM_ParallelGCFailedAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -54,8 +53,6 @@ void VM_ParallelGCFailedAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(size_t size,
@@ -67,8 +64,7 @@ VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(s
 }
 void VM_ParallelGCFailedPermanentAllocation::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -78,7 +74,6 @@ void VM_ParallelGCFailedPermanentAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
// Only used for System.gc() calls
@@ -91,8 +86,7 @@ VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
 }
 void VM_ParallelGCSystemGC::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
@@ -106,5 +100,4 @@ void VM_ParallelGCSystemGC::doit() {
   } else {
     heap->invoke_full_gc(false);
   }
-  notify_gc_end();
 }
@@ -31,7 +31,6 @@
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
-#include "prims/jvmtiExport.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -40,6 +39,7 @@
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #endif
 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
 HS_DTRACE_PROBE_DECL(hotspot, gc__end);
@@ -158,8 +158,7 @@ void VM_GC_HeapInspection::doit() {
 void VM_GenCollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
@@ -169,22 +168,19 @@ void VM_GenCollectForAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 void VM_GenCollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
-  notify_gc_end();
 }
 void VM_GenCollectForPermanentAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   SharedHeap* heap = (SharedHeap*)Universe::heap();
   GCCauseSetter gccs(heap, _gc_cause);
   switch (heap->kind()) {
@@ -209,5 +205,4 @@ void VM_GenCollectForPermanentAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
@@ -30,6 +30,7 @@
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/vm_operations.hpp"
+#include "prims/jvmtiExport.hpp"
// The following class hierarchy represents
// a set of operations (VM_Operation) related to GC.
@@ -209,13 +210,17 @@ class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
   HeapWord* result() const { return _res; }
 };
-class DTraceGCProbeMarker : public StackObj {
- public:
-  DTraceGCProbeMarker(bool full) {
-    VM_GC_Operation::notify_gc_begin(full);
+class SvcGCMarker : public StackObj {
+ private:
+  JvmtiGCMarker _jgcm;
+ public:
+  typedef enum { MINOR, FULL, OTHER } reason_type;
+
+  SvcGCMarker(reason_type reason) {
+    VM_GC_Operation::notify_gc_begin(reason == FULL);
   }
-  ~DTraceGCProbeMarker() {
+  ~SvcGCMarker() {
     VM_GC_Operation::notify_gc_end();
   }
 };
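One property of the new class worth spelling out: because the begin/end notifications sit in a constructor/destructor pair, every exit from a doit() body, including the early returns taken when the GC locker blocks a collection, produces a matched gc__end and GarbageCollectionFinish. A minimal, self-contained demonstration (stand-in functions, not the VM's):

```cpp
#include <iostream>

static void notify_gc_begin(bool full) { std::cout << "gc__begin full=" << full << '\n'; }
static void notify_gc_end()            { std::cout << "gc__end\n"; }

class SvcGCMarkerModel {
 public:
  enum reason_type { MINOR, FULL, OTHER };
  explicit SvcGCMarkerModel(reason_type reason) { notify_gc_begin(reason == FULL); }
  ~SvcGCMarkerModel() { notify_gc_end(); }       // runs on every exit path
};

static void doit(bool gc_locked) {
  SvcGCMarkerModel sgcm(SvcGCMarkerModel::MINOR);
  if (gc_locked) return;   // early bail-out: destructor still pairs the probes
  // ... perform the collection ...
}

int main() {
  doit(true);    // prints gc__begin full=0 / gc__end
  doit(false);   // same pairing
  return 0;
}
```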
......
@@ -13048,8 +13048,8 @@ myInit() {
   <event label="Garbage Collection Start"
          id="GarbageCollectionStart" const="JVMTI_EVENT_GARBAGE_COLLECTION_START" num="81">
     <description>
-      A Garbage Collection Start event is sent when a full cycle
-      garbage collection begins.
+      A Garbage Collection Start event is sent when a
+      garbage collection pause begins.
       Only stop-the-world collections are reported--that is, collections during
       which all threads cease to modify the state of the Java virtual machine.
       This means that some collectors will never generate these events.
@@ -13075,8 +13075,8 @@ myInit() {
   <event label="Garbage Collection Finish"
          id="GarbageCollectionFinish" const="JVMTI_EVENT_GARBAGE_COLLECTION_FINISH" num="82">
     <description>
-      A Garbage Collection Finish event is sent when a full
-      garbage collection cycle ends.
+      A Garbage Collection Finish event is sent when a
+      garbage collection pause ends.
       This event is sent while the VM is still stopped, thus
       the event handler must not use JNI functions and
       must not use <jvmti/> functions except those which
......
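The rewording matters to agents: start/finish events now bracket every stop-the-world pause, not only full cycles, so CMS and minor collections report too. A minimal agent subscribing to both events might look as follows (a hedged sketch against the standard jvmti.h; the callbacks deliberately do almost nothing, since JNI and most JVMTI functions are off-limits while the VM is stopped):

```cpp
#include <jvmti.h>
#include <string.h>

static volatile int gc_count = 0;

// The VM is stopped inside these handlers: no JNI, and only the explicitly
// permitted JVMTI functions (raw monitors, memory management, env storage).
static void JNICALL on_gc_start(jvmtiEnv* jvmti)  { }
static void JNICALL on_gc_finish(jvmtiEnv* jvmti) { gc_count++; }

JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
  jvmtiEnv* jvmti = NULL;
  if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_0) != JNI_OK) return JNI_ERR;

  jvmtiCapabilities caps;
  memset(&caps, 0, sizeof(caps));
  caps.can_generate_garbage_collection_events = 1;
  if (jvmti->AddCapabilities(&caps) != JVMTI_ERROR_NONE) return JNI_ERR;

  jvmtiEventCallbacks cb;
  memset(&cb, 0, sizeof(cb));
  cb.GarbageCollectionStart  = &on_gc_start;
  cb.GarbageCollectionFinish = &on_gc_finish;
  jvmti->SetEventCallbacks(&cb, (jint)sizeof(cb));
  jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_GARBAGE_COLLECTION_START,  NULL);
  jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_GARBAGE_COLLECTION_FINISH, NULL);
  return JNI_OK;
}
```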
@@ -2358,15 +2358,6 @@ jint JvmtiExport::load_agent_library(AttachOperation* op, outputStream* st) {
 }
 #endif // SERVICES_KERNEL
-// CMS has completed referencing processing so may need to update
-// tag maps.
-void JvmtiExport::cms_ref_processing_epilogue() {
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiTagMap::cms_ref_processing_epilogue();
-  }
-}
////////////////////////////////////////////////////////////////////////////////////////////////
// Setup current current thread for event collection.
@@ -2536,36 +2527,20 @@ NoJvmtiVMObjectAllocMark::~NoJvmtiVMObjectAllocMark() {
   }
 };
-JvmtiGCMarker::JvmtiGCMarker(bool full) : _full(full), _invocation_count(0) {
-  assert(Thread::current()->is_VM_thread(), "wrong thread");
+JvmtiGCMarker::JvmtiGCMarker() {
   // if there aren't any JVMTI environments then nothing to do
   if (!JvmtiEnv::environments_might_exist()) {
     return;
   }
-  if (ForceFullGCJVMTIEpilogues) {
-    // force 'Full GC' was done semantics for JVMTI GC epilogues
-    _full = true;
-  }
   // GarbageCollectionStart event posted from VM thread - okay because
   // JVMTI is clear that the "world is stopped" and callback shouldn't
   // try to call into the VM.
   if (JvmtiExport::should_post_garbage_collection_start()) {
     JvmtiExport::post_garbage_collection_start();
   }
-  // if "full" is false it probably means this is a scavenge of the young
-  // generation. However it could turn out that a "full" GC is required
-  // so we record the number of collections so that it can be checked in
-  // the destructor.
-  if (!_full) {
-    _invocation_count = Universe::heap()->total_full_collections();
-  }
-  // Do clean up tasks that need to be done at a safepoint
-  JvmtiEnvBase::check_for_periodic_clean_up();
+  if (SafepointSynchronize::is_at_safepoint()) {
+    // Do clean up tasks that need to be done at a safepoint
+    JvmtiEnvBase::check_for_periodic_clean_up();
+  }
 }
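The constructor loses the is_VM_thread() assert and the _full bookkeeping because, per the summary, the marker must now be safe to run when the caller is not at a safepoint; the one task that genuinely needs a stopped world, periodic clean-up, is gated on SafepointSynchronize::is_at_safepoint(). Reduced to a runnable model with invented names:

```cpp
#include <cstdio>

static bool at_safepoint = false;   // stand-in for SafepointSynchronize::is_at_safepoint()

static void post_collection_start() { std::puts("GarbageCollectionStart"); }
static void periodic_clean_up()     { std::puts("safepoint-only clean-up"); }

static void gc_marker_begin_model() {
  post_collection_start();          // allowed from either context
  if (at_safepoint) {
    periodic_clean_up();            // skipped on concurrent, non-safepoint paths
  }
}

int main() {
  gc_marker_begin_model();          // concurrent path: no clean-up
  at_safepoint = true;
  gc_marker_begin_model();          // safepointed path: clean-up runs
  return 0;
}
```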
JvmtiGCMarker::~JvmtiGCMarker() {
@@ -2578,21 +2553,5 @@ JvmtiGCMarker::~JvmtiGCMarker() {
   if (JvmtiExport::should_post_garbage_collection_finish()) {
     JvmtiExport::post_garbage_collection_finish();
   }
-  // we might have initially started out doing a scavenge of the young
-  // generation but could have ended up doing a "full" GC - check the
-  // GC count to see.
-  if (!_full) {
-    _full = (_invocation_count != Universe::heap()->total_full_collections());
-  }
-  // Full collection probably means the perm generation has been GC'ed
-  // so we clear the breakpoint cache.
-  if (_full) {
-    JvmtiCurrentBreakpoints::gc_epilogue();
-  }
-  // Notify heap/object tagging support
-  JvmtiTagMap::gc_epilogue(_full);
 }
#endif // JVMTI_KERNEL
@@ -356,9 +356,6 @@ class JvmtiExport : public AllStatic {
   // SetNativeMethodPrefix support
   static char** get_all_native_method_prefixes(int* count_ptr);
-  // call after CMS has completed referencing processing
-  static void cms_ref_processing_epilogue() KERNEL_RETURN;
 };
// Support class used by JvmtiDynamicCodeEventCollector and others. It
@@ -492,55 +489,11 @@ class NoJvmtiVMObjectAllocMark : public StackObj {
 // Base class for reporting GC events to JVMTI.
 class JvmtiGCMarker : public StackObj {
- private:
-  bool _full;                        // marks a "full" GC
-  unsigned int _invocation_count;    // GC invocation count
- protected:
-  JvmtiGCMarker(bool full) KERNEL_RETURN;    // protected
-  ~JvmtiGCMarker() KERNEL_RETURN;            // protected
-};
-
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a stop-the-world GC for failed allocation.
-//
-// Usage :-
-//
-// void VM_GenCollectForAllocation::doit() {
-//   JvmtiGCForAllocationMarker jgcm;
-//   :
-// }
-//
-// If jvmti is not enabled the constructor and destructor is essentially
-// a no-op (no overhead).
-//
-class JvmtiGCForAllocationMarker : public JvmtiGCMarker {
- public:
-  JvmtiGCForAllocationMarker() : JvmtiGCMarker(false) {
-  }
-};
-
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a "full" stop-the-world GC. This class differs
-// from JvmtiGCForAllocationMarker in that this class assumes that a
-// "full" GC will happen.
-//
-// Usage :-
-//
-// void VM_GenCollectFull::doit() {
-//   JvmtiGCFullMarker jgcm;
-//   :
-// }
-//
-class JvmtiGCFullMarker : public JvmtiGCMarker {
  public:
-  JvmtiGCFullMarker() : JvmtiGCMarker(true) {
-  }
+  JvmtiGCMarker() KERNEL_RETURN;
+  ~JvmtiGCMarker() KERNEL_RETURN;
 };
// JvmtiHideSingleStepping is a helper class for hiding
// internal single step events.
class JvmtiHideSingleStepping : public StackObj {
......
@@ -212,14 +212,7 @@ void GrowableCache::oops_do(OopClosure* f) {
   for (int i=0; i<len; i++) {
     GrowableElement *e = _elements->at(i);
     e->oops_do(f);
-  }
-}
-
-void GrowableCache::gc_epilogue() {
-  int len = _elements->length();
-  // recompute the new cache value after GC
-  for (int i=0; i<len; i++) {
-    _cache[i] = _elements->at(i)->getCacheValue();
+    _cache[i] = e->getCacheValue();
   }
 }
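Folding the cache refresh into oops_do() removes the separate gc_epilogue() pass (deleted below) and with it the window in which _cache[] could go stale between an oop-moving GC and the epilogue. The idea in miniature, with an invented element type:

```cpp
#include <vector>

struct Element {
  long value;
  long cache_value() const { return value; }  // stand-in for getCacheValue()
};

// One pass does both jobs: visit each element (a GC closure could update
// it here) and refresh the cached value in place - no second pass needed.
static void oops_do_model(std::vector<Element>& elements,
                          std::vector<long>& cache) {
  for (size_t i = 0; i < elements.size(); i++) {
    // e->oops_do(f) would run here and may change elements[i]
    cache[i] = elements[i].cache_value();
  }
}
```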
@@ -401,10 +394,6 @@ void JvmtiBreakpoints::oops_do(OopClosure* f) {
   _bps.oops_do(f);
 }
-void JvmtiBreakpoints::gc_epilogue() {
-  _bps.gc_epilogue();
-}
 void JvmtiBreakpoints::print() {
 #ifndef PRODUCT
   ResourceMark rm;
@@ -534,13 +523,6 @@ void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
   }
 }
-void JvmtiCurrentBreakpoints::gc_epilogue() {
-  if (_jvmti_breakpoints != NULL) {
-    _jvmti_breakpoints->gc_epilogue();
-  }
-}
///////////////////////////////////////////////////////////////
//
// class VM_GetOrSetLocal
......
@@ -117,7 +117,6 @@ public:
   void clear();
   // apply f to every element and update the cache
   void oops_do(OopClosure* f);
-  void gc_epilogue();
 };
@@ -149,7 +148,6 @@ public:
   void remove (int index) { _cache.remove(index); }
   void clear()            { _cache.clear(); }
   void oops_do(OopClosure* f) { _cache.oops_do(f); }
-  void gc_epilogue()      { _cache.gc_epilogue(); }
 };
@@ -278,7 +276,6 @@ public:
   int length();
   void oops_do(OopClosure* f);
-  void gc_epilogue();
   void print();
   int set(JvmtiBreakpoint& bp);
@@ -328,7 +325,6 @@ public:
   static inline bool is_breakpoint(address bcp);
   static void oops_do(OopClosure* f);
-  static void gc_epilogue();
 };
// quickly test whether the bcp matches a cached breakpoint in the list
......
@@ -45,17 +45,12 @@ class JvmtiTagMap : public CHeapObj {
  private:
   enum{
-    n_hashmaps = 2,                           // encapsulates 2 hashmaps
-    max_free_entries = 4096                   // maximum number of free entries per env
+    max_free_entries = 4096                   // maximum number of free entries per env
   };
-  // memory region for young generation
-  static MemRegion _young_gen;
-  static void get_young_generation();
   JvmtiEnv*             _env;                 // the jvmti environment
   Mutex                 _lock;                // lock for this tag map
-  JvmtiTagHashmap*      _hashmap[n_hashmaps]; // the hashmaps
+  JvmtiTagHashmap*      _hashmap;             // the hashmap
   JvmtiTagHashmapEntry* _free_entries;        // free list for this environment
   int _free_entries_count;                    // number of entries on the free list
@@ -67,11 +62,7 @@ class JvmtiTagMap : public CHeapObj {
   inline Mutex* lock() { return &_lock; }
   inline JvmtiEnv* env() const { return _env; }
-  // rehash tags maps for generation start to end
-  void rehash(int start, int end);
-  // indicates if the object is in the young generation
-  static bool is_in_young(oop o);
+  void do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);
   // iterate over all entries in this tag map
   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
@@ -81,11 +72,10 @@ class JvmtiTagMap : public CHeapObj {
   // indicates if this tag map is locked
   bool is_locked() { return lock()->is_locked(); }
-  // return the appropriate hashmap for a given object
-  JvmtiTagHashmap* hashmap_for(oop o);
+  JvmtiTagHashmap* hashmap() { return _hashmap; }
   // create/destroy entries
-  JvmtiTagHashmapEntry* create_entry(jweak ref, jlong tag);
+  JvmtiTagHashmapEntry* create_entry(oop ref, jlong tag);
   void destroy_entry(JvmtiTagHashmapEntry* entry);
   // returns true if the hashmaps are empty
@@ -134,11 +124,8 @@ class JvmtiTagMap : public CHeapObj {
                   jint* count_ptr, jobject** object_result_ptr,
                   jlong** tag_result_ptr);
-  // call post-GC to rehash the tag maps.
-  static void gc_epilogue(bool full);
-  // call after referencing processing has completed (CMS)
-  static void cms_ref_processing_epilogue();
+  static void weak_oops_do(
+                  BoolObjectClosure* is_alive, OopClosure* f) KERNEL_RETURN;
 };
 #endif // SHARE_VM_PRIMS_JVMTITAGMAP_HPP
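Taken together, the jvmtiTagMap.hpp hunks shrink the tag map from a generation-aware structure to a single table: the young/old hashmap split, the rehash-on-GC machinery, and both GC epilogues go away, while entries hold oops directly and liveness is decided in weak_oops_do(). As a before/after sketch (field names from the hunks above; everything else is a stand-in):

```cpp
class JvmtiTagHashmap;                  // opaque for this sketch

struct TagMapLayoutBefore {             // pre-change shape
  JvmtiTagHashmap* _hashmap[2];         // split by generation; rehash(start, end),
                                        // gc_epilogue(full) and the CMS epilogue
                                        // kept cached hash positions valid
};

struct TagMapLayoutAfter {              // post-change shape
  JvmtiTagHashmap* _hashmap;            // one map; do_weak_oops(is_alive, f)
                                        // culls dead entries and follows moved oops
};
```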
@@ -1198,9 +1198,6 @@ class CommandLineFlags {
   product(ccstr, TraceJVMTI, NULL,                                         \
           "Trace flags for JVMTI functions and events")                    \
                                                                            \
-  product(bool, ForceFullGCJVMTIEpilogues, false,                          \
-          "Force 'Full GC' was done semantics for JVMTI GC epilogues")     \
-                                                                           \
   /* This option can change an EMCP method into an obsolete method. */     \
   /* This can affect tests that expect specific methods to be EMCP. */     \
   /* This option should be used with caution. */                           \
......
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
+#include "prims/jvmtiTagMap.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -428,6 +429,12 @@ void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
       break;
     }
   }
+
+  /*
+   * JvmtiTagMap may also contain weak oops. The iteration of it is placed
+   * here so that we don't need to add it to each of the collectors.
+   */
+  JvmtiTagMap::weak_oops_do(is_alive, f);
 }
......