diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index 36a070a18265222afe7706e5d7d35a822c7deb63..2ca6c6d0dd60569156b8161880f6d32f710523bb 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -3478,6 +3478,7 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState);
+  ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
@@ -5940,11 +5941,6 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   }
   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
-
-  // JVMTI object tagging is based on JNI weak refs. If any of these
-  // refs were cleared then JVMTI needs to update its maps and
-  // maybe post ObjectFrees to agents.
-  JvmtiExport::cms_ref_processing_epilogue();
 }
 
 #ifndef PRODUCT
@@ -6305,6 +6301,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsInitial(true);       // asynch
       if (PrintGC) {
         _cmsGen->printOccupancy("initial-mark");
@@ -6312,6 +6309,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
       break;
     }
     case CMS_op_checkpointRootsFinal: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsFinal(true,    // asynch
                            false,   // !clear_all_soft_refs
                            false);  // !init_mark_was_synchronous
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index d1989d4a24ef6f284e61359909f104fb79a337b0..994cc683ace89cac0c2837fde44cd767b3824f9e 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/vmGCOperations.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -1142,6 +1143,8 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
     return;
   }
 
+  SvcGCMarker sgcm(SvcGCMarker::OTHER);
+
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 030aecade7725d747a759b270c31a6b92d059a8d..7bc64ce3a259c4ed4e949b61f23d9ac7ee0ec85a 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1192,7 +1192,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     return false;
   }
 
-  DTraceGCProbeMarker gc_probe_marker(true /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
 
   if (PrintHeapAtGC) {
@@ -3214,7 +3214,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     return false;
   }
 
-  DTraceGCProbeMarker gc_probe_marker(false /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
 
   if (PrintHeapAtGC) {
diff --git a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
index 1ceeead58ffece9d0c01fa59c9b442659630cf14..aaf44b02d3f223689ca410d013f83ce854cec09a 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
@@ -38,7 +38,6 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(
 }
 
 void VM_G1CollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
@@ -46,7 +45,6 @@ void VM_G1CollectForAllocation::doit() {
 }
 
 void VM_G1CollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   g1h->do_full_collection(false /* clear_all_soft_refs */);
@@ -72,7 +70,6 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
 }
 
 void VM_G1IncCollectionPause::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
index 453c791621bc22ac11f934b843cbd100a2927811..0f122a97b577311ae78f2654fb592380ea9639c2 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
@@ -42,8 +42,7 @@ VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
 }
 
 void VM_ParallelGCFailedAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -54,8 +53,6 @@ void VM_ParallelGCFailedAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-
-  notify_gc_end();
 }
 
 VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(size_t size,
@@ -67,8 +64,7 @@ VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(s
 }
 
 void VM_ParallelGCFailedPermanentAllocation::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -78,7 +74,6 @@ void VM_ParallelGCFailedPermanentAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 
 // Only used for System.gc() calls
 VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
@@ -91,8 +86,7 @@ VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
 }
 
 void VM_ParallelGCSystemGC::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
@@ -106,5 +100,4 @@ void VM_ParallelGCSystemGC::doit() {
   } else {
     heap->invoke_full_gc(false);
   }
-  notify_gc_end();
 }
diff --git a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
index 350d6dbacb62866d535a35ca3fbb86d7edb06311..5799550d1140b60e4d104372a83064c741311ac6 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -31,7 +31,6 @@
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
-#include "prims/jvmtiExport.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -40,6 +39,7 @@
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #endif
+
 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
 HS_DTRACE_PROBE_DECL(hotspot, gc__end);
@@ -158,8 +158,7 @@ void VM_GC_HeapInspection::doit() {
 
 
 void VM_GenCollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
@@ -169,22 +168,19 @@ void VM_GenCollectForAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 
 void VM_GenCollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
-  notify_gc_end();
 }
 
 void VM_GenCollectForPermanentAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
+
   SharedHeap* heap = (SharedHeap*)Universe::heap();
   GCCauseSetter gccs(heap, _gc_cause);
   switch (heap->kind()) {
@@ -209,5 +205,4 @@ void VM_GenCollectForPermanentAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
diff --git a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
index c7ee95f445bfb28d116876656023ab0f20ca86cb..1a828facadc953229c44427be8c21ce06e35e9ad 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
@@ -30,6 +30,7 @@
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/vm_operations.hpp"
+#include "prims/jvmtiExport.hpp"
 
 // The following class hierarchy represents
 // a set of operations (VM_Operation) related to GC.
@@ -209,13 +210,17 @@ class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
   HeapWord* result() const       { return _res; }
 };
 
-class DTraceGCProbeMarker : public StackObj {
-public:
-  DTraceGCProbeMarker(bool full) {
-    VM_GC_Operation::notify_gc_begin(full);
+class SvcGCMarker : public StackObj {
+ private:
+  JvmtiGCMarker _jgcm;
+ public:
+  typedef enum { MINOR, FULL, OTHER } reason_type;
+
+  SvcGCMarker(reason_type reason ) {
+    VM_GC_Operation::notify_gc_begin(reason == FULL);
   }
-  ~DTraceGCProbeMarker() {
+  ~SvcGCMarker() {
     VM_GC_Operation::notify_gc_end();
   }
 };
diff --git a/hotspot/src/share/vm/prims/jvmti.xml b/hotspot/src/share/vm/prims/jvmti.xml
index 9e24fb64b1c1dc6a40076281d42e16dc48358241..0d845050ac9c5dc98a638546ab8990a5a36bb782 100644
--- a/hotspot/src/share/vm/prims/jvmti.xml
+++ b/hotspot/src/share/vm/prims/jvmti.xml
@@ -13048,8 +13048,8 @@ myInit() {
-        A Garbage Collection Start event is sent when a full cycle
-        garbage collection begins.
+        A Garbage Collection Start event is sent when a
+        garbage collection pause begins.
         Only stop-the-world collections are reported--that is,
         collections during which all threads cease to modify the state of
         the Java virtual machine. This means that some collectors will
         never generate these events.
@@ -13075,8 +13075,8 @@ myInit() {
-        A Garbage Collection Finish event is sent when a full
-        garbage collection cycle ends.
+        A Garbage Collection Finish event is sent when a
+        garbage collection pause ends.
         This event is sent while the VM is still stopped, thus
         the event handler must not use JNI functions and
         must not use <jvmti/> functions except those which
diff --git a/hotspot/src/share/vm/prims/jvmtiExport.cpp b/hotspot/src/share/vm/prims/jvmtiExport.cpp
index ab5ced93ba460ebe9f56f00bafcd126313b123cf..6e87bbc8c901a342fd7b67727df23d11746e988f 100644
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp
@@ -2358,15 +2358,6 @@ jint JvmtiExport::load_agent_library(AttachOperation* op, outputStream* st) {
 }
 #endif // SERVICES_KERNEL
 
-// CMS has completed referencing processing so may need to update
-// tag maps.
-void JvmtiExport::cms_ref_processing_epilogue() {
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiTagMap::cms_ref_processing_epilogue();
-  }
-}
-
-
 ////////////////////////////////////////////////////////////////////////////////////////////////
 
 // Setup current current thread for event collection.
@@ -2536,36 +2527,20 @@ NoJvmtiVMObjectAllocMark::~NoJvmtiVMObjectAllocMark() {
   }
 };
 
-JvmtiGCMarker::JvmtiGCMarker(bool full) : _full(full), _invocation_count(0) {
-  assert(Thread::current()->is_VM_thread(), "wrong thread");
-
+JvmtiGCMarker::JvmtiGCMarker() {
   // if there aren't any JVMTI environments then nothing to do
   if (!JvmtiEnv::environments_might_exist()) {
     return;
   }
 
-  if (ForceFullGCJVMTIEpilogues) {
-    // force 'Full GC' was done semantics for JVMTI GC epilogues
-    _full = true;
-  }
-
-  // GarbageCollectionStart event posted from VM thread - okay because
-  // JVMTI is clear that the "world is stopped" and callback shouldn't
-  // try to call into the VM.
   if (JvmtiExport::should_post_garbage_collection_start()) {
     JvmtiExport::post_garbage_collection_start();
   }
 
-  // if "full" is false it probably means this is a scavenge of the young
-  // generation. However it could turn out that a "full" GC is required
-  // so we record the number of collections so that it can be checked in
-  // the destructor.
-  if (!_full) {
-    _invocation_count = Universe::heap()->total_full_collections();
+  if (SafepointSynchronize::is_at_safepoint()) {
+    // Do clean up tasks that need to be done at a safepoint
+    JvmtiEnvBase::check_for_periodic_clean_up();
   }
-
-  // Do clean up tasks that need to be done at a safepoint
-  JvmtiEnvBase::check_for_periodic_clean_up();
 }
@@ -2578,21 +2553,5 @@ JvmtiGCMarker::~JvmtiGCMarker() {
   if (JvmtiExport::should_post_garbage_collection_finish()) {
     JvmtiExport::post_garbage_collection_finish();
   }
-
-  // we might have initially started out doing a scavenge of the young
-  // generation but could have ended up doing a "full" GC - check the
-  // GC count to see.
-  if (!_full) {
-    _full = (_invocation_count != Universe::heap()->total_full_collections());
-  }
-
-  // Full collection probably means the perm generation has been GC'ed
-  // so we clear the breakpoint cache.
-  if (_full) {
-    JvmtiCurrentBreakpoints::gc_epilogue();
-  }
-
-  // Notify heap/object tagging support
-  JvmtiTagMap::gc_epilogue(_full);
 }
 #endif // JVMTI_KERNEL
diff --git a/hotspot/src/share/vm/prims/jvmtiExport.hpp b/hotspot/src/share/vm/prims/jvmtiExport.hpp
index 88a233831169396389d59b3ffedb64b17796574c..b46c9c126228e9f4c50205fbef6a59fb3bbbd6a4 100644
--- a/hotspot/src/share/vm/prims/jvmtiExport.hpp
+++ b/hotspot/src/share/vm/prims/jvmtiExport.hpp
@@ -356,9 +356,6 @@ class JvmtiExport : public AllStatic {
 
   // SetNativeMethodPrefix support
   static char** get_all_native_method_prefixes(int* count_ptr);
-
-  // call after CMS has completed referencing processing
-  static void cms_ref_processing_epilogue() KERNEL_RETURN;
 };
 
 // Support class used by JvmtiDynamicCodeEventCollector and others. It
@@ -492,55 +489,11 @@ class NoJvmtiVMObjectAllocMark : public StackObj {
 
 // Base class for reporting GC events to JVMTI.
 class JvmtiGCMarker : public StackObj {
- private:
-  bool _full;                        // marks a "full" GC
-  unsigned int _invocation_count;    // GC invocation count
- protected:
-  JvmtiGCMarker(bool full) KERNEL_RETURN;       // protected
-  ~JvmtiGCMarker() KERNEL_RETURN;               // protected
-};
-
-
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a stop-the-world GC for failed allocation.
-//
-// Usage :-
-//
-// void VM_GenCollectForAllocation::doit() {
-//   JvmtiGCForAllocationMarker jgcm;
-//   :
-// }
-//
-// If jvmti is not enabled the constructor and destructor is essentially
-// a no-op (no overhead).
-//
-class JvmtiGCForAllocationMarker : public JvmtiGCMarker {
  public:
-  JvmtiGCForAllocationMarker() : JvmtiGCMarker(false) {
-  }
-};
-
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a "full" stop-the-world GC. This class differs
-// from JvmtiGCForAllocationMarker in that this class assumes that a
-// "full" GC will happen.
-//
-// Usage :-
-//
-// void VM_GenCollectFull::doit() {
-//   JvmtiGCFullMarker jgcm;
-//   :
-// }
-//
-class JvmtiGCFullMarker : public JvmtiGCMarker {
- public:
-  JvmtiGCFullMarker() : JvmtiGCMarker(true) {
-  }
+  JvmtiGCMarker() KERNEL_RETURN;
+  ~JvmtiGCMarker() KERNEL_RETURN;
 };
 
-
 // JvmtiHideSingleStepping is a helper class for hiding
 // internal single step events.
 class JvmtiHideSingleStepping : public StackObj {
diff --git a/hotspot/src/share/vm/prims/jvmtiImpl.cpp b/hotspot/src/share/vm/prims/jvmtiImpl.cpp
index 90864b68abeee2fa2cf6d99e3e495781d9af2f6c..2e6a7df0f656c57e6f4c8ce75d0f2487279661d4 100644
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp
@@ -212,14 +212,7 @@ void GrowableCache::oops_do(OopClosure* f) {
   for (int i=0; i<len; i++) {
     GrowableElement *e = _elements->at(i);
     e->oops_do(f);
-  }
-}
-
-void GrowableCache::gc_epilogue() {
-  int len = _elements->length();
-  // recompute the new cache value after GC
-  for (int i=0; i<len; i++) {
-    _cache[i] = _elements->at(i)->getCacheValue();
+    _cache[i] = e->getCacheValue();
   }
 }
 
@@ -401,10 +394,6 @@ void JvmtiBreakpoints::oops_do(OopClosure* f) {
   _bps.oops_do(f);
 }
 
-void JvmtiBreakpoints::gc_epilogue() {
-  _bps.gc_epilogue();
-}
-
 void JvmtiBreakpoints::print() {
 #ifndef PRODUCT
   ResourceMark rm;
@@ -534,13 +523,6 @@ void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
   }
 }
 
-void JvmtiCurrentBreakpoints::gc_epilogue() {
-  if (_jvmti_breakpoints != NULL) {
-    _jvmti_breakpoints->gc_epilogue();
-  }
-}
-
-
 ///////////////////////////////////////////////////////////////
 //
 // class VM_GetOrSetLocal
diff --git a/hotspot/src/share/vm/prims/jvmtiImpl.hpp b/hotspot/src/share/vm/prims/jvmtiImpl.hpp
index df28abf7d721bcd77bc3c8f0acebc3ea806e8aa4..023fe93874dd3f8350a5c71c81b333078664bd1f 100644
--- a/hotspot/src/share/vm/prims/jvmtiImpl.hpp
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.hpp
@@ -117,7 +117,6 @@ public:
   void clear();
   // apply f to every element and update the cache
   void oops_do(OopClosure* f);
-  void gc_epilogue();
 };
 
@@ -149,7 +148,6 @@ public:
   void remove (int index)                 { _cache.remove(index); }
   void clear()                            { _cache.clear(); }
   void oops_do(OopClosure* f)             { _cache.oops_do(f); }
-  void gc_epilogue()                      { _cache.gc_epilogue(); }
 };
 
@@ -278,7 +276,6 @@ public:
   int length();
   void oops_do(OopClosure* f);
-  void gc_epilogue();
   void print();
 
   int  set(JvmtiBreakpoint& bp);
@@ -328,7 +325,6 @@ public:
   static inline bool is_breakpoint(address bcp);
 
   static void oops_do(OopClosure* f);
-  static void gc_epilogue();
 };
 
 // quickly test whether the bcp matches a cached breakpoint in the list
diff --git a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp
index 085952af5655f0ce41aa6fca3ec9ff027557004d..bb556ca4886c14dc2a503ef9b10e77dfe5b123a7 100644
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp
@@ -50,7 +50,7 @@
 // JvmtiTagHashmapEntry
 //
-// Each entry encapsulates a JNI weak reference to the tagged object
+// Each entry encapsulates a reference to the tagged object
 // and the tag value. In addition an entry includes a next pointer which
 // is used to chain entries together.
@@ -58,24 +58,25 @@ class JvmtiTagHashmapEntry : public CHeapObj {
  private:
   friend class JvmtiTagMap;
 
-  jweak _object;                        // JNI weak ref to tagged object
+  oop _object;                          // tagged object
   jlong _tag;                           // the tag
   JvmtiTagHashmapEntry* _next;          // next on the list
 
-  inline void init(jweak object, jlong tag) {
+  inline void init(oop object, jlong tag) {
     _object = object;
     _tag = tag;
    _next = NULL;
   }
 
   // constructor
-  JvmtiTagHashmapEntry(jweak object, jlong tag) { init(object, tag); }
+  JvmtiTagHashmapEntry(oop object, jlong tag)   { init(object, tag); }
 
  public:
 
   // accessor methods
-  inline jweak object() const     { return _object; }
-  inline jlong tag() const        { return _tag; }
+  inline oop object() const                   { return _object; }
+  inline oop* object_addr()                   { return &_object; }
+  inline jlong tag() const                    { return _tag; }
 
   inline void set_tag(jlong tag) {
     assert(tag != 0, "can't be zero");
@@ -92,9 +93,7 @@ class JvmtiTagHashmapEntry : public CHeapObj {
 // A hashmap is essentially a table of pointers to entries. Entries
 // are hashed to a location, or position in the table, and then
 // chained from that location. The "key" for hashing is address of
-// the object, or oop. The "value" is the JNI weak reference to the
-// object and the tag value. Keys are not stored with the entry.
-// Instead the weak reference is resolved to obtain the key.
+// the object, or oop. The "value" is the tag value.
 //
 // A hashmap maintains a count of the number entries in the hashmap
 // and resizes if the number of entries exceeds a given threshold.
@@ -206,7 +205,7 @@ class JvmtiTagHashmap : public CHeapObj {
       JvmtiTagHashmapEntry* entry = _table[i];
       while (entry != NULL) {
         JvmtiTagHashmapEntry* next = entry->next();
-        oop key = JNIHandles::resolve(entry->object());
+        oop key = entry->object();
         assert(key != NULL, "jni weak reference cleared!!");
         unsigned int h = hash(key, new_size);
         JvmtiTagHashmapEntry* anchor = new_table[h];
@@ -299,14 +298,12 @@ class JvmtiTagHashmap : public CHeapObj {
     unsigned int h = hash(key);
     JvmtiTagHashmapEntry* entry = _table[h];
     while (entry != NULL) {
-      oop orig_key = JNIHandles::resolve(entry->object());
-      assert(orig_key != NULL, "jni weak reference cleared!!");
-      if (key == orig_key) {
-        break;
+      if (entry->object() == key) {
+        return entry;
       }
       entry = entry->next();
     }
-    return entry;
+    return NULL;
   }
@@ -343,9 +340,7 @@ class JvmtiTagHashmap : public CHeapObj {
     JvmtiTagHashmapEntry* entry = _table[h];
     JvmtiTagHashmapEntry* prev = NULL;
     while (entry != NULL) {
-      oop orig_key = JNIHandles::resolve(entry->object());
-      assert(orig_key != NULL, "jni weak reference cleared!!");
-      if (key == orig_key) {
+      if (key == entry->object()) {
         break;
       }
       prev = entry;
@@ -418,54 +413,6 @@ void JvmtiTagHashmap::compute_next_trace_threshold() {
   }
 }
 
-// memory region for young generation
-MemRegion JvmtiTagMap::_young_gen;
-
-// get the memory region used for the young generation
-void JvmtiTagMap::get_young_generation() {
-  CollectedHeap* ch = Universe::heap();
-  switch (ch->kind()) {
-    case (CollectedHeap::GenCollectedHeap): {
-      _young_gen = ((GenCollectedHeap*)ch)->get_gen(0)->reserved();
-      break;
-    }
-#ifndef SERIALGC
-    case (CollectedHeap::ParallelScavengeHeap): {
-      _young_gen = ((ParallelScavengeHeap*)ch)->young_gen()->reserved();
-      break;
-    }
-    case (CollectedHeap::G1CollectedHeap): {
-      // Until a more satisfactory solution is implemented, all
-      // oops in the tag map will require rehash at each gc.
-      // This is a correct, if extremely inefficient solution.
-      // See RFE 6621729 for related commentary.
-      _young_gen = ch->reserved_region();
-      break;
-    }
-#endif  // !SERIALGC
-    default:
-      ShouldNotReachHere();
-  }
-}
-
-// returns true if oop is in the young generation
-inline bool JvmtiTagMap::is_in_young(oop o) {
-  assert(_young_gen.start() != NULL, "checking");
-  void* p = (void*)o;
-  bool in_young = _young_gen.contains(p);
-  return in_young;
-}
-
-// returns the appropriate hashmap for a given object
-inline JvmtiTagHashmap* JvmtiTagMap::hashmap_for(oop o) {
-  if (is_in_young(o)) {
-    return _hashmap[0];
-  } else {
-    return _hashmap[1];
-  }
-}
-
-
 // create a JvmtiTagMap
 JvmtiTagMap::JvmtiTagMap(JvmtiEnv* env) :
   _env(env),
@@ -476,13 +423,7 @@ JvmtiTagMap::JvmtiTagMap(JvmtiEnv* env) :
   assert(JvmtiThreadState_lock->is_locked(), "sanity check");
   assert(((JvmtiEnvBase *)env)->tag_map() == NULL, "tag map already exists for environment");
 
-  // create the hashmaps
-  for (int i=0; i<n_hashmaps; i++) {
-    _hashmap[i] = new JvmtiTagHashmap();
-  }
+  _hashmap = new JvmtiTagHashmap();
 
   // finally add us to the environment
   ((JvmtiEnvBase *)env)->set_tag_map(this);
@@ -496,25 +437,20 @@ JvmtiTagMap::~JvmtiTagMap() {
   // also being destroryed.
   ((JvmtiEnvBase *)_env)->set_tag_map(NULL);
 
-  // iterate over the hashmaps and destroy each of the entries
-  for (int i=0; i<n_hashmaps; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-    JvmtiTagHashmapEntry** table = hashmap->table();
-    for (int j=0; j<hashmap->size(); j++) {
-      JvmtiTagHashmapEntry *entry = table[j];
-      while (entry != NULL) {
-        JvmtiTagHashmapEntry* next = entry->next();
-        jweak ref = entry->object();
-        JNIHandles::destroy_weak_global(ref);
-        delete entry;
-        entry = next;
-      }
+  JvmtiTagHashmapEntry** table = _hashmap->table();
+  for (int j = 0; j < _hashmap->size(); j++) {
+    JvmtiTagHashmapEntry* entry = table[j];
+    while (entry != NULL) {
+      JvmtiTagHashmapEntry* next = entry->next();
+      delete entry;
+      entry = next;
     }
-
-    // finally destroy the hashmap
-    delete hashmap;
   }
+  // finally destroy the hashmap
+  delete _hashmap;
+  _hashmap = NULL;
+
   // remove any entries on the free list
   JvmtiTagHashmapEntry* entry = _free_entries;
   while (entry != NULL) {
@@ -522,12 +458,13 @@ JvmtiTagMap::~JvmtiTagMap() {
     delete entry;
     entry = next;
   }
+  _free_entries = NULL;
 }
 
 // create a hashmap entry
 // - if there's an entry on the (per-environment) free list then this
 // is returned. Otherwise an new entry is allocated.
-JvmtiTagHashmapEntry* JvmtiTagMap::create_entry(jweak ref, jlong tag) {
+JvmtiTagHashmapEntry* JvmtiTagMap::create_entry(oop ref, jlong tag) {
   assert(Thread::current()->is_VM_thread() || is_locked(), "checking");
   JvmtiTagHashmapEntry* entry;
   if (_free_entries == NULL) {
@@ -558,10 +495,10 @@ void JvmtiTagMap::destroy_entry(JvmtiTagHashmapEntry* entry) {
 // returns the tag map for the given environments. If the tag map
 // doesn't exist then it is created.
 JvmtiTagMap* JvmtiTagMap::tag_map_for(JvmtiEnv* env) {
-  JvmtiTagMap* tag_map = ((JvmtiEnvBase *)env)->tag_map();
+  JvmtiTagMap* tag_map = ((JvmtiEnvBase*)env)->tag_map();
   if (tag_map == NULL) {
     MutexLocker mu(JvmtiThreadState_lock);
-    tag_map = ((JvmtiEnvBase *)env)->tag_map();
+    tag_map = ((JvmtiEnvBase*)env)->tag_map();
     if (tag_map == NULL) {
       tag_map = new JvmtiTagMap(env);
     }
@@ -573,17 +510,13 @@ JvmtiTagMap* JvmtiTagMap::tag_map_for(JvmtiEnv* env) {
 
 // iterate over all entries in the tag map.
 void JvmtiTagMap::entry_iterate(JvmtiTagHashmapEntryClosure* closure) {
-  for (int i=0; i<n_hashmaps; i++) {
-    _hashmap[i]->entry_iterate(closure);
-  }
+  hashmap()->entry_iterate(closure);
 }
 
 // returns true if the hashmaps are empty
 bool JvmtiTagMap::is_empty() {
   assert(SafepointSynchronize::is_at_safepoint() || is_locked(), "checking");
-  assert(n_hashmaps == 2, "not implemented");
-  return ((_hashmap[0]->entry_count() == 0) && (_hashmap[1]->entry_count() == 0));
+  return hashmap()->entry_count() == 0;
 }
@@ -591,7 +524,7 @@ bool JvmtiTagMap::is_empty() {
 // not tagged
 //
 static inline jlong tag_for(JvmtiTagMap* tag_map, oop o) {
-  JvmtiTagHashmapEntry* entry = tag_map->hashmap_for(o)->find(o);
+  JvmtiTagHashmapEntry* entry = tag_map->hashmap()->find(o);
   if (entry == NULL) {
     return 0;
   } else {
@@ -655,7 +588,7 @@ class CallbackWrapper : public StackObj {
 
     // record the context
     _tag_map = tag_map;
-    _hashmap = tag_map->hashmap_for(_o);
+    _hashmap = tag_map->hashmap();
    _entry = _hashmap->find(_o);
 
     // get object tag
@@ -694,23 +627,18 @@ void inline CallbackWrapper::post_callback_tag_update(oop o,
     if (obj_tag != 0) {
       // callback has tagged the object
       assert(Thread::current()->is_VM_thread(), "must be VMThread");
-      HandleMark hm;
-      Handle h(o);
-      jweak ref = JNIHandles::make_weak_global(h);
-      entry = tag_map()->create_entry(ref, obj_tag);
+      entry = tag_map()->create_entry(o, obj_tag);
       hashmap->add(o, entry);
     }
   } else {
     // object was previously tagged - the callback may have untagged
     // the object or changed the tag value
     if (obj_tag == 0) {
-      jweak ref = entry->object();
       JvmtiTagHashmapEntry* entry_removed = hashmap->remove(o);
       assert(entry_removed == entry, "checking");
       tag_map()->destroy_entry(entry);
-      JNIHandles::destroy_weak_global(ref);
     } else {
       if (obj_tag != entry->tag()) {
         entry->set_tag(obj_tag);
@@ -760,7 +688,7 @@ class TwoOopCallbackWrapper : public CallbackWrapper {
       // for Classes the klassOop is tagged
       _referrer = klassOop_if_java_lang_Class(referrer);
       // record the context
-      _referrer_hashmap = tag_map->hashmap_for(_referrer);
+      _referrer_hashmap = tag_map->hashmap();
       _referrer_entry = _referrer_hashmap->find(_referrer);
 
       // get object tag
@@ -796,8 +724,7 @@ class TwoOopCallbackWrapper : public CallbackWrapper {
 //
 // This function is performance critical. If many threads attempt to tag objects
 // around the same time then it's possible that the Mutex associated with the
-// tag map will be a hot lock. Eliminating this lock will not eliminate the issue
-// because creating a JNI weak reference requires acquiring a global lock also.
+// tag map will be a hot lock.
 void JvmtiTagMap::set_tag(jobject object, jlong tag) {
   MutexLocker ml(lock());
 
@@ -808,22 +735,14 @@ void JvmtiTagMap::set_tag(jobject object, jlong tag) {
   o = klassOop_if_java_lang_Class(o);
 
   // see if the object is already tagged
-  JvmtiTagHashmap* hashmap = hashmap_for(o);
+  JvmtiTagHashmap* hashmap = _hashmap;
   JvmtiTagHashmapEntry* entry = hashmap->find(o);
 
   // if the object is not already tagged then we tag it
   if (entry == NULL) {
     if (tag != 0) {
-      HandleMark hm;
-      Handle h(o);
-      jweak ref = JNIHandles::make_weak_global(h);
-
-      // the object may have moved because make_weak_global may
-      // have blocked - thus it is necessary resolve the handle
-      // and re-hash the object.
-      o = h();
-      entry = create_entry(ref, tag);
-      hashmap_for(o)->add(o, entry);
+      entry = create_entry(o, tag);
+      hashmap->add(o, entry);
     } else {
       // no-op
     }
@@ -831,13 +750,9 @@ void JvmtiTagMap::set_tag(jobject object, jlong tag) {
     // if the object is already tagged then we either update
     // the tag (if a new tag value has been provided)
     // or remove the object if the new tag value is 0.
-    // Removing the object requires that we also delete the JNI
-    // weak ref to the object.
     if (tag == 0) {
-      jweak ref = entry->object();
       hashmap->remove(o);
       destroy_entry(entry);
-      JNIHandles::destroy_weak_global(ref);
     } else {
       entry->set_tag(tag);
     }
@@ -1626,8 +1541,8 @@ class TagObjectCollector : public JvmtiTagHashmapEntryClosure {
   void do_entry(JvmtiTagHashmapEntry* entry) {
     for (int i=0; i<_tag_count; i++) {
       if (_tags[i] == entry->tag()) {
-        oop o = JNIHandles::resolve(entry->object());
-        assert(o != NULL && o != JNIHandles::deleted_handle(), "sanity check");
+        oop o = entry->object();
+        assert(o != NULL, "sanity check");
 
         // the mirror is tagged
         if (o->is_klass()) {
@@ -3374,62 +3289,21 @@ void JvmtiTagMap::follow_references(jint heap_filter,
 }
 
-// called post-GC
-// - for each JVMTI environment with an object tag map, call its rehash
-// function to re-sync with the new object locations.
-void JvmtiTagMap::gc_epilogue(bool full) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+void JvmtiTagMap::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "must be executed at a safepoint");
   if (JvmtiEnv::environments_might_exist()) {
-    // re-obtain the memory region for the young generation (might
-    // changed due to adaptive resizing policy)
-    get_young_generation();
-
     JvmtiEnvIterator it;
     for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
       JvmtiTagMap* tag_map = env->tag_map();
       if (tag_map != NULL && !tag_map->is_empty()) {
-        TraceTime t(full ? "JVMTI Full Rehash " : "JVMTI Rehash ", TraceJVMTIObjectTagging);
-        if (full) {
-          tag_map->rehash(0, n_hashmaps);
-        } else {
-          tag_map->rehash(0, 0);    // tag map for young gen only
-        }
+        tag_map->do_weak_oops(is_alive, f);
       }
     }
   }
 }
 
-// CMS has completed referencing processing so we may have JNI weak refs
-// to objects in the CMS generation that have been GC'ed.
-void JvmtiTagMap::cms_ref_processing_epilogue() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-  assert(UseConcMarkSweepGC, "should only be used with CMS");
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiEnvIterator it;
-    for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
-      JvmtiTagMap* tag_map = ((JvmtiEnvBase *)env)->tag_map();
-      if (tag_map != NULL && !tag_map->is_empty()) {
-        TraceTime t("JVMTI Rehash (CMS) ", TraceJVMTIObjectTagging);
-        tag_map->rehash(1, n_hashmaps);    // assume CMS not used in young gen
-      }
-    }
-  }
-}
-
-
-// For each entry in the hashmaps 'start' to 'end' :
-//
-// 1. resolve the JNI weak reference
-//
-// 2. If it resolves to NULL it means the object has been freed so the entry
-//    is removed, the weak reference destroyed, and the object free event is
-//    posted (if enabled).
-//
-// 3. If the weak reference resolves to an object then we re-hash the object
-//    to see if it has moved or has been promoted (from the young to the old
-//    generation for example).
-//
-void JvmtiTagMap::rehash(int start, int end) {
+void JvmtiTagMap::do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f) {
 
   // does this environment have the OBJECT_FREE event enabled
   bool post_object_free = env()->is_enabled(JVMTI_EVENT_OBJECT_FREE);
@@ -3437,143 +3311,98 @@
   // counters used for trace message
   int freed = 0;
   int moved = 0;
-  int promoted = 0;
-  // we assume there are two hashmaps - one for the young generation
-  // and the other for all other spaces.
-  assert(n_hashmaps == 2, "not implemented");
-  JvmtiTagHashmap* young_hashmap = _hashmap[0];
-  JvmtiTagHashmap* other_hashmap = _hashmap[1];
+  JvmtiTagHashmap* hashmap = this->hashmap();
 
   // reenable sizing (if disabled)
-  young_hashmap->set_resizing_enabled(true);
-  other_hashmap->set_resizing_enabled(true);
-
-  // when re-hashing the hashmap corresponding to the young generation we
-  // collect the entries corresponding to objects that have been promoted.
-  JvmtiTagHashmapEntry* promoted_entries = NULL;
+  hashmap->set_resizing_enabled(true);
 
-  if (end >= n_hashmaps) {
-    end = n_hashmaps - 1;
+  // if the hashmap is empty then we can skip it
+  if (hashmap->_entry_count == 0) {
+    return;
   }
 
-  for (int i=start; i <= end; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-
-    // if the hashmap is empty then we can skip it
-    if (hashmap->_entry_count == 0) {
-      continue;
-    }
-
-    // now iterate through each entry in the table
-
-    JvmtiTagHashmapEntry** table = hashmap->table();
-    int size = hashmap->size();
-
-    for (int pos=0; pos<size; pos++) {
-      JvmtiTagHashmapEntry* entry = table[pos];
-      JvmtiTagHashmapEntry* prev = NULL;
-
-      while (entry != NULL) {
-        JvmtiTagHashmapEntry* next = entry->next();
-
-        jweak ref = entry->object();
-        oop oop = JNIHandles::resolve(ref);
+  JvmtiTagHashmapEntry** table = hashmap->table();
+  int size = hashmap->size();
 
-        // has object been GC'ed
-        if (oop == NULL) {
-          // grab the tag
-          jlong tag = entry->tag();
-          guarantee(tag != 0, "checking");
+  JvmtiTagHashmapEntry* delayed_add = NULL;
 
-          // remove GC'ed entry from hashmap and return the
-          // entry to the free list
-          hashmap->remove(prev, pos, entry);
-          destroy_entry(entry);
+  for (int pos = 0; pos < size; ++pos) {
+    JvmtiTagHashmapEntry* entry = table[pos];
+    JvmtiTagHashmapEntry* prev = NULL;
 
-          // destroy the weak ref
-          JNIHandles::destroy_weak_global(ref);
+    while (entry != NULL) {
+      JvmtiTagHashmapEntry* next = entry->next();
 
-          // post the event to the profiler
-          if (post_object_free) {
-            JvmtiExport::post_object_free(env(), tag);
-          }
+      oop* obj = entry->object_addr();
 
-          freed++;
-          entry = next;
-          continue;
-        }
+      // has object been GC'ed
+      if (!is_alive->do_object_b(entry->object())) {
+        // grab the tag
+        jlong tag = entry->tag();
+        guarantee(tag != 0, "checking");
 
-        // if this is the young hashmap then the object is either promoted
-        // or moved.
-        // if this is the other hashmap then the object is moved.
+        // remove GC'ed entry from hashmap and return the
+        // entry to the free list
+        hashmap->remove(prev, pos, entry);
+        destroy_entry(entry);
 
-        bool same_gen;
-        if (i == 0) {
-          assert(hashmap == young_hashmap, "checking");
-          same_gen = is_in_young(oop);
-        } else {
-          same_gen = true;
+        // post the event to the profiler
+        if (post_object_free) {
+          JvmtiExport::post_object_free(env(), tag);
         }
-
-        if (same_gen) {
-          // if the object has moved then re-hash it and move its
-          // entry to its new location.
-          unsigned int new_pos = JvmtiTagHashmap::hash(oop, size);
-          if (new_pos != (unsigned int)pos) {
-            if (prev == NULL) {
-              table[pos] = next;
-            } else {
-              prev->set_next(next);
-            }
+        ++freed;
+      } else {
+        f->do_oop(entry->object_addr());
+        oop new_oop = entry->object();
+
+        // if the object has moved then re-hash it and move its
+        // entry to its new location.
+        unsigned int new_pos = JvmtiTagHashmap::hash(new_oop, size);
+        if (new_pos != (unsigned int)pos) {
+          if (prev == NULL) {
+            table[pos] = next;
+          } else {
+            prev->set_next(next);
+          }
+          if (new_pos < (unsigned int)pos) {
             entry->set_next(table[new_pos]);
             table[new_pos] = entry;
-            moved++;
           } else {
-            // object didn't move
-            prev = entry;
+            // Delay adding this entry to it's new position as we'd end up
+            // hitting it again during this iteration.
+            entry->set_next(delayed_add);
+            delayed_add = entry;
           }
+          moved++;
         } else {
-          // object has been promoted so remove the entry from the
-          // young hashmap
-          assert(hashmap == young_hashmap, "checking");
-          hashmap->remove(prev, pos, entry);
-
-          // move the entry to the promoted list
-          entry->set_next(promoted_entries);
-          promoted_entries = entry;
+          // object didn't move
+          prev = entry;
         }
-
-        entry = next;
       }
+
+      entry = next;
     }
   }
-
-  // add the entries, corresponding to the promoted objects, to the
-  // other hashmap.
-  JvmtiTagHashmapEntry* entry = promoted_entries;
-  while (entry != NULL) {
-    oop o = JNIHandles::resolve(entry->object());
-    assert(hashmap_for(o) == other_hashmap, "checking");
-    JvmtiTagHashmapEntry* next = entry->next();
-    other_hashmap->add(o, entry);
-    entry = next;
-    promoted++;
+
+  // Re-add all the entries which were kept aside
+  while (delayed_add != NULL) {
+    JvmtiTagHashmapEntry* next = delayed_add->next();
+    unsigned int pos = JvmtiTagHashmap::hash(delayed_add->object(), size);
+    delayed_add->set_next(table[pos]);
+    table[pos] = delayed_add;
+    delayed_add = next;
  }
 
   // stats
   if (TraceJVMTIObjectTagging) {
-    int total_moves = promoted + moved;
-
-    int post_total = 0;
-    for (int i=0; i<n_hashmaps; i++) {
-      post_total += _hashmap[i]->_entry_count;
-    }
+    int post_total = hashmap->_entry_count;
     int pre_total = post_total + freed;
 
-    tty->print("(%d->%d, %d freed, %d promoted, %d total moves)",
-      pre_total, post_total, freed, promoted, total_moves);
+    tty->print_cr("(%d->%d, %d freed, %d total moves)",
+      pre_total, post_total, freed, moved);
   }
 }
diff --git a/hotspot/src/share/vm/prims/jvmtiTagMap.hpp b/hotspot/src/share/vm/prims/jvmtiTagMap.hpp
index f94e74f71c2f5d0eae11d754fc04fc599b23569f..9abdd9b37a19d9878b9435575ec7fb36ef14f222 100644
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.hpp
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.hpp
@@ -45,17 +45,12 @@ class JvmtiTagMap : public CHeapObj {
  private:
 
   enum{
-    n_hashmaps = 2,                    // encapsulates 2 hashmaps
-    max_free_entries = 4096            // maximum number of free entries per env
+    max_free_entries = 4096            // maximum number of free entries per env
   };
 
-  // memory region for young generation
-  static MemRegion _young_gen;
-  static void get_young_generation();
-
   JvmtiEnv*             _env;                      // the jvmti environment
   Mutex                 _lock;                     // lock for this tag map
-  JvmtiTagHashmap*      _hashmap[n_hashmaps];      // the hashmaps
+  JvmtiTagHashmap*      _hashmap;                  // the hashmap
 
   JvmtiTagHashmapEntry* _free_entries;             // free list for this environment
   int _free_entries_count;                         // number of entries on the free list
@@ -67,11 +62,7 @@ class JvmtiTagMap : public CHeapObj {
   inline Mutex* lock()                      { return &_lock; }
   inline JvmtiEnv* env() const              { return _env; }
 
-  // rehash tags maps for generation start to end
-  void rehash(int start, int end);
-
-  // indicates if the object is in the young generation
-  static bool is_in_young(oop o);
+  void do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);
 
   // iterate over all entries in this tag map
   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
@@ -81,11 +72,10 @@ class JvmtiTagMap : public CHeapObj {
   // indicates if this tag map is locked
   bool is_locked()                          { return lock()->is_locked(); }
 
-  // return the appropriate hashmap for a given object
-  JvmtiTagHashmap* hashmap_for(oop o);
+  JvmtiTagHashmap* hashmap() { return _hashmap; }
 
   // create/destroy entries
-  JvmtiTagHashmapEntry* create_entry(jweak ref, jlong tag);
+  JvmtiTagHashmapEntry* create_entry(oop ref, jlong tag);
   void destroy_entry(JvmtiTagHashmapEntry* entry);
 
   // returns true if the hashmaps are empty
@@ -134,11 +124,8 @@ class JvmtiTagMap : public CHeapObj {
                                   jint* count_ptr, jobject** object_result_ptr,
                                   jlong** tag_result_ptr);
 
-  // call post-GC to rehash the tag maps.
-  static void gc_epilogue(bool full);
-
-  // call after referencing processing has completed (CMS)
-  static void cms_ref_processing_epilogue();
+  static void weak_oops_do(
+      BoolObjectClosure* is_alive, OopClosure* f) KERNEL_RETURN;
 };
 
 #endif // SHARE_VM_PRIMS_JVMTITAGMAP_HPP
diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp
index 07bda32a07fd98451ae1eb0c412a895182fbf959..7024dacb9bcf5768bef04d80c0cdf833bd46f309 100644
--- a/hotspot/src/share/vm/runtime/globals.hpp
+++ b/hotspot/src/share/vm/runtime/globals.hpp
@@ -1198,9 +1198,6 @@ class CommandLineFlags {
   product(ccstr, TraceJVMTI, NULL,                                          \
           "Trace flags for JVMTI functions and events")                     \
                                                                             \
-  product(bool, ForceFullGCJVMTIEpilogues, false,                           \
-          "Force 'Full GC' was done semantics for JVMTI GC epilogues")      \
-                                                                            \
   /* This option can change an EMCP method into an obsolete method. */      \
   /* This can affect tests that except specific methods to be EMCP. */      \
   /* This option should be used with caution. */                            \
diff --git a/hotspot/src/share/vm/runtime/jniHandles.cpp b/hotspot/src/share/vm/runtime/jniHandles.cpp
index 357061f3ea0d07db3cf0c067fd975045d45e5fd2..40f82864423ab8611ae1322786f57577498aae8e 100644
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
+#include "prims/jvmtiTagMap.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -428,6 +429,12 @@ void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
       break;
     }
  }
+
+  /*
+   * JvmtiTagMap may also contain weak oops. The iteration of it is placed
+   * here so that we don't need to add it to each of the collectors.
+   */
+  JvmtiTagMap::weak_oops_do(is_alive, f);
 }