提交 cc8f4f73 编写于 作者: N neugens

8229401: Fix JFR code cache test failures

8223689: Add JFR Thread Sampling Support
8223690: Add JFR BiasedLock Event Support
8223691: Add JFR G1 Region Type Change Event Support
8223692: Add JFR G1 Heap Summary Event Support
Summary: Backport JFR from JDK11, additional fixes
Reviewed-by: neugens, apetushkov
Contributed-by: denghui.ddh@alibaba-inc.com
上级 99b7699d
...@@ -58,7 +58,7 @@ ...@@ -58,7 +58,7 @@
#include "c1/c1_Runtime1.hpp" #include "c1/c1_Runtime1.hpp"
#endif #endif
unsigned int align_code_offset(int offset) { unsigned int CodeBlob::align_code_offset(int offset) {
// align the size to CodeEntryAlignment // align the size to CodeEntryAlignment
return return
((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1)) ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
......
...@@ -30,6 +30,15 @@ ...@@ -30,6 +30,15 @@
#include "runtime/frame.hpp" #include "runtime/frame.hpp"
#include "runtime/handles.hpp" #include "runtime/handles.hpp"
// Identifiers for the kinds of CodeBlobs stored in the CodeCache.
// This VM variant uses a single, unsegmented code cache, so only one
// blob type is defined; the enum exists for JFR serialization compatibility.
struct CodeBlobType {
  enum {
    All      = 0,  // every blob lives in the one shared code heap
    NumTypes = 1   // total number of defined blob types
  };
};
// CodeBlob - superclass for all entries in the CodeCache. // CodeBlob - superclass for all entries in the CodeCache.
// //
// Subtypes are: // Subtypes are:
...@@ -71,6 +80,7 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC { ...@@ -71,6 +80,7 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
public: public:
// Returns the space needed for CodeBlob // Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size); static unsigned int allocation_size(CodeBuffer* cb, int header_size);
static unsigned int align_code_offset(int offset);
// Creation // Creation
// a) simple CodeBlob // a) simple CodeBlob
...@@ -205,6 +215,7 @@ class BufferBlob: public CodeBlob { ...@@ -205,6 +215,7 @@ class BufferBlob: public CodeBlob {
friend class AdapterBlob; friend class AdapterBlob;
friend class VtableBlob; friend class VtableBlob;
friend class MethodHandlesAdapterBlob; friend class MethodHandlesAdapterBlob;
friend class WhiteBox;
private: private:
// Creation support // Creation support
......
...@@ -189,6 +189,12 @@ CodeBlob* CodeCache::allocate(int size, bool is_critical) { ...@@ -189,6 +189,12 @@ CodeBlob* CodeCache::allocate(int size, bool is_critical) {
if (cb != NULL) break; if (cb != NULL) break;
if (!_heap->expand_by(CodeCacheExpansionSize)) { if (!_heap->expand_by(CodeCacheExpansionSize)) {
// Expansion failed // Expansion failed
if (CodeCache_lock->owned_by_self()) {
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
report_codemem_full();
} else {
report_codemem_full();
}
return NULL; return NULL;
} }
if (PrintCodeCacheExtension) { if (PrintCodeCacheExtension) {
...@@ -780,6 +786,7 @@ void CodeCache::report_codemem_full() { ...@@ -780,6 +786,7 @@ void CodeCache::report_codemem_full() {
_codemem_full_count++; _codemem_full_count++;
EventCodeCacheFull event; EventCodeCacheFull event;
if (event.should_commit()) { if (event.should_commit()) {
event.set_codeBlobType((u1)CodeBlobType::All);
event.set_startAddress((u8)low_bound()); event.set_startAddress((u8)low_bound());
event.set_commitedTopAddress((u8)high()); event.set_commitedTopAddress((u8)high());
event.set_reservedTopAddress((u8)high_bound()); event.set_reservedTopAddress((u8)high_bound());
......
...@@ -3572,6 +3572,28 @@ void G1CollectedHeap::print_all_rsets() { ...@@ -3572,6 +3572,28 @@ void G1CollectedHeap::print_all_rsets() {
} }
#endif // PRODUCT #endif // PRODUCT
// Builds a G1-specific heap summary snapshot for GC tracing / JFR.
// Eden capacity is derived from the young-list target length minus the
// space currently consumed by survivor regions.
G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
  // Avoid re-taking the Heap_lock if the caller already holds it.
  const size_t used_bytes    = Heap_lock->owned_by_self() ? used() : used_unlocked();
  const size_t eden_used     = _young_list->eden_used_bytes();
  const size_t survivor_used = _young_list->survivor_used_bytes();
  const size_t eden_capacity =
    (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used;
  VirtualSpaceSummary spaces = create_heap_space_summary();
  return G1HeapSummary(spaces, used_bytes, eden_used,
                       eden_capacity, survivor_used, num_regions());
}
// Reports the G1 heap summary and the metaspace summary to the given GC
// tracer for the given collection phase (before/after GC).
void G1CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  gc_tracer->report_gc_heap_summary(when, create_g1_heap_summary());
  gc_tracer->report_metaspace_summary(when, create_metaspace_summary());
}
G1CollectedHeap* G1CollectedHeap::heap() { G1CollectedHeap* G1CollectedHeap::heap() {
assert(_sh->kind() == CollectedHeap::G1CollectedHeap, assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
"not a garbage-first heap"); "not a garbage-first heap");
......
...@@ -375,6 +375,8 @@ private: ...@@ -375,6 +375,8 @@ private:
size_t size, size_t size,
size_t translation_factor); size_t translation_factor);
void trace_heap(GCWhen::Type when, GCTracer* tracer);
double verify(bool guard, const char* msg); double verify(bool guard, const char* msg);
void verify_before_gc(); void verify_before_gc();
void verify_after_gc(); void verify_after_gc();
...@@ -1621,6 +1623,8 @@ public: ...@@ -1621,6 +1623,8 @@ public:
bool is_obj_dead_cond(const oop obj, bool is_obj_dead_cond(const oop obj,
const VerifyOption vo) const; const VerifyOption vo) const;
G1HeapSummary create_g1_heap_summary();
// Printing // Printing
virtual void print_on(outputStream* st) const; virtual void print_on(outputStream* st) const;
......
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1HEAPREGIONTRACETYPE_HPP
#define SHARE_GC_G1_G1HEAPREGIONTRACETYPE_HPP
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
// Trace-facing identifiers for G1 heap region types, used by the JFR
// G1HeapRegionTypeChange event and its serialized constant pool.
class G1HeapRegionTraceType : AllStatic {
 public:
  // Values are serialized as JFR constant keys (see the serializer that
  // iterates up to G1HeapRegionTypeEndSentinel), so the ordering must not
  // change without updating the recorder side as well.
  enum Type {
    Free,
    Eden,
    Survivor,
    StartsHumongous,
    ContinuesHumongous,
    Old,
    G1HeapRegionTypeEndSentinel  // count sentinel, not a real region type
  };

  // Maps a region trace type to its human-readable JFR constant name.
  static const char* to_string(G1HeapRegionTraceType::Type type) {
    switch (type) {
      case Free: return "Free";
      case Eden: return "Eden";
      case Survivor: return "Survivor";
      case StartsHumongous: return "Starts Humongous";
      case ContinuesHumongous: return "Continues Humongous";
      case Old: return "Old";
      default: ShouldNotReachHere(); return NULL;
    }
  }
};
#endif // SHARE_GC_G1_G1HEAPREGIONTRACETYPE_HPP
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include "memory/space.inline.hpp" #include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/orderAccess.inline.hpp" #include "runtime/orderAccess.inline.hpp"
#include "gc_implementation/g1/heapRegionTracer.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
...@@ -211,6 +212,31 @@ void HeapRegion::calc_gc_efficiency() { ...@@ -211,6 +212,31 @@ void HeapRegion::calc_gc_efficiency() {
_gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms; _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
} }
// Region type transitions. Each setter first emits a JFR
// G1HeapRegionTypeChange event (current type -> new type) via
// report_region_type_change(), then updates the underlying type tag.

void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}

void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}

// Pre-GC variant: reported as an Eden transition for tracing purposes,
// but uses the pre-GC tag update path on the underlying type.
void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}

void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}

void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) { void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
assert(!isHumongous(), "sanity / pre-condition"); assert(!isHumongous(), "sanity / pre-condition");
assert(end() == _orig_end, assert(end() == _orig_end,
...@@ -218,6 +244,7 @@ void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) { ...@@ -218,6 +244,7 @@ void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
assert(top() == bottom(), "should be empty"); assert(top() == bottom(), "should be empty");
assert(bottom() <= new_top && new_top <= new_end, "pre-condition"); assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
_type.set_starts_humongous(); _type.set_starts_humongous();
_humongous_start_region = this; _humongous_start_region = this;
...@@ -232,6 +259,7 @@ void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) { ...@@ -232,6 +259,7 @@ void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
assert(top() == bottom(), "should be empty"); assert(top() == bottom(), "should be empty");
assert(first_hr->startsHumongous(), "pre-condition"); assert(first_hr->startsHumongous(), "pre-condition");
report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
_type.set_continues_humongous(); _type.set_continues_humongous();
_humongous_start_region = first_hr; _humongous_start_region = first_hr;
} }
...@@ -303,6 +331,14 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { ...@@ -303,6 +331,14 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
record_timestamp(); record_timestamp();
} }
// Forwards a region type transition to the JFR tracer, recording the
// region's index, its current (old) trace type, the new type, the
// region's bottom address, and the bytes currently in use.
void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used());
}
CompactibleSpace* HeapRegion::next_compaction_space() const { CompactibleSpace* HeapRegion::next_compaction_space() const {
return G1CollectedHeap::heap()->next_compaction_region(this); return G1CollectedHeap::heap()->next_compaction_region(this);
} }
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include "memory/space.inline.hpp" #include "memory/space.inline.hpp"
#include "memory/watermark.hpp" #include "memory/watermark.hpp"
#include "utilities/macros.hpp" #include "utilities/macros.hpp"
#include "gc_implementation/g1/g1HeapRegionTraceType.hpp"
// A HeapRegion is the smallest piece of a G1CollectedHeap that // A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently. // can be collected independently.
...@@ -211,6 +212,8 @@ class HeapRegion: public G1OffsetTableContigSpace { ...@@ -211,6 +212,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; } G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
void report_region_type_change(G1HeapRegionTraceType::Type to);
protected: protected:
// The index of this region in the heap region sequence. // The index of this region in the heap region sequence.
uint _hrm_index; uint _hrm_index;
...@@ -405,6 +408,7 @@ class HeapRegion: public G1OffsetTableContigSpace { ...@@ -405,6 +408,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
const char* get_type_str() const { return _type.get_str(); } const char* get_type_str() const { return _type.get_str(); }
const char* get_short_type_str() const { return _type.get_short_str(); } const char* get_short_type_str() const { return _type.get_short_str(); }
G1HeapRegionTraceType::Type get_trace_type() { return _type.get_trace_type(); }
bool is_free() const { return _type.is_free(); } bool is_free() const { return _type.is_free(); }
...@@ -667,13 +671,13 @@ class HeapRegion: public G1OffsetTableContigSpace { ...@@ -667,13 +671,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
} }
} }
void set_free() { _type.set_free(); } void set_free();
void set_eden() { _type.set_eden(); } void set_eden();
void set_eden_pre_gc() { _type.set_eden_pre_gc(); } void set_eden_pre_gc();
void set_survivor() { _type.set_survivor(); } void set_survivor();
void set_old() { _type.set_old(); } void set_old();
// Determine if an object has been allocated since the last // Determine if an object has been allocated since the last
// mark performed by the collector. This returns true iff the object // mark performed by the collector. This returns true iff the object
......
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegionTracer.hpp"
#include "jfr/jfrEvents.hpp"
// Emits a JFR G1HeapRegionTypeChange event for one heap region, if the
// event is currently enabled in the active recording.
void HeapRegionTracer::send_region_type_change(uint index,
                                               G1HeapRegionTraceType::Type from,
                                               G1HeapRegionTraceType::Type to,
                                               uintptr_t start,
                                               size_t used) {
  EventG1HeapRegionTypeChange evt;
  if (!evt.should_commit()) {
    return;  // event disabled; skip field population entirely
  }
  evt.set_index(index);
  evt.set_from(from);
  evt.set_to(to);
  evt.set_start(start);
  evt.set_used(used);
  evt.commit();
}
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_HEAPREGIONTRACER_HPP
#define SHARE_GC_G1_HEAPREGIONTRACER_HPP
#include "gc_implementation/g1/g1HeapRegionTraceType.hpp"
#include "memory/allocation.hpp"
// Static facade that translates G1 heap region type transitions into
// JFR G1HeapRegionTypeChange events.
class HeapRegionTracer : AllStatic {
  public:
    // index  - region index in the heap region sequence
    // from   - trace type the region is transitioning away from
    // to     - trace type the region is transitioning to
    // start  - region bottom address
    // used   - bytes in use in the region at transition time
    static void send_region_type_change(uint index,
                                        G1HeapRegionTraceType::Type from,
                                        G1HeapRegionTraceType::Type to,
                                        uintptr_t start,
                                        size_t used);
};
#endif // SHARE_GC_G1_HEAPREGIONTRACER_HPP
...@@ -67,3 +67,18 @@ const char* HeapRegionType::get_short_str() const { ...@@ -67,3 +67,18 @@ const char* HeapRegionType::get_short_str() const {
// keep some compilers happy // keep some compilers happy
return NULL; return NULL;
} }
// Maps this region's internal type tag to the corresponding JFR trace
// type. Asserts the tag is valid; an unknown tag is a VM bug.
G1HeapRegionTraceType::Type HeapRegionType::get_trace_type() {
  hrt_assert_is_valid(_tag);
  switch (_tag) {
    case FreeTag: return G1HeapRegionTraceType::Free;
    case EdenTag: return G1HeapRegionTraceType::Eden;
    case SurvTag: return G1HeapRegionTraceType::Survivor;
    case HumStartsTag: return G1HeapRegionTraceType::StartsHumongous;
    case HumContTag: return G1HeapRegionTraceType::ContinuesHumongous;
    case OldTag: return G1HeapRegionTraceType::Old;
    default:
      ShouldNotReachHere();
      return G1HeapRegionTraceType::Free; // keep some compilers happy
  }
}
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "gc_implementation/g1/g1HeapRegionTraceType.hpp"
#define hrt_assert_is_valid(tag) \ #define hrt_assert_is_valid(tag) \
assert(is_valid((tag)), err_msg("invalid HR type: %u", (uint) (tag))) assert(is_valid((tag)), err_msg("invalid HR type: %u", (uint) (tag)))
...@@ -127,6 +128,7 @@ public: ...@@ -127,6 +128,7 @@ public:
const char* get_str() const; const char* get_str() const;
const char* get_short_str() const; const char* get_short_str() const;
G1HeapRegionTraceType::Type get_trace_type();
HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); } HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
}; };
......
...@@ -78,11 +78,13 @@ class MetaspaceSizes : public StackObj { ...@@ -78,11 +78,13 @@ class MetaspaceSizes : public StackObj {
class GCHeapSummary; class GCHeapSummary;
class PSHeapSummary; class PSHeapSummary;
class G1HeapSummary;
class GCHeapSummaryVisitor { class GCHeapSummaryVisitor {
public: public:
virtual void visit(const GCHeapSummary* heap_summary) const = 0; virtual void visit(const GCHeapSummary* heap_summary) const = 0;
virtual void visit(const PSHeapSummary* heap_summary) const {} virtual void visit(const PSHeapSummary* heap_summary) const {}
virtual void visit(const G1HeapSummary* heap_summary) const {}
}; };
class GCHeapSummary : public StackObj { class GCHeapSummary : public StackObj {
...@@ -125,6 +127,24 @@ class PSHeapSummary : public GCHeapSummary { ...@@ -125,6 +127,24 @@ class PSHeapSummary : public GCHeapSummary {
} }
}; };
// G1-specific heap summary: augments the generic GCHeapSummary with
// eden/survivor occupancy and the total region count. Consumed by the
// JFR G1HeapSummary event via the GCHeapSummaryVisitor double dispatch.
class G1HeapSummary : public GCHeapSummary {
  size_t _edenUsed;        // bytes used in eden regions
  size_t _edenCapacity;    // eden target capacity in bytes
  size_t _survivorUsed;    // bytes used in survivor regions
  uint   _numberOfRegions; // total number of heap regions
 public:
  G1HeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, size_t edenUsed, size_t edenCapacity, size_t survivorUsed, uint numberOfRegions) :
    GCHeapSummary(heap_space, heap_used), _edenUsed(edenUsed), _edenCapacity(edenCapacity), _survivorUsed(survivorUsed), _numberOfRegions(numberOfRegions) { }

  // Accessors return by value; the previous top-level 'const' on the
  // return types was meaningless (ignored-qualifiers warning) and has
  // been dropped — callers are unaffected.
  size_t edenUsed() const { return _edenUsed; }
  size_t edenCapacity() const { return _edenCapacity; }
  size_t survivorUsed() const { return _survivorUsed; }
  uint numberOfRegions() const { return _numberOfRegions; }

  // Double dispatch into the visitor so the G1-specific overload runs.
  virtual void accept(GCHeapSummaryVisitor* visitor) const {
    visitor->visit(this);
  }
};
class MetaspaceSummary : public StackObj { class MetaspaceSummary : public StackObj {
size_t _capacity_until_GC; size_t _capacity_until_GC;
MetaspaceSizes _meta_space; MetaspaceSizes _meta_space;
......
...@@ -349,20 +349,20 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor { ...@@ -349,20 +349,20 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
} }
} }
// void visit(const G1HeapSummary* g1_heap_summary) const { void visit(const G1HeapSummary* g1_heap_summary) const {
// visit((GCHeapSummary*)g1_heap_summary); visit((GCHeapSummary*)g1_heap_summary);
//
// EventG1HeapSummary e; EventG1HeapSummary e;
// if (e.should_commit()) { if (e.should_commit()) {
// e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(_gc_id.id());
// e.set_when((u1)_when); e.set_when((u1)_when);
// e.set_edenUsedSize(g1_heap_summary->edenUsed()); e.set_edenUsedSize(g1_heap_summary->edenUsed());
// e.set_edenTotalSize(g1_heap_summary->edenCapacity()); e.set_edenTotalSize(g1_heap_summary->edenCapacity());
// e.set_survivorUsedSize(g1_heap_summary->survivorUsed()); e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
// e.set_numberOfRegions(g1_heap_summary->numberOfRegions()); e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
// e.commit(); e.commit();
// } }
// } }
void visit(const PSHeapSummary* ps_heap_summary) const { void visit(const PSHeapSummary* ps_heap_summary) const {
visit((GCHeapSummary*)ps_heap_summary); visit((GCHeapSummary*)ps_heap_summary);
......
...@@ -321,7 +321,8 @@ class JfrThreadSampler : public Thread { ...@@ -321,7 +321,8 @@ class JfrThreadSampler : public Thread {
volatile bool _disenrolled; volatile bool _disenrolled;
static Monitor* _transition_block_lock; static Monitor* _transition_block_lock;
// JavaThread* next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current); int find_index_of_JavaThread(JavaThread** t_list, uint length, JavaThread *target);
JavaThread* next_thread(JavaThread** t_list, uint length, JavaThread* first_sampled, JavaThread* current);
void task_stacktrace(JfrSampleType type, JavaThread** last_thread); void task_stacktrace(JfrSampleType type, JavaThread** last_thread);
JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames); JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames);
~JfrThreadSampler(); ~JfrThreadSampler();
...@@ -344,7 +345,7 @@ class JfrThreadSampler : public Thread { ...@@ -344,7 +345,7 @@ class JfrThreadSampler : public Thread {
Monitor* JfrThreadSampler::_transition_block_lock = new Monitor(Mutex::leaf, "Trace block", true); Monitor* JfrThreadSampler::_transition_block_lock = new Monitor(Mutex::leaf, "Trace block", true);
static void clear_transition_block(JavaThread* jt) { static void clear_transition_block(JavaThread* jt) {
// jt->clear_trace_flag(); jt->clear_trace_flag();
JfrThreadLocal* const tl = jt->jfr_thread_local(); JfrThreadLocal* const tl = jt->jfr_thread_local();
if (tl->is_trace_block()) { if (tl->is_trace_block()) {
MutexLockerEx ml(JfrThreadSampler::transition_block(), Mutex::_no_safepoint_check_flag); MutexLockerEx ml(JfrThreadSampler::transition_block(), Mutex::_no_safepoint_check_flag);
...@@ -359,7 +360,7 @@ bool JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame* ...@@ -359,7 +360,7 @@ bool JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame*
} }
bool ret = false; bool ret = false;
// thread->set_trace_flag(); thread->set_trace_flag();
if (!UseMembar) { if (!UseMembar) {
os::serialize_thread_states(); os::serialize_thread_states();
} }
...@@ -398,37 +399,61 @@ void JfrThreadSampler::on_javathread_suspend(JavaThread* thread) { ...@@ -398,37 +399,61 @@ void JfrThreadSampler::on_javathread_suspend(JavaThread* thread) {
JfrThreadLocal* const tl = thread->jfr_thread_local(); JfrThreadLocal* const tl = thread->jfr_thread_local();
tl->set_trace_block(); tl->set_trace_block();
{ {
// MutexLockerEx ml(transition_block(), Mutex::_no_safepoint_check_flag); MutexLockerEx ml(transition_block(), Mutex::_no_safepoint_check_flag);
// while (thread->is_trace_suspend()) { while (thread->is_trace_suspend()) {
// transition_block()->wait(true); transition_block()->wait(true);
// } }
// tl->clear_trace_block(); tl->clear_trace_block();
} }
} }
//JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current) { int JfrThreadSampler::find_index_of_JavaThread(JavaThread** t_list, uint length, JavaThread *target) {
// assert(t_list != NULL, "invariant"); assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
// assert(Threads_lock->owned_by_self(), "Holding the thread table lock."); if (target == NULL) {
// assert(_cur_index >= -1 && (uint)_cur_index + 1 <= t_list->length(), "invariant"); return -1;
// assert((current == NULL && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant"); }
// if ((uint)_cur_index + 1 == t_list->length()) { for (uint i = 0; i < length; i++) {
// // wrap if (target == t_list[i]) {
// _cur_index = 0; return (int)i;
// } else { }
// _cur_index++; }
// } return -1;
// assert(_cur_index >= 0 && (uint)_cur_index < t_list->length(), "invariant"); }
// JavaThread* const next = t_list->thread_at(_cur_index);
// return next != first_sampled ? next : NULL; JavaThread* JfrThreadSampler::next_thread(JavaThread** t_list, uint length, JavaThread* first_sampled, JavaThread* current) {
//} assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
if (current == NULL) {
_cur_index = 0;
return t_list[_cur_index];
}
if (_cur_index == -1 || t_list[_cur_index] != current) {
// 'current' is not at '_cur_index' so find it:
_cur_index = find_index_of_JavaThread(t_list, length, current);
assert(_cur_index != -1, "current JavaThread should be findable.");
}
_cur_index++;
JavaThread* next = NULL;
// wrap
if ((uint)_cur_index >= length) {
_cur_index = 0;
}
next = t_list[_cur_index];
// sample wrap
if (next == first_sampled) {
return NULL;
}
return next;
}
void JfrThreadSampler::start_thread() { void JfrThreadSampler::start_thread() {
// XXX TODO implement sampling if (os::create_thread(this, os::os_thread)) {
// if (os::create_thread(this, os::os_thread)) { os::start_thread(this);
// os::start_thread(this); } else {
// } else { tty->print_cr("Failed to create thread for thread sampling");
// if (true) tty->print_cr("Failed to create thread for thread sampling"); }
// }
} }
void JfrThreadSampler::enroll() { void JfrThreadSampler::enroll() {
...@@ -510,28 +535,33 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr ...@@ -510,28 +535,33 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr
elapsedTimer sample_time; elapsedTimer sample_time;
sample_time.start(); sample_time.start();
{ {
// MonitorLockerEx tlock(Threads_lock, Mutex::_allow_vm_block_flag); MonitorLockerEx tlock(Threads_lock, Mutex::_allow_vm_block_flag);
// ThreadsListHandle tlh; int max_threads = Threads::number_of_threads();
// // Resolve a sample session relative start position index into the thread list array. assert(max_threads >= 0, "Threads list is empty");
// // In cases where the last sampled thread is NULL or not-NULL but stale, find_index() returns -1. uint index = 0;
// _cur_index = tlh.list()->find_index_of_JavaThread(*last_thread); JavaThread** threads_list = NEW_C_HEAP_ARRAY(JavaThread *, max_threads, mtInternal);
// JavaThread* current = _cur_index != -1 ? *last_thread : NULL; for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
// threads_list[index++] = tp;
// while (num_sample_attempts < sample_limit) { }
// current = next_thread(tlh.list(), start, current); JavaThread* current = Threads::includes(*last_thread) ? *last_thread : NULL;
// if (current == NULL) { JavaThread* start = NULL;
// break;
// } while (num_sample_attempts < sample_limit) {
// if (start == NULL) { current = next_thread(threads_list, index, start, current);
// start = current; // remember the thread where we started to attempt sampling if (current == NULL) {
// } break;
// if (current->is_Compiler_thread()) { }
// continue; if (start == NULL) {
// } start = current; // remember the thread where we started to attempt sampling
// sample_task.do_sample_thread(current, _frames, _max_frames, type); }
// num_sample_attempts++; if (current->is_Compiler_thread()) {
// } continue;
// *last_thread = current; // remember the thread we last attempted to sample }
sample_task.do_sample_thread(current, _frames, _max_frames, type);
num_sample_attempts++;
}
*last_thread = current; // remember the thread we last attempted to sample
FREE_C_HEAP_ARRAY(JavaThread *, threads_list, mtInternal);
} }
sample_time.stop(); sample_time.stop();
if (LogJFR && Verbose) tty->print_cr("JFR thread sampling done in %3.7f secs with %d java %d native samples", if (LogJFR && Verbose) tty->print_cr("JFR thread sampling done in %3.7f secs with %d java %d native samples",
......
...@@ -188,13 +188,12 @@ void GCWhenConstant::serialize(JfrCheckpointWriter& writer) { ...@@ -188,13 +188,12 @@ void GCWhenConstant::serialize(JfrCheckpointWriter& writer) {
} }
void G1HeapRegionTypeConstant::serialize(JfrCheckpointWriter& writer) { void G1HeapRegionTypeConstant::serialize(JfrCheckpointWriter& writer) {
// XXX TODO? static const u4 nof_entries = G1HeapRegionTraceType::G1HeapRegionTypeEndSentinel;
// static const u4 nof_entries = G1HeapRegionTraceType::G1HeapRegionTypeEndSentinel; writer.write_count(nof_entries);
// writer.write_count(nof_entries); for (u4 i = 0; i < nof_entries; ++i) {
// for (u4 i = 0; i < nof_entries; ++i) { writer.write_key(i);
// writer.write_key(i); writer.write(G1HeapRegionTraceType::to_string((G1HeapRegionTraceType::Type)i));
// writer.write(G1HeapRegionTraceType::to_string((G1HeapRegionTraceType::Type)i)); }
// }
} }
void GCThresholdUpdaterConstant::serialize(JfrCheckpointWriter& writer) { void GCThresholdUpdaterConstant::serialize(JfrCheckpointWriter& writer) {
...@@ -279,13 +278,10 @@ void CompilerPhaseTypeConstant::serialize(JfrCheckpointWriter& writer) { ...@@ -279,13 +278,10 @@ void CompilerPhaseTypeConstant::serialize(JfrCheckpointWriter& writer) {
} }
void CodeBlobTypeConstant::serialize(JfrCheckpointWriter& writer) { void CodeBlobTypeConstant::serialize(JfrCheckpointWriter& writer) {
// XXX no code blob types. need to send any stub value? static const u4 nof_entries = CodeBlobType::NumTypes;
// static const u4 nof_entries = CodeBlobType::NumTypes; writer.write_count(nof_entries);
// writer.write_count(nof_entries); writer.write_key((u4)CodeBlobType::All);
// for (u4 i = 0; i < nof_entries; ++i) { writer.write("CodeCache");
// writer.write_key(i);
// writer.write(CodeCache::get_code_heap_name(i));
// }
}; };
void VMOperationTypeConstant::serialize(JfrCheckpointWriter& writer) { void VMOperationTypeConstant::serialize(JfrCheckpointWriter& writer) {
......
...@@ -46,7 +46,11 @@ ...@@ -46,7 +46,11 @@
#define THREAD_LOCAL_WRITER_OFFSET_JFR \ #define THREAD_LOCAL_WRITER_OFFSET_JFR \
JfrThreadLocal::java_event_writer_offset() + THREAD_LOCAL_OFFSET_JFR JfrThreadLocal::java_event_writer_offset() + THREAD_LOCAL_OFFSET_JFR
// XXX consider implementing thread suspend tracing #define DEFINE_TRACE_SUSPEND_FLAG_METHODS \
#define SUSPEND_THREAD_CONDITIONAL(thread) if (false/*(thread)->is_trace_suspend()*/) JfrThreadSampling::on_javathread_suspend(thread) void set_trace_flag() { set_suspend_flag(_trace_flag); } \
void clear_trace_flag() { clear_suspend_flag(_trace_flag); } \
bool is_trace_suspend() { return (_suspend_flags & _trace_flag) != 0; }
#define SUSPEND_THREAD_CONDITIONAL(thread) if ((thread)->is_trace_suspend()) JfrThreadSampling::on_javathread_suspend(thread)
#endif // SHARE_VM_JFR_SUPPORT_JFRTHREADEXTENSION_HPP #endif // SHARE_VM_JFR_SUPPORT_JFRTHREADEXTENSION_HPP
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include "runtime/interfaceSupport.hpp" #include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp" #include "runtime/os.hpp"
#include "utilities/array.hpp" #include "utilities/array.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp" #include "utilities/debug.hpp"
#include "utilities/macros.hpp" #include "utilities/macros.hpp"
#include "utilities/exceptions.hpp" #include "utilities/exceptions.hpp"
...@@ -653,13 +654,13 @@ WB_ENTRY(void, WB_MarkMethodProfiled(JNIEnv* env, jobject o, jobject method)) ...@@ -653,13 +654,13 @@ WB_ENTRY(void, WB_MarkMethodProfiled(JNIEnv* env, jobject o, jobject method))
WB_END WB_END
template <typename T> template <typename T>
static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*)) { static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*, bool, bool)) {
if (name == NULL) { if (name == NULL) {
return false; return false;
} }
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
const char* flag_name = env->GetStringUTFChars(name, NULL); const char* flag_name = env->GetStringUTFChars(name, NULL);
bool result = (*TAt)(flag_name, value); bool result = (*TAt)(flag_name, value, true, true);
env->ReleaseStringUTFChars(name, flag_name); env->ReleaseStringUTFChars(name, flag_name);
return result; return result;
} }
...@@ -851,6 +852,47 @@ WB_ENTRY(jstring, WB_GetCPUFeatures(JNIEnv* env, jobject o)) ...@@ -851,6 +852,47 @@ WB_ENTRY(jstring, WB_GetCPUFeatures(JNIEnv* env, jobject o))
return features_string; return features_string;
WB_END WB_END
// Maps a CodeBlob to the id of the CodeHeap it lives in. This JDK8 code
// cache is unsegmented, so every blob reports CodeBlobType::All; the
// function exists for API compatibility with segmented-code-cache JDKs.
// Requires -XX:+WhiteBoxAPI (guaranteed below).
int WhiteBox::get_blob_type(const CodeBlob* code) {
  guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled");
  return CodeBlobType::All;  // fixed: was a stray ";;"
}
// Immutable snapshot of a CodeBlob's identity, used to marshal blob info to
// the Java side. Owns a strdup'ed copy of the blob name and frees it in the
// destructor, so the stub stays valid even if the blob itself goes away.
struct CodeBlobStub {
  CodeBlobStub(const CodeBlob* blob) :
    name(os::strdup(blob->name())),            // owned copy; released in dtor
    size(blob->size()),
    blob_type(WhiteBox::get_blob_type(blob)),
    address((jlong) blob) { }
  ~CodeBlobStub() { os::free((void*) name); }  // matches os::strdup above
  const char* const name;       // malloc'ed copy of blob->name()
  const jint        size;       // total blob size in bytes
  const jint        blob_type;  // CodeBlobType value (always All on this JDK)
  const jlong       address;    // blob start address, exposed to Java as long
};
// Packs a CodeBlobStub into a java.lang.Object[4] of
// { String name, Integer size, Integer blob_type, Long address }
// for the Java-side WhiteBox API. Uses raw JNI, so the calling thread must
// already be in native state (see ThreadToNativeFromVM at the call site).
// Returns NULL if a JNI call raised a pending exception.
static jobjectArray codeBlob2objectArray(JavaThread* thread, JNIEnv* env, CodeBlobStub* cb) {
  jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
  CHECK_JNI_EXCEPTION_(env, NULL);
  // NOTE(review): NewObjectArray's result is not exception/NULL-checked here,
  // unlike the other JNI calls — presumably allocation failure is considered
  // fatal in a test API; confirm against upstream whitebox.cpp.
  jobjectArray result = env->NewObjectArray(4, clazz, NULL);
  jstring name = env->NewStringUTF(cb->name);
  CHECK_JNI_EXCEPTION_(env, NULL);
  env->SetObjectArrayElement(result, 0, name);
  jobject obj = integerBox(thread, env, cb->size);     // box jint -> Integer
  CHECK_JNI_EXCEPTION_(env, NULL);
  env->SetObjectArrayElement(result, 1, obj);
  obj = integerBox(thread, env, cb->blob_type);
  CHECK_JNI_EXCEPTION_(env, NULL);
  env->SetObjectArrayElement(result, 2, obj);
  obj = longBox(thread, env, cb->address);             // box jlong -> Long
  CHECK_JNI_EXCEPTION_(env, NULL);
  env->SetObjectArrayElement(result, 3, obj);
  return result;
}
WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr)) WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
ResourceMark rm(THREAD); ResourceMark rm(THREAD);
...@@ -888,6 +930,47 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo ...@@ -888,6 +930,47 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo
return result; return result;
WB_END WB_END
// Allocates a dummy BufferBlob of at least 'size' payload bytes directly in
// the code cache (used by code-cache stress tests). 'blob_type' is accepted
// for compatibility with segmented-code-cache JDKs but is unused here.
// Requires -XX:+WhiteBoxAPI. Caller frees via BufferBlob::free.
CodeBlob* WhiteBox::allocate_code_blob(int size, int blob_type) {
  guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled");
  BufferBlob* blob;
  // Minimum footprint is the alignment-padded BufferBlob header itself.
  int full_size = CodeBlob::align_code_offset(sizeof(BufferBlob));
  if (full_size < size) {
    // Grow to cover the requested size, rounded up to oop-size alignment.
    full_size += align_up(size - full_size, oopSize);
  }
  {
    // CodeCache allocation must happen under CodeCache_lock.
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = (BufferBlob*) CodeCache::allocate(full_size);
    // NOTE(review): CodeCache::allocate can return NULL on cache exhaustion;
    // placement new on NULL would be undefined behavior — confirm the tests
    // never exhaust the cache, or add an explicit NULL check.
    ::new (blob) BufferBlob("WB::DummyBlob", full_size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
  return blob;
}
// JNI entry for WhiteBox.allocateCodeBlob(int size, int blobType).
// Rejects negative sizes with IllegalArgumentException; otherwise returns
// the new blob's address as a jlong handle for freeCodeBlob/getCodeBlob.
WB_ENTRY(jlong, WB_AllocateCodeBlob(JNIEnv* env, jobject o, jint size, jint blob_type))
  if (size < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
      err_msg("WB_AllocateCodeBlob: size is negative: " INT32_FORMAT, size));
  }
  return (jlong) WhiteBox::allocate_code_blob(size, blob_type);
WB_END
// JNI entry for WhiteBox.freeCodeBlob(long addr).
// A 0 address is a no-op; otherwise releases the blob allocated by
// WB_AllocateCodeBlob back to the code cache.
WB_ENTRY(void, WB_FreeCodeBlob(JNIEnv* env, jobject o, jlong addr))
  if (addr == 0) {
    return;
  }
  BufferBlob::free((BufferBlob*) addr);
WB_END
// JNI entry for WhiteBox.getCodeBlob(long addr): returns an Object[4]
// { name, size, blob_type, address } describing the blob at 'addr'
// (see codeBlob2objectArray). Throws NullPointerException for addr == 0.
WB_ENTRY(jobjectArray, WB_GetCodeBlob(JNIEnv* env, jobject o, jlong addr))
  if (addr == 0) {
    THROW_MSG_NULL(vmSymbols::java_lang_NullPointerException(),
      "WB_GetCodeBlob: addr is null");
  }
  // Switch to native state: codeBlob2objectArray issues raw JNI calls.
  ThreadToNativeFromVM ttn(thread);
  CodeBlobStub stub((CodeBlob*) addr);
  return codeBlob2objectArray(thread, env, &stub);
WB_END
int WhiteBox::array_bytes_to_length(size_t bytes) { int WhiteBox::array_bytes_to_length(size_t bytes) {
return Array<u1>::bytes_to_length(bytes); return Array<u1>::bytes_to_length(bytes);
...@@ -1167,6 +1250,9 @@ static JNINativeMethod methods[] = { ...@@ -1167,6 +1250,9 @@ static JNINativeMethod methods[] = {
{CC"fullGC", CC"()V", (void*)&WB_FullGC }, {CC"fullGC", CC"()V", (void*)&WB_FullGC },
{CC"youngGC", CC"()V", (void*)&WB_YoungGC }, {CC"youngGC", CC"()V", (void*)&WB_YoungGC },
{CC"readReservedMemory", CC"()V", (void*)&WB_ReadReservedMemory }, {CC"readReservedMemory", CC"()V", (void*)&WB_ReadReservedMemory },
{CC"allocateCodeBlob", CC"(II)J", (void*)&WB_AllocateCodeBlob },
{CC"freeCodeBlob", CC"(J)V", (void*)&WB_FreeCodeBlob },
{CC"getCodeBlob", CC"(J)[Ljava/lang/Object;",(void*)&WB_GetCodeBlob },
{CC"allocateMetaspace", {CC"allocateMetaspace",
CC"(Ljava/lang/ClassLoader;J)J", (void*)&WB_AllocateMetaspace }, CC"(Ljava/lang/ClassLoader;J)J", (void*)&WB_AllocateMetaspace },
{CC"freeMetaspace", {CC"freeMetaspace",
......
...@@ -64,7 +64,8 @@ class WhiteBox : public AllStatic { ...@@ -64,7 +64,8 @@ class WhiteBox : public AllStatic {
Symbol* signature_symbol); Symbol* signature_symbol);
static const char* lookup_jstring(const char* field_name, oop object); static const char* lookup_jstring(const char* field_name, oop object);
static bool lookup_bool(const char* field_name, oop object); static bool lookup_bool(const char* field_name, oop object);
static int get_blob_type(const CodeBlob* code);
static CodeBlob* allocate_code_blob(int size, int blob_type);
static int array_bytes_to_length(size_t bytes); static int array_bytes_to_length(size_t bytes);
static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread, static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
JNINativeMethod* method_array, int method_count); JNINativeMethod* method_array, int method_count);
......
...@@ -31,6 +31,8 @@ ...@@ -31,6 +31,8 @@
#include "runtime/vframe.hpp" #include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp" #include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp" #include "runtime/vm_operations.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "jfr/jfrEvents.hpp"
static bool _biased_locking_enabled = false; static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters; BiasedLockingCounters BiasedLocking::_counters;
...@@ -142,8 +144,9 @@ static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thre ...@@ -142,8 +144,9 @@ static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thre
return info; return info;
} }
// After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL,
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) { // AND it is a living thread. Otherwise it will not be updated, (i.e. the caller is responsible for initialization).
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
markOop mark = obj->mark(); markOop mark = obj->mark();
if (!mark->has_bias_pattern()) { if (!mark->has_bias_pattern()) {
if (TraceBiasedLocking) { if (TraceBiasedLocking) {
...@@ -253,6 +256,11 @@ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_ ...@@ -253,6 +256,11 @@ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_
} }
} }
// If requested, return information on which thread held the bias
if (biased_locker != NULL) {
*biased_locker = biased_thread;
}
return BiasedLocking::BIAS_REVOKED; return BiasedLocking::BIAS_REVOKED;
} }
...@@ -373,7 +381,7 @@ static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o, ...@@ -373,7 +381,7 @@ static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
// At this point we're done. All we have to do is potentially // At this point we're done. All we have to do is potentially
// adjust the header of the given object to revoke its bias. // adjust the header of the given object to revoke its bias.
revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread); revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
} else { } else {
if (TraceBiasedLocking) { if (TraceBiasedLocking) {
ResourceMark rm; ResourceMark rm;
...@@ -395,14 +403,14 @@ static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o, ...@@ -395,14 +403,14 @@ static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
oop owner = mon_info->owner(); oop owner = mon_info->owner();
markOop mark = owner->mark(); markOop mark = owner->mark();
if ((owner->klass() == k_o) && mark->has_bias_pattern()) { if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
revoke_bias(owner, false, true, requesting_thread); revoke_bias(owner, false, true, requesting_thread, NULL);
} }
} }
} }
// Must force the bias of the passed object to be forcibly revoked // Must force the bias of the passed object to be forcibly revoked
// as well to ensure guarantees to callers // as well to ensure guarantees to callers
revoke_bias(o, false, true, requesting_thread); revoke_bias(o, false, true, requesting_thread, NULL);
} }
if (TraceBiasedLocking) { if (TraceBiasedLocking) {
...@@ -445,19 +453,22 @@ protected: ...@@ -445,19 +453,22 @@ protected:
GrowableArray<Handle>* _objs; GrowableArray<Handle>* _objs;
JavaThread* _requesting_thread; JavaThread* _requesting_thread;
BiasedLocking::Condition _status_code; BiasedLocking::Condition _status_code;
traceid _biased_locker_id;
public: public:
VM_RevokeBias(Handle* obj, JavaThread* requesting_thread) VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
: _obj(obj) : _obj(obj)
, _objs(NULL) , _objs(NULL)
, _requesting_thread(requesting_thread) , _requesting_thread(requesting_thread)
, _status_code(BiasedLocking::NOT_BIASED) {} , _status_code(BiasedLocking::NOT_BIASED)
, _biased_locker_id(0) {}
VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread) VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
: _obj(NULL) : _obj(NULL)
, _objs(objs) , _objs(objs)
, _requesting_thread(requesting_thread) , _requesting_thread(requesting_thread)
, _status_code(BiasedLocking::NOT_BIASED) {} , _status_code(BiasedLocking::NOT_BIASED)
, _biased_locker_id(0) {}
virtual VMOp_Type type() const { return VMOp_RevokeBias; } virtual VMOp_Type type() const { return VMOp_RevokeBias; }
...@@ -486,7 +497,11 @@ public: ...@@ -486,7 +497,11 @@ public:
if (TraceBiasedLocking) { if (TraceBiasedLocking) {
tty->print_cr("Revoking bias with potentially per-thread safepoint:"); tty->print_cr("Revoking bias with potentially per-thread safepoint:");
} }
_status_code = revoke_bias((*_obj)(), false, false, _requesting_thread); JavaThread* biased_locker = NULL;
_status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
if (biased_locker != NULL) {
_biased_locker_id = JFR_THREAD_ID(biased_locker);
}
clean_up_cached_monitor_info(); clean_up_cached_monitor_info();
return; return;
} else { } else {
...@@ -500,6 +515,10 @@ public: ...@@ -500,6 +515,10 @@ public:
BiasedLocking::Condition status_code() const { BiasedLocking::Condition status_code() const {
return _status_code; return _status_code;
} }
traceid biased_locker() const {
return _biased_locker_id;
}
}; };
...@@ -609,23 +628,44 @@ BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attem ...@@ -609,23 +628,44 @@ BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attem
if (TraceBiasedLocking) { if (TraceBiasedLocking) {
tty->print_cr("Revoking bias by walking my own stack:"); tty->print_cr("Revoking bias by walking my own stack:");
} }
BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD); EventBiasedLockSelfRevocation event;
BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
((JavaThread*) THREAD)->set_cached_monitor_info(NULL); ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
assert(cond == BIAS_REVOKED, "why not?"); assert(cond == BIAS_REVOKED, "why not?");
if (event.should_commit()) {
event.set_lockClass(k);
event.commit();
}
return cond; return cond;
} else { } else {
EventBiasedLockRevocation event;
VM_RevokeBias revoke(&obj, (JavaThread*) THREAD); VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
VMThread::execute(&revoke); VMThread::execute(&revoke);
if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
event.set_lockClass(k);
// Subtract 1 to match the id of events committed inside the safepoint
event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
event.set_previousOwner(revoke.biased_locker());
event.commit();
}
return revoke.status_code(); return revoke.status_code();
} }
} }
assert((heuristics == HR_BULK_REVOKE) || assert((heuristics == HR_BULK_REVOKE) ||
(heuristics == HR_BULK_REBIAS), "?"); (heuristics == HR_BULK_REBIAS), "?");
EventBiasedLockClassRevocation event;
VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD, VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
(heuristics == HR_BULK_REBIAS), (heuristics == HR_BULK_REBIAS),
attempt_rebias); attempt_rebias);
VMThread::execute(&bulk_revoke); VMThread::execute(&bulk_revoke);
if (event.should_commit()) {
event.set_revokedClass(obj->klass());
event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
// Subtract 1 to match the id of events committed inside the safepoint
event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
event.commit();
}
return bulk_revoke.status_code(); return bulk_revoke.status_code();
} }
...@@ -645,7 +685,7 @@ void BiasedLocking::revoke_at_safepoint(Handle h_obj) { ...@@ -645,7 +685,7 @@ void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
oop obj = h_obj(); oop obj = h_obj();
HeuristicsResult heuristics = update_heuristics(obj, false); HeuristicsResult heuristics = update_heuristics(obj, false);
if (heuristics == HR_SINGLE_REVOKE) { if (heuristics == HR_SINGLE_REVOKE) {
revoke_bias(obj, false, false, NULL); revoke_bias(obj, false, false, NULL, NULL);
} else if ((heuristics == HR_BULK_REBIAS) || } else if ((heuristics == HR_BULK_REBIAS) ||
(heuristics == HR_BULK_REVOKE)) { (heuristics == HR_BULK_REVOKE)) {
bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL); bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
...@@ -661,7 +701,7 @@ void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) { ...@@ -661,7 +701,7 @@ void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
oop obj = (objs->at(i))(); oop obj = (objs->at(i))();
HeuristicsResult heuristics = update_heuristics(obj, false); HeuristicsResult heuristics = update_heuristics(obj, false);
if (heuristics == HR_SINGLE_REVOKE) { if (heuristics == HR_SINGLE_REVOKE) {
revoke_bias(obj, false, false, NULL); revoke_bias(obj, false, false, NULL, NULL);
} else if ((heuristics == HR_BULK_REBIAS) || } else if ((heuristics == HR_BULK_REBIAS) ||
(heuristics == HR_BULK_REVOKE)) { (heuristics == HR_BULK_REVOKE)) {
bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL); bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
......
...@@ -616,8 +616,8 @@ static void trace_flag_changed(const char* name, const T old_value, const T new_ ...@@ -616,8 +616,8 @@ static void trace_flag_changed(const char* name, const T old_value, const T new_
e.commit(); e.commit();
} }
bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value) { bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len); Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false; if (result == NULL) return false;
if (!result->is_bool()) return false; if (!result->is_bool()) return false;
*value = result->get_bool(); *value = result->get_bool();
...@@ -644,8 +644,8 @@ void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Fla ...@@ -644,8 +644,8 @@ void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Fla
faddr->set_origin(origin); faddr->set_origin(origin);
} }
bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value) { bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len); Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false; if (result == NULL) return false;
if (!result->is_intx()) return false; if (!result->is_intx()) return false;
*value = result->get_intx(); *value = result->get_intx();
...@@ -672,8 +672,8 @@ void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Fla ...@@ -672,8 +672,8 @@ void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Fla
faddr->set_origin(origin); faddr->set_origin(origin);
} }
bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value) { bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len); Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false; if (result == NULL) return false;
if (!result->is_uintx()) return false; if (!result->is_uintx()) return false;
*value = result->get_uintx(); *value = result->get_uintx();
...@@ -700,8 +700,8 @@ void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, F ...@@ -700,8 +700,8 @@ void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, F
faddr->set_origin(origin); faddr->set_origin(origin);
} }
bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value) { bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len); Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false; if (result == NULL) return false;
if (!result->is_uint64_t()) return false; if (!result->is_uint64_t()) return false;
*value = result->get_uint64_t(); *value = result->get_uint64_t();
...@@ -728,8 +728,8 @@ void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t va ...@@ -728,8 +728,8 @@ void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t va
faddr->set_origin(origin); faddr->set_origin(origin);
} }
bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value) { bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len); Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false; if (result == NULL) return false;
if (!result->is_double()) return false; if (!result->is_double()) return false;
*value = result->get_double(); *value = result->get_double();
...@@ -756,8 +756,8 @@ void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, ...@@ -756,8 +756,8 @@ void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value,
faddr->set_origin(origin); faddr->set_origin(origin);
} }
bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value) { bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len); Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false; if (result == NULL) return false;
if (!result->is_ccstr()) return false; if (!result->is_ccstr()) return false;
*value = result->get_ccstr(); *value = result->get_ccstr();
......
...@@ -369,33 +369,33 @@ class DoubleFlagSetting { ...@@ -369,33 +369,33 @@ class DoubleFlagSetting {
class CommandLineFlags { class CommandLineFlags {
public: public:
static bool boolAt(const char* name, size_t len, bool* value); static bool boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
static bool boolAt(const char* name, bool* value) { return boolAt(name, strlen(name), value); } static bool boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin); static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
static bool boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); } static bool boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
static bool intxAt(const char* name, size_t len, intx* value); static bool intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
static bool intxAt(const char* name, intx* value) { return intxAt(name, strlen(name), value); } static bool intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin); static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
static bool intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); } static bool intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
static bool uintxAt(const char* name, size_t len, uintx* value); static bool uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
static bool uintxAt(const char* name, uintx* value) { return uintxAt(name, strlen(name), value); } static bool uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin); static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); } static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
static bool uint64_tAt(const char* name, size_t len, uint64_t* value); static bool uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
static bool uint64_tAt(const char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); } static bool uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin); static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
static bool uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); } static bool uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
static bool doubleAt(const char* name, size_t len, double* value); static bool doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
static bool doubleAt(const char* name, double* value) { return doubleAt(name, strlen(name), value); } static bool doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin); static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); } static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
static bool ccstrAt(const char* name, size_t len, ccstr* value); static bool ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
static bool ccstrAt(const char* name, ccstr* value) { return ccstrAt(name, strlen(name), value); } static bool ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
// Contract: Flag will make private copy of the incoming value. // Contract: Flag will make private copy of the incoming value.
// Outgoing value is always malloc-ed, and caller MUST call free. // Outgoing value is always malloc-ed, and caller MUST call free.
static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin); static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
......
...@@ -196,7 +196,9 @@ class Thread: public ThreadShadow { ...@@ -196,7 +196,9 @@ class Thread: public ThreadShadow {
_deopt_suspend = 0x10000000U, // thread needs to self suspend for deopt _deopt_suspend = 0x10000000U, // thread needs to self suspend for deopt
_has_async_exception = 0x00000001U, // there is a pending async exception _has_async_exception = 0x00000001U, // there is a pending async exception
_critical_native_unlock = 0x00000002U // Must call back to unlock JNI critical lock _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock
JFR_ONLY(_trace_flag = 0x00000004U) // call jfr tracing
}; };
// various suspension related flags - atomically updated // various suspension related flags - atomically updated
...@@ -443,6 +445,7 @@ class Thread: public ThreadShadow { ...@@ -443,6 +445,7 @@ class Thread: public ThreadShadow {
inline jlong cooked_allocated_bytes(); inline jlong cooked_allocated_bytes();
JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;) JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
JFR_ONLY(DEFINE_TRACE_SUSPEND_FLAG_METHODS)
const ThreadExt& ext() const { return _ext; } const ThreadExt& ext() const { return _ext; }
ThreadExt& ext() { return _ext; } ThreadExt& ext() { return _ext; }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册