提交 09121154 编写于 作者: D Denghui Dong 提交者: D-D-H

[Backport] 8225797: OldObjectSample event creates unexpected amount of checkpoint data

Summary:

Test Plan: jdk/jfr

Reviewed-by: yuleil

Issue: https://github.com/alibaba/dragonwell8/issues/112
上级 49c13aa0
......@@ -1461,7 +1461,7 @@ static void copy_method_trace_flags(const InstanceKlass* the_original_klass, con
assert(new_method != NULL, "invariant");
assert(new_method->name() == old_method->name(), "invariant");
assert(new_method->signature() == old_method->signature(), "invariant");
*new_method->trace_flags_addr() = old_method->trace_flags();
new_method->set_trace_flags(old_method->trace_flags());
assert(new_method->trace_flags() == old_method->trace_flags(), "invariant");
}
}
......
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -92,7 +92,9 @@ void Jfr::on_vm_shutdown(bool exception_handler) {
}
void Jfr::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
LeakProfiler::oops_do(is_alive, f);
if (LeakProfiler::is_running()) {
LeakProfiler::oops_do(is_alive, f);
}
}
void Jfr::weak_oops_do(OopClosure* f) {
......
......@@ -55,18 +55,23 @@ bool EdgeStore::is_empty() const {
return !_edges->has_entries();
}
void EdgeStore::assign_id(EdgeEntry* entry) {
void EdgeStore::on_link(EdgeEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry->id() == 0, "invariant");
entry->set_id(++_edge_id_counter);
}
bool EdgeStore::equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry) {
bool EdgeStore::on_equals(uintptr_t hash, const EdgeEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry->hash() == hash, "invariant");
return true;
}
// Hash table callback invoked when an entry is removed from the table.
// Edge entries own no external resources, so there is nothing to release.
void EdgeStore::on_unlink(EdgeEntry* entry) {
assert(entry != NULL, "invariant");
// nothing
}
#ifdef ASSERT
bool EdgeStore::contains(const oop* reference) const {
return get(reference) != NULL;
......@@ -75,22 +80,21 @@ bool EdgeStore::contains(const oop* reference) const {
StoredEdge* EdgeStore::get(const oop* reference) const {
assert(reference != NULL, "invariant");
const StoredEdge e(NULL, reference);
EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference);
EdgeEntry* const entry = _edges->lookup_only((uintptr_t)reference);
return entry != NULL ? entry->literal_addr() : NULL;
}
StoredEdge* EdgeStore::put(const oop* reference) {
assert(reference != NULL, "invariant");
const StoredEdge e(NULL, reference);
assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
assert(NULL == _edges->lookup_only((uintptr_t)reference), "invariant");
EdgeEntry& entry = _edges->put((uintptr_t)reference, e);
return entry.literal_addr();
}
traceid EdgeStore::get_id(const Edge* edge) const {
assert(edge != NULL, "invariant");
EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
EdgeEntry* const entry = _edges->lookup_only((uintptr_t)edge->reference());
assert(entry != NULL, "invariant");
return entry->id();
}
......
......@@ -58,7 +58,7 @@ class StoredEdge : public Edge {
};
class EdgeStore : public CHeapObj<mtTracing> {
typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
typedef HashTableHost<StoredEdge, traceid, JfrHashtableEntry, EdgeStore> EdgeHashTable;
typedef EdgeHashTable::HashEntry EdgeEntry;
template <typename,
typename,
......@@ -74,8 +74,9 @@ class EdgeStore : public CHeapObj<mtTracing> {
EdgeHashTable* _edges;
// Hash table callbacks
void assign_id(EdgeEntry* entry);
bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
void on_link(EdgeEntry* entry);
bool on_equals(uintptr_t hash, const EdgeEntry* entry);
void on_unlink(EdgeEntry* entry);
StoredEdge* get(const oop* reference) const;
StoredEdge* put(const oop* reference);
......
......@@ -42,7 +42,6 @@
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/globalDefinitions.hpp"
......@@ -101,7 +100,7 @@ void PathToGcRootsOperation::doit() {
// Save the original markWord for the potential leak objects,
// to be restored on function exit
ObjectSampleMarker marker;
if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) {
if (ObjectSampleCheckpoint::save_mark_words(_sampler, marker, _emit_all) == 0) {
// no valid samples to process
return;
}
......
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -24,10 +24,6 @@
#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
......@@ -35,259 +31,442 @@
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
template <typename SampleProcessor>
static void do_samples(ObjectSample* sample, const ObjectSample* const end, SampleProcessor& processor) {
assert(sample != NULL, "invariant");
while (sample != end) {
processor.sample_do(sample);
sample = sample->next();
}
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/utilities/jfrHashtable.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/semaphore.hpp"
#include "runtime/thread.hpp"
#include "utilities/growableArray.hpp"
// Membership test: returns true iff 'id' is present in the sorted 'set'.
// Read-only: the set is not modified (contrast with mutable_predicate/add).
static bool predicate(GrowableArray<traceid>* set, traceid id) {
assert(set != NULL, "invariant");
bool found = false;
// binary search; 'found' is an out-parameter set by find_sorted
set->find_sorted<traceid, compare_traceid>(id, found);
return found;
}
class RootSystemType : public JfrSerializer {
public:
void serialize(JfrCheckpointWriter& writer) {
const u4 nof_root_systems = OldObjectRoot::_number_of_systems;
writer.write_count(nof_root_systems);
for (u4 i = 0; i < nof_root_systems; ++i) {
writer.write_key(i);
writer.write(OldObjectRoot::system_description((OldObjectRoot::System)i));
}
static bool mutable_predicate(GrowableArray<traceid>* set, traceid id) {
assert(set != NULL, "invariant");
bool found = false;
const int location = set->find_sorted<traceid, compare_traceid>(id, found);
if (!found) {
set->insert_before(location, id);
}
};
return found;
}
class RootType : public JfrSerializer {
public:
void serialize(JfrCheckpointWriter& writer) {
const u4 nof_root_types = OldObjectRoot::_number_of_types;
writer.write_count(nof_root_types);
for (u4 i = 0; i < nof_root_types; ++i) {
writer.write_key(i);
writer.write(OldObjectRoot::type_description((OldObjectRoot::Type)i));
}
}
};
// Inserts 'id' into the sorted 'set' if absent (delegates to mutable_predicate).
// Returns true if the id was already present, false if it was just inserted.
static bool add(GrowableArray<traceid>* set, traceid id) {
assert(set != NULL, "invariant");
return mutable_predicate(set, id);
}
class CheckpointInstall {
private:
const JfrCheckpointBlobHandle& _cp;
public:
CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {}
void sample_do(ObjectSample* sample) {
assert(sample != NULL, "invariant");
if (!sample->is_dead()) {
sample->set_klass_checkpoint(_cp);
}
}
};
// Default capacity for the C-heap-backed id sets below.
const int initial_array_size = 64;
// Allocates a GrowableArray on the C heap (mtTracing) so it survives
// resource-mark scopes; callers own the returned array.
template <typename T>
static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
}
class CheckpointWrite {
static GrowableArray<traceid>* unloaded_thread_id_set = NULL;
class ThreadIdExclusiveAccess : public StackObj {
private:
JfrCheckpointWriter& _writer;
const jlong _last_sweep;
static Semaphore _mutex_semaphore;
public:
CheckpointWrite(JfrCheckpointWriter& writer, jlong last_sweep) : _writer(writer), _last_sweep(last_sweep) {}
void sample_do(ObjectSample* sample) {
assert(sample != NULL, "invariant");
if (sample->is_alive_and_older_than(_last_sweep)) {
if (sample->has_thread_checkpoint()) {
const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
thread_cp->exclusive_write(_writer);
}
if (sample->has_klass_checkpoint()) {
const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
klass_cp->exclusive_write(_writer);
}
}
}
ThreadIdExclusiveAccess() { _mutex_semaphore.wait(); }
~ThreadIdExclusiveAccess() { _mutex_semaphore.signal(); }
};
class CheckpointStateReset {
private:
const jlong _last_sweep;
public:
CheckpointStateReset(jlong last_sweep) : _last_sweep(last_sweep) {}
void sample_do(ObjectSample* sample) {
assert(sample != NULL, "invariant");
if (sample->is_alive_and_older_than(_last_sweep)) {
if (sample->has_thread_checkpoint()) {
const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
thread_cp->reset_write_state();
}
if (sample->has_klass_checkpoint()) {
const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
klass_cp->reset_write_state();
}
}
// Binary semaphore (count 1) serializing access to the unloaded-thread-id set.
Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);
// True iff 'tid' was recorded as exited during this epoch
// (i.e. it is in unloaded_thread_id_set). NULL set means no thread has exited.
static bool has_thread_exited(traceid tid) {
assert(tid != 0, "invariant");
return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid);
}
static void add_to_unloaded_thread_set(traceid tid) {
ThreadIdExclusiveAccess lock;
if (unloaded_thread_id_set == NULL) {
unloaded_thread_id_set = c_heap_allocate_array<traceid>();
}
};
add(unloaded_thread_id_set, tid);
}
class StackTraceWrite {
private:
JfrStackTraceRepository& _stack_trace_repo;
JfrCheckpointWriter& _writer;
int _count;
public:
StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer) :
_stack_trace_repo(stack_trace_repo), _writer(writer), _count(0) {
JfrStacktrace_lock->lock();
void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
assert(jt != NULL, "invariant");
if (LeakProfiler::is_running()) {
add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id());
}
~StackTraceWrite() {
assert(JfrStacktrace_lock->owned_by_self(), "invariant");
JfrStacktrace_lock->unlock();
}
// Track the set of unloaded klasses during a chunk / epoch.
// Methods in stacktraces belonging to unloaded klasses must not be accessed.
static GrowableArray<traceid>* unloaded_klass_set = NULL;
// Records the trace id of an unloaded klass; the set is lazily allocated.
// Appends unsorted — sort_unloaded_klass_set() must run before binary lookups.
// NOTE(review): no lock taken here — presumably callers run at a safepoint
// during class unloading; confirm against call sites.
static void add_to_unloaded_klass_set(traceid klass_id) {
if (unloaded_klass_set == NULL) {
unloaded_klass_set = c_heap_allocate_array<traceid>();
}
unloaded_klass_set->append(klass_id);
}
void sample_do(ObjectSample* sample) {
assert(sample != NULL, "invariant");
if (!sample->is_dead()) {
if (sample->has_stack_trace()) {
JfrTraceId::use(sample->klass(), true);
_stack_trace_repo.write(_writer, sample->stack_trace_id(), sample->stack_trace_hash());
++_count;
}
}
// Sorts the unloaded-klass id set so later membership tests can binary search.
// No-op for empty or single-element sets.
static void sort_unloaded_klass_set() {
if (unloaded_klass_set != NULL && unloaded_klass_set->length() > 1) {
unloaded_klass_set->sort(sort_traceid);
}
}
int count() const {
return _count;
// Callback on klass unload: remember the klass id so stack frames referring
// to its methods are not dereferenced later in this epoch.
void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) {
assert(k != NULL, "invariant");
add_to_unloaded_klass_set(TRACE_ID(k));
}
template <typename Processor>
static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) {
assert(sample != NULL, "invariant");
while (sample != end) {
processor.sample_do(sample);
sample = sample->next();
}
};
}
class SampleMark {
// Applies 'processor.sample_do' to samples from the most recent one backwards:
// all samples when 'all' is true, otherwise only those newer than the
// last-resolved watermark.
template <typename Processor>
static void iterate_samples(Processor& processor, bool all = false) {
ObjectSampler* const sampler = ObjectSampler::sampler();
assert(sampler != NULL, "invariant");
ObjectSample* const last = sampler->last();
assert(last != NULL, "invariant");
do_samples(last, all ? NULL : sampler->last_resolved(), processor);
}
class SampleMarker {
private:
ObjectSampleMarker& _marker;
jlong _last_sweep;
int _count;
public:
SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker),
_last_sweep(last_sweep),
_count(0) {}
SampleMarker(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), _last_sweep(last_sweep), _count(0) {}
void sample_do(ObjectSample* sample) {
assert(sample != NULL, "invariant");
if (sample->is_alive_and_older_than(_last_sweep)) {
_marker.mark(sample->object());
++_count;
}
}
int count() const {
return _count;
}
};
void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool type_set) {
if (!writer.has_data()) {
return;
// Saves the mark words of live sample objects via 'marker' (so they can be
// restored after path-to-gc-roots processing) and returns how many were
// marked. 'emit_all' widens the cut-off to every sample; otherwise only
// samples older than the last sweep are considered. Returns 0 if no samples.
int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) {
assert(sampler != NULL, "invariant");
if (sampler->last() == NULL) {
return 0;
}
SampleMarker sample_marker(marker, emit_all ? max_jlong : sampler->last_sweep().value());
iterate_samples(sample_marker, true);
return sample_marker.count();
}
assert(writer.has_data(), "invariant");
const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
CheckpointInstall install(h_cp);
// Class unload implies a safepoint.
// Not class unload implies the object sampler is locked, because it was claimed exclusively earlier.
// Therefore: direct access the object sampler instance is safe.
ObjectSampler* const object_sampler = ObjectSampler::sampler();
assert(object_sampler != NULL, "invariant");
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
const ObjectSample* const last_resolved = object_sampler->last_resolved();
// install only to new samples since last resolved checkpoint
if (last != last_resolved) {
do_samples(last, last_resolved, install);
if (class_unload) {
return;
}
if (type_set) {
object_sampler->set_last_resolved(last);
// Cache of serialized stacktrace blobs keyed by stack trace hash, with the
// stack trace id as entry id. Lets samples sharing a stack trace share one
// reference-counted blob instead of re-serializing — the core fix for the
// excess checkpoint data. '_lookup_id' carries the id of the in-flight
// lookup/put into the on_link/on_equals callbacks.
class BlobCache {
typedef HashTableHost<JfrBlobHandle, traceid, JfrHashtableEntry, BlobCache> BlobTable;
typedef BlobTable::HashEntry BlobEntry;
private:
BlobTable _table;
traceid _lookup_id;
public:
BlobCache(size_t size) : _table(this, size), _lookup_id(0) {}
JfrBlobHandle get(const ObjectSample* sample);
void put(const ObjectSample* sample, const JfrBlobHandle& blob);
// Hash table callbacks
void on_link(const BlobEntry* entry) const;
bool on_equals(uintptr_t hash, const BlobEntry* entry) const;
void on_unlink(BlobEntry* entry) const;
};
// Looks up a cached blob for the sample's stack trace. The hash selects the
// bucket; _lookup_id is consulted by on_equals to disambiguate hash
// collisions by stack trace id. Returns an invalid handle on miss.
JfrBlobHandle BlobCache::get(const ObjectSample* sample) {
assert(sample != NULL, "invariant");
_lookup_id = sample->stack_trace_id();
assert(_lookup_id != 0, "invariant");
BlobEntry* const entry = _table.lookup_only(sample->stack_trace_hash());
return entry != NULL ? entry->literal() : JfrBlobHandle();
}
// Inserts 'blob' keyed by the sample's stack trace hash. Precondition: no
// entry exists yet for that hash. _lookup_id feeds on_link, which stamps the
// new entry with the stack trace id.
void BlobCache::put(const ObjectSample* sample, const JfrBlobHandle& blob) {
assert(sample != NULL, "invariant");
assert(_table.lookup_only(sample->stack_trace_hash()) == NULL, "invariant");
_lookup_id = sample->stack_trace_id();
assert(_lookup_id != 0, "invariant");
_table.put(sample->stack_trace_hash(), blob);
}
// Hash table callback on insert: tag the fresh entry with the stack trace id
// of the put() currently in flight.
inline void BlobCache::on_link(const BlobEntry* entry) const {
assert(entry != NULL, "invariant");
assert(entry->id() == 0, "invariant");
entry->set_id(_lookup_id);
}
// Hash table callback on lookup: entries match only when the stored stack
// trace id equals the one being looked up (hash equality alone is not enough).
inline bool BlobCache::on_equals(uintptr_t hash, const BlobEntry* entry) const {
assert(entry != NULL, "invariant");
assert(entry->hash() == hash, "invariant");
return entry->id() == _lookup_id;
}
// Hash table callback on removal: blob handles are reference counted, so no
// explicit cleanup is required here.
inline void BlobCache::on_unlink(BlobEntry* entry) const {
assert(entry != NULL, "invariant");
}
// Scratch set of method ids already tagged during the current resolution pass.
static GrowableArray<traceid>* id_set = NULL;
// Resets per-pass state: a fresh (resource-area) id set sized to the old
// object queue, and a sorted unloaded-klass set ready for binary search.
static void prepare_for_resolution() {
id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size());
sort_unloaded_klass_set();
}
// A sample is eligible for stack trace blob installation only if it has a
// stack trace id and the sampled object is still alive.
static bool stack_trace_precondition(const ObjectSample* sample) {
assert(sample != NULL, "invariant");
return sample->has_stack_trace_id() && !sample->is_dead();
}
// Sample processor that attaches a serialized stack trace blob to each
// eligible sample, resolving against the stack trace repository and sharing
// blobs through a BlobCache to avoid duplicate serialization.
class StackTraceBlobInstaller {
private:
const JfrStackTraceRepository& _stack_trace_repo;
BlobCache _cache;
const JfrStackTrace* resolve(const ObjectSample* sample);
void install(ObjectSample* sample);
public:
StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo);
// iterate_samples callback: install only when the precondition holds
void sample_do(ObjectSample* sample) {
if (stack_trace_precondition(sample)) {
install(sample);
}
}
};
// Sizes the blob cache to the old object queue and resets the per-pass
// resolution state (id_set + sorted unloaded-klass set).
StackTraceBlobInstaller::StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo) :
_stack_trace_repo(stack_trace_repo), _cache(JfrOptionSet::old_object_queue_size()) {
prepare_for_resolution();
}
// Fetches the repository's stack trace matching the sample's hash and id.
const JfrStackTrace* StackTraceBlobInstaller::resolve(const ObjectSample* sample) {
return _stack_trace_repo.lookup(sample->stack_trace_hash(), sample->stack_trace_id());
}
#ifdef ASSERT
// Debug-only consistency check: the sample must not already carry a blob,
// and the resolved trace must match the sample's recorded hash and id.
static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) {
assert(!sample->has_stacktrace(), "invariant");
assert(stack_trace != NULL, "invariant");
assert(stack_trace->hash() == sample->stack_trace_hash(), "invariant");
assert(stack_trace->id() == sample->stack_trace_id(), "invariant");
}
#endif
// Attaches a stack trace blob to 'sample'. Cache hit: reuse the shared blob.
// Cache miss: resolve the trace, serialize it into a fresh checkpoint blob
// (one TYPE_STACKTRACE record), cache it, then attach it.
void StackTraceBlobInstaller::install(ObjectSample* sample) {
JfrBlobHandle blob = _cache.get(sample);
if (blob.valid()) {
sample->set_stacktrace(blob);
return;
}
const JfrStackTrace* const stack_trace = resolve(sample);
DEBUG_ONLY(validate_stack_trace(sample, stack_trace));
JfrCheckpointWriter writer(false, true, Thread::current());
writer.write_type(TYPE_STACKTRACE);
writer.write_count(1);
ObjectSampleCheckpoint::write_stacktrace(stack_trace, writer);
// move() transfers the serialized data out of the writer into the blob
blob = writer.move();
_cache.put(sample, blob);
sample->set_stacktrace(blob);
}
void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
static void install_stack_traces(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
assert(sampler != NULL, "invariant");
assert(edge_store != NULL, "invariant");
assert(thread != NULL, "invariant");
const ObjectSample* const last = sampler->last();
if (last != sampler->last_resolved()) {
StackTraceBlobInstaller installer(stack_trace_repo);
iterate_samples(installer);
}
}
// Chunk-rotation hook: install stack trace blobs for samples added since the
// last resolution. caller needs ResourceMark
void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
assert(sampler != NULL, "invariant");
assert(LeakProfiler::is_running(), "invariant");
install_stack_traces(sampler, stack_trace_repo);
}
// Extracts the klass id embedded in the upper bits of a method trace id.
static traceid get_klass_id(traceid method_id) {
assert(method_id != 0, "invariant");
return method_id >> TRACE_ID_SHIFT;
}
static bool types_registered = false;
if (!types_registered) {
JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
types_registered = true;
// True iff the klass owning 'method_id' was unloaded this epoch; such
// methods must not be touched.
static bool is_klass_unloaded(traceid method_id) {
return unloaded_klass_set != NULL && predicate(unloaded_klass_set, get_klass_id(method_id));
}
// Checks-and-records: returns true if 'id' was already seen this pass;
// otherwise inserts it (mutable_predicate mutates id_set) and returns false.
static bool is_processed(traceid id) {
assert(id != 0, "invariant");
assert(id_set != NULL, "invariant");
return mutable_predicate(id_set, id);
}
// Tags 'method' for leak-profiler artifact emission, at most once per pass,
// and never for methods whose klass has been unloaded.
void ObjectSampleCheckpoint::add_to_leakp_set(const Method* method, traceid method_id) {
if (is_processed(method_id) || is_klass_unloaded(method_id)) {
return;
}
JfrTraceId::set_leakp(method);
}
const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
ObjectSample* const last = const_cast<ObjectSample*>(sampler->last());
{
JfrCheckpointWriter writer(false, false, thread);
CheckpointWrite checkpoint_write(writer, last_sweep);
do_samples(last, NULL, checkpoint_write);
// Serializes one stack trace (header + frames) into 'writer' and tags each
// frame's method for leakp emission.
void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
assert(trace != NULL, "invariant");
// JfrStackTrace
writer.write(trace->id());
// stored inverted: the wire format records "truncated", not "reached root"
writer.write((u1)!trace->_reached_root);
writer.write(trace->_nr_of_frames);
// JfrStackFrames
for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
const JfrStackFrame& frame = trace->_frames[i];
frame.write(writer);
add_to_leakp_set(frame._method, frame._methodid);
}
}
CheckpointStateReset state_reset(last_sweep);
do_samples(last, NULL, state_reset);
// Either resets a blob's write-once state ('reset' pass) or writes it to the
// checkpoint; exclusive_write ensures a shared blob is emitted only once.
static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer, bool reset) {
if (reset) {
blob->reset_write_state();
return;
}
blob->exclusive_write(writer);
}
// Writes (or resets) the sample's type-set blob, if it carries one.
static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
if (sample->has_type_set()) {
write_blob(sample->type_set(), writer, reset);
}
}
// Writes the sample's thread blob only when its thread has exited — live
// threads are serialized through the regular type set instead.
static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
assert(sample->has_thread(), "invariant");
if (has_thread_exited(sample->thread_id())) {
write_blob(sample->thread(), writer, reset);
}
}
// Writes (or resets) the sample's stack trace blob, if one was installed.
static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
if (sample->has_stacktrace()) {
write_blob(sample->stacktrace(), writer, reset);
}
}
// Emits (or resets) all blob kinds attached to one sample:
// stacktrace, thread, then type set.
static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
assert(sample != NULL, "invariant");
write_stacktrace_blob(sample, writer, reset);
write_thread_blob(sample, writer, reset);
write_type_set_blob(sample, writer, reset);
}
// Sample processor that streams each live-and-old-enough sample's blobs into
// a checkpoint writer. Runs twice: first to write, then (after set_reset())
// to clear the blobs' write-once state for the next emission.
class BlobWriter {
private:
const ObjectSampler* _sampler;
JfrCheckpointWriter& _writer;
const jlong _last_sweep;
bool _reset;
public:
BlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
_sampler(sampler), _writer(writer), _last_sweep(last_sweep), _reset(false) {}
void sample_do(ObjectSample* sample) {
if (sample->is_alive_and_older_than(_last_sweep)) {
write_blobs(sample, _writer, _reset);
}
}
// switch the second iteration from writing to state-resetting
void set_reset() {
_reset = true;
}
};
// Writes the blobs of all qualifying samples into one checkpoint, then makes
// a second pass to reset their exclusive-write state.
static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) {
// sample set is predicated on time of last sweep
const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
JfrCheckpointWriter writer(false, false, thread);
BlobWriter cbw(sampler, writer, last_sweep);
iterate_samples(cbw, true);
// reset blob write states
cbw.set_reset();
iterate_samples(cbw, true);
}
// Emits all checkpoint data for the old-object sample event: first the
// per-sample blobs, then (if any reference chains were found) the java
// object / reference-chain representations from the edge store.
void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
assert(sampler != NULL, "invariant");
assert(edge_store != NULL, "invariant");
assert(thread != NULL, "invariant");
write_sample_blobs(sampler, emit_all, thread);
// write reference chains
if (!edge_store->is_empty()) {
// java object and chain representations
JfrCheckpointWriter writer(false, true, thread);
ObjectSampleWriter osw(writer, edge_store);
edge_store->iterate(osw);
}
}
int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) {
assert(object_sampler != NULL, "invariant");
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
if (last == NULL) {
return 0;
static void clear_unloaded_klass_set() {
if (unloaded_klass_set != NULL && unloaded_klass_set->is_nonempty()) {
unloaded_klass_set->clear();
}
const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
SampleMark mark(marker, last_sweep);
do_samples(last, NULL, mark);
return mark.count();
}
WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) :
_sampler(sampler), _stack_trace_repo(repo) {}
// A linked list of saved type set blobs for the epoch.
// The link consist of a reference counted handle.
static JfrBlobHandle saved_type_set_blobs;
bool WriteObjectSampleStacktrace::process() {
assert(LeakProfiler::is_running(), "invariant");
assert(_sampler != NULL, "invariant");
// Drops epoch-scoped state: releases the saved type-set blob chain and
// clears the unloaded-klass set.
static void release_state_for_previous_epoch() {
// decrements the reference count and the list is reinitialized
saved_type_set_blobs = JfrBlobHandle();
clear_unloaded_klass_set();
}
ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last());
const ObjectSample* const last_resolved = _sampler->last_resolved();
if (last == last_resolved) {
return true;
// Sample processor that attaches the current epoch's saved type-set blob
// chain to every live sample; the destructor then releases the epoch state,
// so installation and cleanup are tied to one iteration scope.
class BlobInstaller {
public:
~BlobInstaller() {
release_state_for_previous_epoch();
}
void sample_do(ObjectSample* sample) {
if (!sample->is_dead()) {
sample->set_type_set(saved_type_set_blobs);
}
}
};
JfrCheckpointWriter writer(false, true, Thread::current());
const JfrCheckpointContext ctx = writer.context();
writer.write_type(TYPE_STACKTRACE);
const jlong count_offset = writer.reserve(sizeof(u4));
// Installs the saved type-set blobs on samples newer than the last-resolved
// watermark; epoch state is released when 'installer' goes out of scope.
static void install_type_set_blobs() {
BlobInstaller installer;
iterate_samples(installer);
}
int count = 0;
{
StackTraceWrite stack_trace_write(_stack_trace_repo, writer); // JfrStacktrace_lock
do_samples(last, last_resolved, stack_trace_write);
count = stack_trace_write.count();
static void save_type_set_blob(JfrCheckpointWriter& writer, bool copy = false) {
assert(writer.has_data(), "invariant");
const JfrBlobHandle blob = copy ? writer.copy() : writer.move();
if (saved_type_set_blobs.valid()) {
saved_type_set_blobs->set_next(blob);
} else {
saved_type_set_blobs = blob;
}
if (count == 0) {
writer.set_context(ctx);
return true;
}
void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
assert(LeakProfiler::is_running(), "invariant");
const ObjectSample* last = ObjectSampler::sampler()->last();
if (writer.has_data() && last != NULL) {
save_type_set_blob(writer);
install_type_set_blobs();
ObjectSampler::sampler()->set_last_resolved(last);
}
assert(count > 0, "invariant");
writer.write_count((u4)count, count_offset);
JfrStackTraceRepository::write_metadata(writer);
}
// install the stacktrace checkpoint information to the candidates
ObjectSampleCheckpoint::install(writer, false, false);
return true;
// Class-unload hook (runs at a safepoint): snapshot the unload-time type set
// as a blob for later installation. Uses copy (not move) so the writer's
// data remains available to the regular checkpoint path.
void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
assert(LeakProfiler::is_running(), "invariant");
if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
save_type_set_blob(writer, true);
}
}
......@@ -26,27 +26,35 @@
#define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
#include "memory/allocation.hpp"
#include "jfr/utilities/jfrTypes.hpp"
class EdgeStore;
class JavaThread;
class JfrCheckpointWriter;
class JfrStackTrace;
class JfrStackTraceRepository;
class Klass;
class Method;
class ObjectSample;
class ObjectSampleMarker;
class ObjectSampler;
class Thread;
class ObjectSampleCheckpoint : AllStatic {
public:
static void install(JfrCheckpointWriter& writer, bool class_unload, bool type_set);
static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
static int mark(ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
};
class WriteObjectSampleStacktrace : public StackObj {
friend class EventEmitter;
friend class PathToGcRootsOperation;
friend class StackTraceBlobInstaller;
private:
ObjectSampler* const _sampler;
JfrStackTraceRepository& _stack_trace_repo;
static void add_to_leakp_set(const Method* method, traceid method_id);
static int save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
static void write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer);
static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
public:
WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo);
bool process();
static void on_klass_unload(const Klass* k);
static void on_type_set(JfrCheckpointWriter& writer);
static void on_type_set_unload(JfrCheckpointWriter& writer);
static void on_thread_exit(JavaThread* jt);
static void on_rotation(const ObjectSampler* sampler, JfrStackTraceRepository& repo);
};
#endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
......@@ -33,8 +33,8 @@
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/writers/jfrTypeWriterHost.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "utilities/growableArray.hpp"
......@@ -137,30 +137,33 @@ class FieldTable : public ResourceObj {
typename,
size_t>
friend class HashTableHost;
typedef HashTableHost<const ObjectSampleFieldInfo*, traceid, Entry, FieldTable, 109> FieldInfoTable;
typedef HashTableHost<const ObjectSampleFieldInfo*, traceid, JfrHashtableEntry, FieldTable, 109> FieldInfoTable;
public:
typedef FieldInfoTable::HashEntry FieldInfoEntry;
private:
static traceid _field_id_counter;
FieldInfoTable* _table;
const ObjectSampleFieldInfo* _lookup;
void assign_id(FieldInfoEntry* entry) {
void on_link(FieldInfoEntry* entry) {
assert(entry != NULL, "invariant");
entry->set_id(++_field_id_counter);
}
bool equals(const ObjectSampleFieldInfo* query, uintptr_t hash, const FieldInfoEntry* entry) {
bool on_equals(uintptr_t hash, const FieldInfoEntry* entry) {
assert(hash == entry->hash(), "invariant");
assert(query != NULL, "invariant");
const ObjectSampleFieldInfo* stored = entry->literal();
assert(stored != NULL, "invariant");
assert(((Symbol*)stored->_field_name_symbol)->identity_hash() == ((Symbol*)query->_field_name_symbol)->identity_hash(), "invariant");
return stored->_field_modifiers == query->_field_modifiers;
assert(_lookup != NULL, "invariant");
return entry->literal()->_field_modifiers == _lookup->_field_modifiers;
}
void on_unlink(FieldInfoEntry* entry) {
assert(entry != NULL, "invariant");
// nothing
}
public:
FieldTable() : _table(new FieldInfoTable(this)) {}
FieldTable() : _table(new FieldInfoTable(this)), _lookup(NULL) {}
~FieldTable() {
assert(_table != NULL, "invariant");
delete _table;
......@@ -168,8 +171,8 @@ class FieldTable : public ResourceObj {
traceid store(const ObjectSampleFieldInfo* field_info) {
assert(field_info != NULL, "invariant");
const FieldInfoEntry& entry =_table->lookup_put(field_info,
((Symbol*)field_info->_field_name_symbol)->identity_hash());
_lookup = field_info;
const FieldInfoEntry& entry = _table->lookup_put(((Symbol*)field_info->_field_name_symbol)->identity_hash(), field_info);
return entry.id();
}
......@@ -196,7 +199,7 @@ static ArrayInfo* array_infos = NULL;
static FieldTable* field_infos = NULL;
static RootDescriptionInfo* root_infos = NULL;
int __write_sample_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* si) {
int __write_sample_info__(JfrCheckpointWriter* writer, const void* si) {
assert(writer != NULL, "invariant");
assert(si != NULL, "invariant");
const OldObjectSampleInfo* const oosi = (const OldObjectSampleInfo*)si;
......@@ -211,17 +214,17 @@ int __write_sample_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, c
return 1;
}
typedef JfrArtifactWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl;
typedef JfrArtifactWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
typedef JfrTypeWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl;
typedef JfrTypeWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
static void write_sample_infos(JfrCheckpointWriter& writer) {
if (sample_infos != NULL) {
SampleWriter sw(&writer, NULL, false);
SampleWriter sw(&writer);
sample_infos->iterate(sw);
}
}
int __write_reference_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ri) {
int __write_reference_info__(JfrCheckpointWriter* writer, const void* ri) {
assert(writer != NULL, "invariant");
assert(ri != NULL, "invariant");
const ReferenceInfo* const ref_info = (const ReferenceInfo*)ri;
......@@ -233,17 +236,17 @@ int __write_reference_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused
return 1;
}
typedef JfrArtifactWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl;
typedef JfrArtifactWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
typedef JfrTypeWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl;
typedef JfrTypeWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
static void write_reference_infos(JfrCheckpointWriter& writer) {
if (ref_infos != NULL) {
ReferenceWriter rw(&writer, NULL, false);
ReferenceWriter rw(&writer);
ref_infos->iterate(rw);
}
}
int __write_array_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ai) {
int __write_array_info__(JfrCheckpointWriter* writer, const void* ai) {
assert(writer != NULL, "invariant");
assert(ai != NULL, "invariant");
const ObjectSampleArrayInfo* const osai = (const ObjectSampleArrayInfo*)ai;
......@@ -270,17 +273,17 @@ static traceid get_array_info_id(const Edge& edge, traceid id) {
return array_infos->store(osai);
}
typedef JfrArtifactWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl;
typedef JfrArtifactWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
typedef JfrTypeWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl;
typedef JfrTypeWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
static void write_array_infos(JfrCheckpointWriter& writer) {
if (array_infos != NULL) {
ArrayWriter aw(&writer, NULL, false);
ArrayWriter aw(&writer);
array_infos->iterate(aw);
}
}
int __write_field_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* fi) {
int __write_field_info__(JfrCheckpointWriter* writer, const void* fi) {
assert(writer != NULL, "invariant");
assert(fi != NULL, "invariant");
const FieldTable::FieldInfoEntry* field_info_entry = (const FieldTable::FieldInfoEntry*)fi;
......@@ -314,12 +317,12 @@ static traceid get_field_info_id(const Edge& edge) {
return field_infos->store(osfi);
}
typedef JfrArtifactWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl;
typedef JfrArtifactWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
typedef JfrTypeWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl;
typedef JfrTypeWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
static void write_field_infos(JfrCheckpointWriter& writer) {
if (field_infos != NULL) {
FieldWriter fw(&writer, NULL, false);
FieldWriter fw(&writer);
field_infos->iterate(fw);
}
}
......@@ -339,7 +342,7 @@ static const char* description(const ObjectSampleRootDescriptionInfo* osdi) {
return description.description();
}
int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* di) {
int __write_root_description_info__(JfrCheckpointWriter* writer, const void* di) {
assert(writer != NULL, "invariant");
assert(di != NULL, "invariant");
const ObjectSampleRootDescriptionInfo* const osdi = (const ObjectSampleRootDescriptionInfo*)di;
......@@ -366,8 +369,8 @@ static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) {
return root_infos->store(oodi);
}
typedef JfrArtifactWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl;
typedef JfrArtifactWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter;
typedef JfrTypeWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl;
typedef JfrTypeWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter;
int _edge_reference_compare_(uintptr_t lhs, uintptr_t rhs) {
......@@ -513,7 +516,7 @@ static void write_root_descriptors(JfrCheckpointWriter& writer) {
RootResolutionSet rrs(root_infos);
RootResolver::resolve(rrs);
// write roots
RootDescriptionWriter rw(&writer, NULL, false);
RootDescriptionWriter rw(&writer);
root_infos->iterate(rw);
}
}
......@@ -576,11 +579,45 @@ void ObjectSampleWriter::write(const StoredEdge* edge) {
}
}
class RootSystemType : public JfrSerializer {
public:
void serialize(JfrCheckpointWriter& writer) {
const u4 nof_root_systems = OldObjectRoot::_number_of_systems;
writer.write_count(nof_root_systems);
for (u4 i = 0; i < nof_root_systems; ++i) {
writer.write_key(i);
writer.write(OldObjectRoot::system_description((OldObjectRoot::System)i));
}
}
};
class RootType : public JfrSerializer {
public:
void serialize(JfrCheckpointWriter& writer) {
const u4 nof_root_types = OldObjectRoot::_number_of_types;
writer.write_count(nof_root_types);
for (u4 i = 0; i < nof_root_types; ++i) {
writer.write_key(i);
writer.write(OldObjectRoot::type_description((OldObjectRoot::Type)i));
}
}
};
// Registers the constant-pool serializers for the old-object-sample root
// system/type enumerations. Idempotent: only the first call registers;
// subsequent calls return immediately.
// NOTE(review): the guard is a plain static bool — assumes callers are
// serialized externally; confirm no concurrent first-call is possible.
static void register_serializers() {
  static bool is_registered = false;
  if (is_registered) {
    return;
  }
  JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
  JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
  is_registered = true;
}
ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) :
_writer(writer),
_store(store) {
assert(store != NULL, "invariant");
assert(!store->is_empty(), "invariant");
register_serializers();
sample_infos = NULL;
ref_infos = NULL;
array_infos = NULL;
......
......@@ -29,7 +29,6 @@
#include "jfr/leakprofiler/checkpoint/rootResolver.hpp"
#include "memory/iterator.hpp"
#include "oops/klass.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/privilegedStack.hpp"
......
......@@ -25,13 +25,14 @@
#ifndef SHARE_VM_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
#define SHARE_VM_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrBlob.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "utilities/ticks.hpp"
/*
* Handle for diagnosing Java memory leaks.
*
......@@ -44,8 +45,9 @@ class ObjectSample : public JfrCHeapObj {
private:
ObjectSample* _next;
ObjectSample* _previous;
JfrCheckpointBlobHandle _thread_cp;
JfrCheckpointBlobHandle _klass_cp;
JfrBlobHandle _stacktrace;
JfrBlobHandle _thread;
JfrBlobHandle _type_set;
oop _object;
Ticks _allocation_time;
traceid _stack_trace_id;
......@@ -62,17 +64,14 @@ class ObjectSample : public JfrCHeapObj {
}
void release_references() {
if (_thread_cp.valid()) {
_thread_cp.~JfrCheckpointBlobHandle();
}
if (_klass_cp.valid()) {
_klass_cp.~JfrCheckpointBlobHandle();
}
_stacktrace.~JfrBlobHandle();
_thread.~JfrBlobHandle();
_type_set.~JfrBlobHandle();
}
void reset() {
set_stack_trace_id(0);
set_stack_trace_hash(0),
set_stack_trace_hash(0);
release_references();
_dead = false;
}
......@@ -80,8 +79,9 @@ class ObjectSample : public JfrCHeapObj {
public:
ObjectSample() : _next(NULL),
_previous(NULL),
_thread_cp(),
_klass_cp(),
_stacktrace(),
_thread(),
_type_set(),
_object(NULL),
_allocation_time(),
_stack_trace_id(0),
......@@ -174,7 +174,7 @@ class ObjectSample : public JfrCHeapObj {
return _heap_used_at_last_gc;
}
bool has_stack_trace() const {
bool has_stack_trace_id() const {
return stack_trace_id() != 0;
}
......@@ -194,10 +194,6 @@ class ObjectSample : public JfrCHeapObj {
_stack_trace_hash = hash;
}
bool has_thread() const {
return _thread_id != 0;
}
traceid thread_id() const {
return _thread_id;
}
......@@ -211,37 +207,51 @@ class ObjectSample : public JfrCHeapObj {
_allocation_time.ft_value() : _allocation_time.value()) < time_stamp;
}
const JfrCheckpointBlobHandle& thread_checkpoint() const {
return _thread_cp;
const JfrBlobHandle& stacktrace() const {
return _stacktrace;
}
bool has_thread_checkpoint() const {
return _thread_cp.valid();
bool has_stacktrace() const {
return _stacktrace.valid();
}
// JfrCheckpointBlobHandle assignment operator
// JfrBlobHandle assignment operator
// maintains proper reference counting
void set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) {
if (_thread_cp != ref) {
_thread_cp = ref;
void set_stacktrace(const JfrBlobHandle& ref) {
if (_stacktrace != ref) {
_stacktrace = ref;
}
}
// Returns the handle to the serialized thread blob associated with this sample.
const JfrBlobHandle& thread() const {
return _thread;
}
// True if a thread blob has been installed for this sample.
bool has_thread() const {
return _thread.valid();
}
// Installs the thread blob. The JfrBlobHandle assignment operator maintains
// proper reference counting; assigning an identical handle is skipped.
void set_thread(const JfrBlobHandle& ref) {
if (_thread != ref) {
_thread = ref;
}
}
const JfrCheckpointBlobHandle& klass_checkpoint() const {
return _klass_cp;
const JfrBlobHandle& type_set() const {
return _type_set;
}
bool has_klass_checkpoint() const {
return _klass_cp.valid();
bool has_type_set() const {
return _type_set.valid();
}
void set_klass_checkpoint(const JfrCheckpointBlobHandle& ref) {
if (_klass_cp != ref) {
if (_klass_cp.valid()) {
_klass_cp->set_next(ref);
void set_type_set(const JfrBlobHandle& ref) {
if (_type_set != ref) {
if (_type_set.valid()) {
_type_set->set_next(ref);
return;
}
_klass_cp = ref;
_type_set = ref;
}
}
};
......
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -108,47 +108,28 @@ static traceid get_thread_id(JavaThread* thread) {
}
const JfrThreadLocal* const tl = thread->jfr_thread_local();
assert(tl != NULL, "invariant");
if (!tl->has_thread_checkpoint()) {
JfrCheckpointManager::create_thread_checkpoint(thread);
if (!tl->has_thread_blob()) {
JfrCheckpointManager::create_thread_blob(thread);
}
assert(tl->has_thread_checkpoint(), "invariant");
assert(tl->has_thread_blob(), "invariant");
return tl->thread_id();
}
// Populates the thread local stack frames, but does not add them
// to the stacktrace repository (...yet, see stacktrace_id() below)
//
void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) {
assert(stacktrace != NULL, "invariant");
static void record_stacktrace(JavaThread* thread) {
assert(thread != NULL, "invariant");
if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0, WALK_BY_DEFAULT);
JfrStackTraceRepository::record_and_cache(thread, 0, WALK_BY_DEFAULT);
}
}
// We were successful in acquiring the try lock and have been selected for adding a sample.
// Go ahead with installing our previously taken stacktrace into the stacktrace repository.
//
traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) {
assert(stacktrace != NULL, "invariant");
assert(stacktrace->hash() != 0, "invariant");
const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread);
thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash());
return stacktrace_id;
}
void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
assert(thread != NULL, "invariant");
assert(is_created(), "invariant");
const traceid thread_id = get_thread_id(thread);
if (thread_id == 0) {
return;
}
const JfrThreadLocal* const tl = thread->jfr_thread_local();
JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
fill_stacktrace(&stacktrace, thread);
record_stacktrace(thread);
// try enter critical section
JfrTryLock tryLock(&_lock);
if (!tryLock.has_lock()) {
......@@ -156,14 +137,14 @@ void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread)
return;
}
instance().add(obj, allocated, thread_id, &stacktrace, thread);
instance().add(obj, allocated, thread_id, thread);
}
void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) {
assert(stacktrace != NULL, "invariant");
void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JavaThread* thread) {
assert(obj != NULL, "invariant");
assert(thread_id != 0, "invariant");
assert(thread != NULL, "invariant");
assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
assert(thread->jfr_thread_local()->has_thread_blob(), "invariant");
if (_dead_samples) {
scavenge();
......@@ -187,11 +168,13 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrS
assert(sample != NULL, "invariant");
sample->set_thread_id(thread_id);
sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());
const unsigned int stacktrace_hash = stacktrace->hash();
const JfrThreadLocal* const tl = thread->jfr_thread_local();
sample->set_thread(tl->thread_blob());
const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
if (stacktrace_hash != 0) {
sample->set_stack_trace_id(stacktrace_id(stacktrace, thread));
sample->set_stack_trace_id(tl->cached_stack_trace_id());
sample->set_stack_trace_hash(stacktrace_hash);
}
......@@ -250,7 +233,7 @@ void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
sampler._last_sweep = JfrTicks::now();
}
const ObjectSample* ObjectSampler::last() const {
ObjectSample* ObjectSampler::last() const {
return _list->last();
}
......
......@@ -31,25 +31,19 @@
typedef u8 traceid;
class BoolObjectClosure;
class JfrStackTrace;
class JavaThread;
class OopClosure;
class ObjectSample;
class ObjectSampler;
class SampleList;
class SamplePriorityQueue;
class Thread;
// Class reponsible for holding samples and
// making sure the samples are evenly distributed as
// new entries are added and removed.
class ObjectSampler : public CHeapObj<mtTracing> {
friend class EventEmitter;
friend class JfrRecorderService;
friend class LeakProfiler;
friend class StartOperation;
friend class StopOperation;
friend class ObjectSampleCheckpoint;
friend class WriteObjectSampleStacktrace;
private:
SamplePriorityQueue* _priority_queue;
SampleList* _list;
......@@ -64,20 +58,11 @@ class ObjectSampler : public CHeapObj<mtTracing> {
~ObjectSampler();
static bool create(size_t size);
static bool is_created();
static ObjectSampler* sampler();
static void destroy();
// For operations that require exclusive access (non-safepoint)
static ObjectSampler* acquire();
static void release();
// Stacktrace
static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread);
traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread);
// Sampling
static void sample(HeapWord* object, size_t size, JavaThread* thread);
void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread);
void add(HeapWord* object, size_t size, traceid thread_id, JavaThread* thread);
void scavenge();
void remove_dead(ObjectSample* sample);
......@@ -87,8 +72,15 @@ class ObjectSampler : public CHeapObj<mtTracing> {
const ObjectSample* item_at(int index) const;
ObjectSample* item_at(int index);
int item_count() const;
public:
static ObjectSampler* sampler();
// For operations that require exclusive access (non-safepoint)
static ObjectSampler* acquire();
static void release();
const ObjectSample* first() const;
const ObjectSample* last() const;
ObjectSample* last() const;
const ObjectSample* last_resolved() const;
void set_last_resolved(const ObjectSample* sample);
const JfrTicks& last_sweep() const;
......
......@@ -50,12 +50,12 @@ class SampleList : public JfrCHeapObj {
SampleList(size_t limit, size_t cache_size = 0);
~SampleList();
void set_last_resolved(const ObjectSample* sample);
ObjectSample* get();
ObjectSample* last() const;
ObjectSample* first() const;
void release(ObjectSample* sample);
ObjectSample* last() const;
const ObjectSample* last_resolved() const;
void set_last_resolved(const ObjectSample* sample);
void release(ObjectSample* sample);
ObjectSample* reuse(ObjectSample* sample);
bool is_full() const;
size_t count() const;
......
......@@ -36,6 +36,7 @@
#include "jfr/utilities/jfrBigEndian.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
......@@ -86,22 +87,18 @@ static const size_t unlimited_mspace_size = 0;
static const size_t checkpoint_buffer_cache_count = 2;
static const size_t checkpoint_buffer_size = 512 * K;
static JfrCheckpointMspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrCheckpointManager* system) {
JfrCheckpointMspace* mspace = new JfrCheckpointMspace(buffer_size, limit, cache_count, system);
if (mspace != NULL) {
mspace->initialize();
}
return mspace;
static JfrCheckpointMspace* allocate_mspace(size_t size, size_t limit, size_t cache_count, JfrCheckpointManager* mgr) {
return create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(size, limit, cache_count, mgr);
}
bool JfrCheckpointManager::initialize() {
assert(_free_list_mspace == NULL, "invariant");
_free_list_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
_free_list_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
if (_free_list_mspace == NULL) {
return false;
}
assert(_epoch_transition_mspace == NULL, "invariant");
_epoch_transition_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
_epoch_transition_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
if (_epoch_transition_mspace == NULL) {
return false;
}
......@@ -113,22 +110,6 @@ bool JfrCheckpointManager::initialize() {
return JfrTypeManager::initialize();
}
bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
return _service_thread != thread && OrderAccess::load_acquire((u1*)&_checkpoint_epoch_state) != JfrTraceIdEpoch::current();
}
void JfrCheckpointManager::synchronize_epoch() {
assert(_checkpoint_epoch_state != JfrTraceIdEpoch::current(), "invariant");
OrderAccess::storestore();
_checkpoint_epoch_state = JfrTraceIdEpoch::current();
}
void JfrCheckpointManager::shift_epoch() {
debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
JfrTraceIdEpoch::shift_epoch();
assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
}
// Records the JFR service thread; use_epoch_transition_mspace() compares
// against it to decide which mspace a writing thread should use.
void JfrCheckpointManager::register_service_thread(const Thread* thread) {
_service_thread = thread;
}
......@@ -150,7 +131,6 @@ void JfrCheckpointManager::unlock() {
}
#ifdef ASSERT
bool JfrCheckpointManager::is_locked() const {
return _lock->owned_by_self();
}
......@@ -166,7 +146,6 @@ static void assert_release(const BufferPtr buffer) {
assert(buffer->lease(), "invariant");
assert(buffer->acquired_by_self(), "invariant");
}
#endif // ASSERT
static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread) {
......@@ -184,6 +163,10 @@ static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t ret
return buffer;
}
// A non-service thread must write to the epoch transition mspace while the
// checkpoint epoch state has not yet caught up with the global trace-id epoch
// (i.e. an epoch shift is in progress). load_acquire orders the epoch read.
bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
return _service_thread != thread && OrderAccess::load_acquire((u1*)&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
}
static const size_t lease_retry = 10;
BufferPtr JfrCheckpointManager::lease_buffer(Thread* thread, size_t size /* 0 */) {
......@@ -255,33 +238,33 @@ static juint number_of_types(const u1* data) {
return read_data<juint>(data + types_offset);
}
static void write_checkpoint_header(JfrChunkWriter& cw, intptr_t offset_prev_cp_event, const u1* data) {
static void write_checkpoint_header(JfrChunkWriter& cw, int64_t offset_prev_cp_event, const u1* data) {
cw.reserve(sizeof(u4));
cw.write((u8)EVENT_CHECKPOINT);
cw.write<u8>(EVENT_CHECKPOINT);
cw.write(starttime(data));
cw.write(duration(data));
cw.write((jlong)offset_prev_cp_event);
cw.write(offset_prev_cp_event);
cw.write(is_flushpoint(data));
cw.write(number_of_types(data));
}
static void write_checkpoint_content(JfrChunkWriter& cw, const u1* data, size_t size) {
assert(data != NULL, "invariant");
cw.write_unbuffered(data + payload_offset, size);
cw.write_unbuffered(data + payload_offset, size - sizeof(JfrCheckpointEntry));
}
static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) {
assert(data != NULL, "invariant");
const intptr_t previous_checkpoint_event = cw.previous_checkpoint_offset();
const intptr_t event_begin = cw.current_offset();
const intptr_t offset_to_previous_checkpoint_event = 0 == previous_checkpoint_event ? 0 : previous_checkpoint_event - event_begin;
const jlong total_checkpoint_size = total_size(data);
write_checkpoint_header(cw, offset_to_previous_checkpoint_event, data);
write_checkpoint_content(cw, data, total_checkpoint_size - sizeof(JfrCheckpointEntry));
const jlong checkpoint_event_size = cw.current_offset() - event_begin;
cw.write_padded_at_offset<u4>(checkpoint_event_size, event_begin);
cw.set_previous_checkpoint_offset(event_begin);
return (size_t)total_checkpoint_size;
const int64_t event_begin = cw.current_offset();
const int64_t last_checkpoint_event = cw.last_checkpoint_offset();
const int64_t delta = last_checkpoint_event == 0 ? 0 : last_checkpoint_event - event_begin;
const int64_t checkpoint_size = total_size(data);
write_checkpoint_header(cw, delta, data);
write_checkpoint_content(cw, data, checkpoint_size);
const int64_t event_size = cw.current_offset() - event_begin;
cw.write_padded_at_offset<u4>(event_size, event_begin);
cw.set_last_checkpoint_offset(event_begin);
return (size_t)checkpoint_size;
}
static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size) {
......@@ -289,14 +272,14 @@ static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size)
assert(data != NULL, "invariant");
assert(size > 0, "invariant");
const u1* const limit = data + size;
const u1* next_entry = data;
const u1* next = data;
size_t processed = 0;
while (next_entry < limit) {
const size_t checkpoint_size = write_checkpoint_event(cw, next_entry);
while (next < limit) {
const size_t checkpoint_size = write_checkpoint_event(cw, next);
processed += checkpoint_size;
next_entry += checkpoint_size;
next += checkpoint_size;
}
assert(next_entry == limit, "invariant");
assert(next == limit, "invariant");
return processed;
}
......@@ -331,6 +314,12 @@ static size_t write_mspace_exclusive(JfrCheckpointMspace* mspace, JfrChunkWriter
return wo.processed();
}
// Brings the local checkpoint epoch state in line with the global trace-id
// epoch once pending checkpoints are written. The storestore barrier publishes
// prior writes before the epoch state becomes visible.
void JfrCheckpointManager::synchronize_epoch() {
assert(_checkpoint_epoch_state != JfrTraceIdEpoch::epoch(), "invariant");
OrderAccess::storestore();
_checkpoint_epoch_state = JfrTraceIdEpoch::epoch();
}
size_t JfrCheckpointManager::write() {
const size_t processed = write_mspace_exclusive(_free_list_mspace, _chunkwriter);
synchronize_epoch();
......@@ -372,10 +361,16 @@ void JfrCheckpointManager::write_type_set_for_unloaded_classes() {
JfrTypeManager::write_type_set_for_unloaded_classes();
}
void JfrCheckpointManager::create_thread_checkpoint(JavaThread* jt) {
JfrTypeManager::create_thread_checkpoint(jt);
// Facade: delegates creation (and caching) of the serialized thread blob
// for jt to JfrTypeManager.
void JfrCheckpointManager::create_thread_blob(JavaThread* jt) {
JfrTypeManager::create_thread_blob(jt);
}
// Facade: delegates writing of the thread checkpoint for jt to JfrTypeManager.
void JfrCheckpointManager::write_thread_checkpoint(JavaThread* jt) {
JfrTypeManager::write_thread_checkpoint(jt);
}
// Flips the global trace-id epoch. The pre-shift value is captured in debug
// builds only (debug_only) so the assert can verify the epoch actually changed.
void JfrCheckpointManager::shift_epoch() {
debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
JfrTraceIdEpoch::shift_epoch();
assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
}
......@@ -92,7 +92,7 @@ class JfrCheckpointManager : public JfrCHeapObj {
public:
void register_service_thread(const Thread* t);
static void write_type_set_for_unloaded_classes();
static void create_thread_checkpoint(JavaThread* jt);
static void create_thread_blob(JavaThread* jt);
static void write_thread_checkpoint(JavaThread* jt);
friend class JfrRecorder;
......
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/utilities/jfrBlob.hpp"
#include "jfr/writers/jfrBigEndianWriter.hpp"
JfrCheckpointFlush::JfrCheckpointFlush(Type* old, size_t used, size_t requested, Thread* t) :
......@@ -126,7 +127,7 @@ void JfrCheckpointWriter::write_count(u4 nof_entries, jlong offset) {
write_padded_at_offset(nof_entries, offset);
}
const u1* JfrCheckpointWriter::session_data(size_t* size, const JfrCheckpointContext* ctx /* 0 */) {
const u1* JfrCheckpointWriter::session_data(size_t* size, bool move /* false */, const JfrCheckpointContext* ctx /* 0 */) {
assert(this->is_acquired(), "wrong state!");
if (!this->is_valid()) {
*size = 0;
......@@ -140,8 +141,10 @@ const u1* JfrCheckpointWriter::session_data(size_t* size, const JfrCheckpointCon
*size = this->used_size();
assert(this->start_pos() + *size == this->current_pos(), "invariant");
write_checkpoint_header(const_cast<u1*>(this->start_pos()), this->used_offset(), _time, is_flushpoint(), count());
this->seek(_offset + (_header ? sizeof(JfrCheckpointEntry) : 0));
set_count(0);
_header = false; // the header was just written
if (move) {
this->seek(_offset);
}
return this->start_pos();
}
......@@ -160,26 +163,19 @@ bool JfrCheckpointWriter::has_data() const {
return this->used_size() > sizeof(JfrCheckpointEntry);
}
JfrCheckpointBlobHandle JfrCheckpointWriter::checkpoint_blob() {
JfrBlobHandle JfrCheckpointWriter::copy(const JfrCheckpointContext* ctx /* 0 */) {
size_t size = 0;
const u1* data = session_data(&size);
return JfrCheckpointBlob::make(data, size);
const u1* data = session_data(&size, false, ctx);
return JfrBlob::make(data, size);
}
JfrCheckpointBlobHandle JfrCheckpointWriter::copy(const JfrCheckpointContext* ctx /* 0 */) {
if (ctx == NULL) {
return checkpoint_blob();
}
JfrBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */) {
size_t size = 0;
const u1* data = session_data(&size, ctx);
return JfrCheckpointBlob::make(data, size);
}
JfrCheckpointBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */) {
JfrCheckpointBlobHandle data = copy(ctx);
const u1* data = session_data(&size, true, ctx);
JfrBlobHandle blob = JfrBlob::make(data, size);
if (ctx != NULL) {
const_cast<JfrCheckpointContext*>(ctx)->count = 0;
set_context(*ctx);
}
return data;
return blob;
}
......@@ -25,8 +25,8 @@
#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
#define SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "jfr/utilities/jfrBlob.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "jfr/writers/jfrEventWriterHost.inline.hpp"
......@@ -67,9 +67,8 @@ class JfrCheckpointWriter : public JfrCheckpointWriterBase {
void increment();
void set_flushpoint(bool flushpoint);
bool is_flushpoint() const;
const u1* session_data(size_t* size, const JfrCheckpointContext* ctx = NULL);
const u1* session_data(size_t* size, bool move = false, const JfrCheckpointContext* ctx = NULL);
void release();
public:
JfrCheckpointWriter(bool flushpoint, bool header, Thread* thread);
~JfrCheckpointWriter();
......@@ -80,9 +79,8 @@ class JfrCheckpointWriter : public JfrCheckpointWriterBase {
const JfrCheckpointContext context() const;
void set_context(const JfrCheckpointContext ctx);
bool has_data() const;
JfrCheckpointBlobHandle checkpoint_blob();
JfrCheckpointBlobHandle copy(const JfrCheckpointContext* ctx = NULL);
JfrCheckpointBlobHandle move(const JfrCheckpointContext* ctx = NULL);
JfrBlobHandle copy(const JfrCheckpointContext* ctx = NULL);
JfrBlobHandle move(const JfrCheckpointContext* ctx = NULL);
};
#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
......@@ -30,7 +30,6 @@
#include "gc_interface/gcName.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/jfrType.hpp"
......@@ -295,34 +294,26 @@ void VMOperationTypeConstant::serialize(JfrCheckpointWriter& writer) {
class TypeSetSerialization {
private:
JfrCheckpointWriter* _leakp_writer;
bool _class_unload;
public:
explicit TypeSetSerialization(bool class_unload) : _class_unload(class_unload) {}
void write(JfrCheckpointWriter& writer, JfrCheckpointWriter* leakp_writer) {
JfrTypeSet::serialize(&writer, leakp_writer, _class_unload);
TypeSetSerialization(bool class_unload, JfrCheckpointWriter* leakp_writer = NULL) :
_leakp_writer(leakp_writer), _class_unload(class_unload) {}
void write(JfrCheckpointWriter& writer) {
JfrTypeSet::serialize(&writer, _leakp_writer, _class_unload);
}
};
void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
TypeSetSerialization type_set(true);
if (LeakProfiler::is_running()) {
JfrCheckpointWriter leakp_writer(false, true, Thread::current());
type_set.write(writer, &leakp_writer);
ObjectSampleCheckpoint::install(leakp_writer, true, true);
return;
}
type_set.write(writer, NULL);
type_set.write(writer);
};
// leakp_writer (optional, may be NULL) receives a parallel copy of type data
// destined for the leak profiler's old-object samples.
TypeSet::TypeSet(JfrCheckpointWriter* leakp_writer) : _leakp_writer(leakp_writer) {}
void TypeSet::serialize(JfrCheckpointWriter& writer) {
TypeSetSerialization type_set(false);
if (LeakProfiler::is_running()) {
JfrCheckpointWriter leakp_writer(false, true, Thread::current());
type_set.write(writer, &leakp_writer);
ObjectSampleCheckpoint::install(leakp_writer, false, true);
return;
}
type_set.write(writer, NULL);
TypeSetSerialization type_set(false, _leakp_writer);
type_set.write(writer);
};
void ThreadStateConstant::serialize(JfrCheckpointWriter& writer) {
......@@ -333,7 +324,6 @@ void JfrThreadConstant::serialize(JfrCheckpointWriter& writer) {
assert(_thread != NULL, "invariant");
assert(_thread == Thread::current(), "invariant");
assert(_thread->is_Java_thread(), "invariant");
assert(!_thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
ResourceMark rm(_thread);
const oop threadObj = _thread->threadObj();
assert(threadObj != NULL, "invariant");
......
......@@ -118,7 +118,10 @@ class VMOperationTypeConstant : public JfrSerializer {
};
class TypeSet : public JfrSerializer {
private:
JfrCheckpointWriter* _leakp_writer;
public:
explicit TypeSet(JfrCheckpointWriter* leakp_writer = NULL);
void serialize(JfrCheckpointWriter& writer);
};
......
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -23,12 +23,17 @@
*/
#include "precompiled.hpp"
#include "jfr/jfr.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/jfrType.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeManager.hpp"
#include "jfr/utilities/jfrDoublyLinkedList.hpp"
#include "jfr/utilities/jfrIterator.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/exceptions.hpp"
......@@ -39,7 +44,7 @@ class JfrSerializerRegistration : public JfrCHeapObj {
JfrSerializerRegistration* _next;
JfrSerializerRegistration* _prev;
JfrSerializer* _serializer;
mutable JfrCheckpointBlobHandle _cache;
mutable JfrBlobHandle _cache;
JfrTypeId _id;
bool _permit_cache;
......@@ -148,43 +153,58 @@ void JfrTypeManager::write_safepoint_types(JfrCheckpointWriter& writer) {
}
void JfrTypeManager::write_type_set() {
// can safepoint here because of PackageTable_lock
MutexLockerEx lock(SafepointSynchronize::is_at_safepoint() ? NULL : PackageTable_lock);
JfrCheckpointWriter writer(true, true, Thread::current());
TypeSet set;
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
// can safepoint here
MutexLocker module_lock(PackageTable_lock);
if (!LeakProfiler::is_running()) {
JfrCheckpointWriter writer(true, true, Thread::current());
TypeSet set;
set.serialize(writer);
return;
}
JfrCheckpointWriter leakp_writer(false, true, Thread::current());
JfrCheckpointWriter writer(false, true, Thread::current());
TypeSet set(&leakp_writer);
set.serialize(writer);
ObjectSampleCheckpoint::on_type_set(leakp_writer);
}
void JfrTypeManager::write_type_set_for_unloaded_classes() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
JfrCheckpointWriter writer(false, true, Thread::current());
const JfrCheckpointContext ctx = writer.context();
ClassUnloadTypeSet class_unload_set;
class_unload_set.serialize(writer);
if (LeakProfiler::is_running()) {
ObjectSampleCheckpoint::on_type_set_unload(writer);
}
if (!Jfr::is_recording()) {
// discard anything written
writer.set_context(ctx);
}
}
void JfrTypeManager::create_thread_checkpoint(JavaThread* jt) {
void JfrTypeManager::create_thread_blob(JavaThread* jt) {
assert(jt != NULL, "invariant");
ResourceMark rm(jt);
HandleMark hm(jt);
JfrThreadConstant type_thread(jt);
JfrCheckpointWriter writer(false, true, jt);
writer.write_type(TYPE_THREAD);
type_thread.serialize(writer);
// create and install a checkpoint blob
jt->jfr_thread_local()->set_thread_checkpoint(writer.checkpoint_blob());
assert(jt->jfr_thread_local()->has_thread_checkpoint(), "invariant");
jt->jfr_thread_local()->set_thread_blob(writer.move());
assert(jt->jfr_thread_local()->has_thread_blob(), "invariant");
}
void JfrTypeManager::write_thread_checkpoint(JavaThread* jt) {
assert(jt != NULL, "JavaThread is NULL!");
ResourceMark rm(jt);
if (jt->jfr_thread_local()->has_thread_checkpoint()) {
JfrCheckpointWriter writer(false, false, jt);
jt->jfr_thread_local()->thread_checkpoint()->write(writer);
} else {
JfrThreadConstant type_thread(jt);
JfrCheckpointWriter writer(false, true, jt);
writer.write_type(TYPE_THREAD);
type_thread.serialize(writer);
}
HandleMark hm(jt);
JfrThreadConstant type_thread(jt);
JfrCheckpointWriter writer(false, true, jt);
writer.write_type(TYPE_THREAD);
type_thread.serialize(writer);
}
#ifdef ASSERT
......
......@@ -37,7 +37,7 @@ class JfrTypeManager : public AllStatic {
static void write_safepoint_types(JfrCheckpointWriter& writer);
static void write_type_set();
static void write_type_set_for_unloaded_classes();
static void create_thread_checkpoint(JavaThread* jt);
static void create_thread_blob(JavaThread* jt);
static void write_thread_checkpoint(JavaThread* jt);
};
......
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -23,37 +23,26 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
// XXX #include "classfile/packageEntry.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "jfr/jfr.hpp"
#include "jfr/jni/jfrGetAllEventClasses.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "jfr/utilities/jfrHashtable.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "jfr/writers/jfrTypeWriterHost.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/accessFlags.hpp"
// incremented on each checkpoint
static u8 checkpoint_id = 0;
// creates a unique id by combining a checkpoint relative symbol id (2^24)
// with the current checkpoint id (2^40)
#define CREATE_SYMBOL_ID(sym_id) (((u8)((checkpoint_id << 24) | sym_id)))
#define CREATE_PACKAGE_ID(pkg_id) (((u8)((checkpoint_id << 24) | pkg_id)))
typedef const Klass* KlassPtr;
// XXX typedef const PackageEntry* PkgPtr;
typedef const ClassLoaderData* CldPtr;
......@@ -62,6 +51,55 @@ typedef const Symbol* SymbolPtr;
typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr;
typedef const JfrSymbolId::CStringEntry* CStringEntryPtr;
static JfrCheckpointWriter* _writer = NULL;
static JfrCheckpointWriter* _leakp_writer = NULL;
static JfrArtifactSet* _artifacts = NULL;
static JfrArtifactClosure* _subsystem_callback = NULL;
static bool _class_unload = false;
static bool _flushpoint = false;
// incremented on each rotation
static u8 checkpoint_id = 1;
// creates a unique id by combining a checkpoint relative symbol id (2^24)
// with the current checkpoint id (2^40)
#define CREATE_SYMBOL_ID(sym_id) (((u8)((checkpoint_id << 24) | sym_id)))
#define CREATE_PACKAGE_ID(pkg_id) (((u8)((checkpoint_id << 24) | pkg_id)))
static traceid create_symbol_id(traceid artifact_id) {
return artifact_id != 0 ? CREATE_SYMBOL_ID(artifact_id) : 0;
}
static bool current_epoch() {
return _class_unload;
}
static bool previous_epoch() {
return !current_epoch();
}
static bool is_complete() {
return !_artifacts->has_klass_entries() && current_epoch();
}
static traceid mark_symbol(KlassPtr klass, bool leakp) {
return klass != NULL ? create_symbol_id(_artifacts->mark(klass, leakp)) : 0;
}
static traceid mark_symbol(Symbol* symbol, bool leakp) {
return symbol != NULL ? create_symbol_id(_artifacts->mark(symbol, leakp)) : 0;
}
static traceid get_bootstrap_name(bool leakp) {
return create_symbol_id(_artifacts->bootstrap_name(leakp));
}
template <typename T>
static traceid artifact_id(const T* ptr) {
assert(ptr != NULL, "invariant");
return TRACE_ID(ptr);
}
inline uintptr_t package_name_hash(const char *s) {
uintptr_t val = 0;
while (*s != 0) {
......@@ -70,6 +108,7 @@ inline uintptr_t package_name_hash(const char *s) {
return val;
}
/**
static traceid package_id(KlassPtr klass, JfrArtifactSet* artifacts) {
assert(klass != NULL, "invariant");
char* klass_name = klass->name()->as_C_string(); // uses ResourceMark declared in JfrTypeSet::serialize()
......@@ -79,43 +118,38 @@ static traceid package_id(KlassPtr klass, JfrArtifactSet* artifacts) {
}
return CREATE_PACKAGE_ID(artifacts->markPackage(pkg_name, package_name_hash(pkg_name)));
}
*/
static traceid cld_id(CldPtr cld) {
assert(cld != NULL, "invariant");
return cld->is_anonymous() ? 0 : TRACE_ID(cld);
static traceid method_id(KlassPtr klass, MethodPtr method) {
assert(klass != NULL, "invariant");
assert(method != NULL, "invariant");
return METHOD_ID(klass, method);
}
static void tag_leakp_klass_artifacts(KlassPtr k, bool class_unload) {
assert(k != NULL, "invariant");
// XXX
// PkgPtr pkg = k->package();
// if (pkg != NULL) {
// tag_leakp_artifact(pkg, class_unload);
// }
CldPtr cld = k->class_loader_data();
static traceid cld_id(CldPtr cld, bool leakp) {
assert(cld != NULL, "invariant");
if (!cld->is_anonymous()) {
tag_leakp_artifact(cld, class_unload);
if (cld->is_anonymous()) {
return 0;
}
if (leakp) {
SET_LEAKP(cld);
} else {
SET_TRANSIENT(cld);
}
return artifact_id(cld);
}
class TagLeakpKlassArtifact {
bool _class_unload;
public:
TagLeakpKlassArtifact(bool class_unload) : _class_unload(class_unload) {}
bool operator()(KlassPtr klass) {
if (_class_unload) {
if (LEAKP_USED_THIS_EPOCH(klass)) {
tag_leakp_klass_artifacts(klass, _class_unload);
}
} else {
if (LEAKP_USED_PREV_EPOCH(klass)) {
tag_leakp_klass_artifacts(klass, _class_unload);
}
}
return true;
}
};
template <typename T>
static s4 get_flags(const T* ptr) {
assert(ptr != NULL, "invariant");
return ptr->access_flags().get_flags();
}
template <typename T>
static void set_serialized(const T* ptr) {
assert(ptr != NULL, "invariant");
SET_SERIALIZED(ptr);
assert(IS_SERIALIZED(ptr), "invariant");
}
/*
* In C++03, functions used as template parameters must have external linkage;
......@@ -125,11 +159,10 @@ class TagLeakpKlassArtifact {
* The weird naming is an effort to decrease the risk of name clashes.
*/
int write__artifact__klass(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
static int write_klass(JfrCheckpointWriter* writer, KlassPtr klass, bool leakp) {
assert(writer != NULL, "invariant");
assert(artifacts != NULL, "invariant");
assert(k != NULL, "invariant");
KlassPtr klass = (KlassPtr)k;
assert(_artifacts != NULL, "invariant");
assert(klass != NULL, "invariant");
traceid pkg_id = 0;
KlassPtr theklass = klass;
if (theklass->oop_is_objArray()) {
......@@ -137,17 +170,16 @@ int write__artifact__klass(JfrCheckpointWriter* writer, JfrArtifactSet* artifact
theklass = obj_arr_klass->bottom_klass();
}
if (theklass->oop_is_instance()) {
pkg_id = package_id(theklass, artifacts);
// pkg_id = package_id(theklass, _artifacts);
pkg_id = 0;
} else {
assert(theklass->oop_is_typeArray(), "invariant");
}
const traceid symbol_id = artifacts->mark(klass);
assert(symbol_id > 0, "need to have an address for symbol!");
writer->write(TRACE_ID(klass));
writer->write(cld_id(klass->class_loader_data()));
writer->write((traceid)CREATE_SYMBOL_ID(symbol_id));
writer->write(artifact_id(klass));
writer->write(cld_id(klass->class_loader_data(), leakp));
writer->write(mark_symbol(klass, leakp));
writer->write(pkg_id);
writer->write((s4)klass->access_flags().get_flags());
writer->write(get_flags(klass));
if (klass->oop_is_array()) {
// The object array size can not be determined statically from klass.
// It is determined by the elements length in object layout.
......@@ -161,43 +193,114 @@ int write__artifact__klass(JfrCheckpointWriter* writer, JfrArtifactSet* artifact
return 1;
}
typedef LeakPredicate<KlassPtr> LeakKlassPredicate;
typedef JfrPredicatedArtifactWriterImplHost<KlassPtr, LeakKlassPredicate, write__artifact__klass> LeakKlassWriterImpl;
typedef JfrArtifactWriterHost<LeakKlassWriterImpl, TYPE_CLASS> LeakKlassWriter;
typedef JfrArtifactWriterImplHost<KlassPtr, write__artifact__klass> KlassWriterImpl;
typedef JfrArtifactWriterHost<KlassWriterImpl, TYPE_CLASS> KlassWriter;
int write__klass(JfrCheckpointWriter* writer, const void* k) {
assert(k != NULL, "invariant");
KlassPtr klass = (KlassPtr)k;
set_serialized(klass);
return write_klass(writer, klass, false);
}
int write__artifact__method(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* m) {
assert(writer != NULL, "invariant");
assert(artifacts != NULL, "invariant");
assert(m != NULL, "invariant");
MethodPtr method = (MethodPtr)m;
const traceid method_name_symbol_id = artifacts->mark(method->name());
assert(method_name_symbol_id > 0, "invariant");
const traceid method_sig_symbol_id = artifacts->mark(method->signature());
assert(method_sig_symbol_id > 0, "invariant");
KlassPtr klass = method->method_holder();
int write__klass__leakp(JfrCheckpointWriter* writer, const void* k) {
assert(k != NULL, "invariant");
KlassPtr klass = (KlassPtr)k;
return write_klass(writer, klass, true);
}
static void do_implied(Klass* klass) {
assert(klass != NULL, "invariant");
assert(METHOD_USED_ANY_EPOCH(klass), "invariant");
writer->write((u8)METHOD_ID(klass, method));
writer->write((u8)TRACE_ID(klass));
writer->write((u8)CREATE_SYMBOL_ID(method_name_symbol_id));
writer->write((u8)CREATE_SYMBOL_ID(method_sig_symbol_id));
writer->write((u2)method->access_flags().get_flags());
writer->write(const_cast<Method*>(method)->is_hidden() ? (u1)1 : (u1)0);
return 1;
if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
if (_leakp_writer != NULL) {
SET_LEAKP(klass);
}
_subsystem_callback->do_artifact(klass);
}
}
static void do_unloaded_klass(Klass* klass) {
assert(klass != NULL, "invariant");
assert(_subsystem_callback != NULL, "invariant");
if (IS_JDK_JFR_EVENT_SUBKLASS(klass)) {
JfrEventClasses::increment_unloaded_event_class();
}
if (USED_THIS_EPOCH(klass)) {
ObjectSampleCheckpoint::on_klass_unload(klass);
_subsystem_callback->do_artifact(klass);
return;
}
do_implied(klass);
}
typedef JfrArtifactWriterImplHost<MethodPtr, write__artifact__method> MethodWriterImplTarget;
typedef JfrArtifactWriterHost<MethodWriterImplTarget, TYPE_METHOD> MethodWriterImpl;
static void do_klass(Klass* klass) {
assert(klass != NULL, "invariant");
assert(_subsystem_callback != NULL, "invariant");
if (current_epoch()) {
if (USED_THIS_EPOCH(klass)) {
_subsystem_callback->do_artifact(klass);
return;
}
} else {
if (USED_PREV_EPOCH(klass)) {
_subsystem_callback->do_artifact(klass);
return;
}
}
do_implied(klass);
}
int write__artifact__package(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* p) {
static void do_klasses() {
if (_class_unload) {
ClassLoaderDataGraph::classes_unloading_do(&do_unloaded_klass);
return;
}
ClassLoaderDataGraph::classes_do(&do_klass);
}
typedef SerializePredicate<KlassPtr> KlassPredicate;
typedef JfrPredicatedTypeWriterImplHost<KlassPtr, KlassPredicate, write__klass> KlassWriterImpl;
typedef JfrTypeWriterHost<KlassWriterImpl, TYPE_CLASS> KlassWriter;
typedef CompositeFunctor<KlassPtr, KlassWriter, KlassArtifactRegistrator> KlassWriterRegistration;
typedef JfrArtifactCallbackHost<KlassPtr, KlassWriterRegistration> KlassCallback;
typedef LeakPredicate<KlassPtr> LeakKlassPredicate;
typedef JfrPredicatedTypeWriterImplHost<KlassPtr, LeakKlassPredicate, write__klass__leakp> LeakKlassWriterImpl;
typedef JfrTypeWriterHost<LeakKlassWriterImpl, TYPE_CLASS> LeakKlassWriter;
typedef CompositeFunctor<KlassPtr, LeakKlassWriter, KlassWriter> CompositeKlassWriter;
typedef CompositeFunctor<KlassPtr, CompositeKlassWriter, KlassArtifactRegistrator> CompositeKlassWriterRegistration;
typedef JfrArtifactCallbackHost<KlassPtr, CompositeKlassWriterRegistration> CompositeKlassCallback;
static bool write_klasses() {
assert(!_artifacts->has_klass_entries(), "invariant");
assert(_writer != NULL, "invariant");
KlassArtifactRegistrator reg(_artifacts);
KlassWriter kw(_writer, _class_unload);
KlassWriterRegistration kwr(&kw, &reg);
if (_leakp_writer == NULL) {
KlassCallback callback(&kwr);
_subsystem_callback = &callback;
do_klasses();
} else {
LeakKlassWriter lkw(_leakp_writer, _artifacts, _class_unload);
CompositeKlassWriter ckw(&lkw, &kw);
CompositeKlassWriterRegistration ckwr(&ckw, &reg);
CompositeKlassCallback callback(&ckwr);
_subsystem_callback = &callback;
do_klasses();
}
if (is_complete()) {
return false;
}
_artifacts->tally(kw);
return true;
}
int write__artifact__package(JfrCheckpointWriter* writer, const void* p) {
assert(writer != NULL, "invariant");
assert(artifacts != NULL, "invariant");
assert(_artifacts != NULL, "invariant");
assert(p != NULL, "invariant");
CStringEntryPtr entry = (CStringEntryPtr)p;
const traceid package_name_symbol_id = artifacts->mark(entry->value(), package_name_hash(entry->value()));
const traceid package_name_symbol_id = _artifacts->mark(package_name_hash(entry->value()), entry->value(), false);
assert(package_name_symbol_id > 0, "invariant");
writer->write((traceid)CREATE_PACKAGE_ID(entry->id()));
writer->write((traceid)CREATE_SYMBOL_ID(package_name_symbol_id));
......@@ -205,628 +308,400 @@ int write__artifact__package(JfrCheckpointWriter* writer, JfrArtifactSet* artifa
return 1;
}
int write__artifact__classloader(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* c) {
assert(c != NULL, "invariant");
CldPtr cld = (CldPtr)c;
/**
typedef JfrTypeWriterImplHost<CStringEntryPtr, write__artifact__package> PackageEntryWriterImpl;
typedef JfrTypeWriterHost<PackageEntryWriterImpl, TYPE_PACKAGE> PackageEntryWriter;
void write_packages() {
// below jdk9 there is no oop for packages, so nothing to do with leakp_writer
// just write packages
PackageEntryWriter pw(_writer, _class_unload);
_artifacts->iterate_packages(pw);
}
*/
template <typename T>
static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) {
assert(callback != NULL, "invariant");
assert(value != NULL, "invariant");
if (USED_PREV_EPOCH(value)) {
callback->do_artifact(value);
assert(IS_NOT_SERIALIZED(value), "invariant");
return;
}
if (IS_SERIALIZED(value)) {
CLEAR_SERIALIZED(value);
}
assert(IS_NOT_SERIALIZED(value), "invariant");
}
static int write_classloader(JfrCheckpointWriter* writer, CldPtr cld, bool leakp) {
assert(cld != NULL, "invariant");
assert(!cld->is_anonymous(), "invariant");
const traceid cld_id = TRACE_ID(cld);
// class loader type
const Klass* class_loader_klass = cld->class_loader() != NULL ? cld->class_loader()->klass() : NULL;
if (class_loader_klass == NULL) {
// (primordial) boot class loader
writer->write(cld_id); // class loader instance id
writer->write(artifact_id(cld)); // class loader instance id
writer->write((traceid)0); // class loader type id (absence of)
writer->write((traceid)CREATE_SYMBOL_ID(1)); // 1 maps to synthetic name -> "bootstrap"
writer->write(get_bootstrap_name(leakp)); // maps to synthetic name -> "bootstrap"
} else {
Symbol* symbol_name = class_loader_klass->name();
const traceid symbol_name_id = symbol_name != NULL ? artifacts->mark(symbol_name) : 0;
writer->write(cld_id); // class loader instance id
writer->write(TRACE_ID(class_loader_klass)); // class loader type id
writer->write(symbol_name_id == 0 ? (traceid)0 :
(traceid)CREATE_SYMBOL_ID(symbol_name_id)); // class loader instance name
writer->write(artifact_id(cld)); // class loader instance id
writer->write(artifact_id(class_loader_klass)); // class loader type id
writer->write(mark_symbol(class_loader_klass->name(), leakp)); // class loader instance name
}
return 1;
}
typedef LeakPredicate<CldPtr> LeakCldPredicate;
int _compare_cld_ptr_(CldPtr const& lhs, CldPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
typedef UniquePredicate<CldPtr, _compare_cld_ptr_> CldPredicate;
typedef JfrPredicatedArtifactWriterImplHost<CldPtr, LeakCldPredicate, write__artifact__classloader> LeakCldWriterImpl;
typedef JfrPredicatedArtifactWriterImplHost<CldPtr, CldPredicate, write__artifact__classloader> CldWriterImpl;
typedef JfrArtifactWriterHost<LeakCldWriterImpl, TYPE_CLASSLOADER> LeakCldWriter;
typedef JfrArtifactWriterHost<CldWriterImpl, TYPE_CLASSLOADER> CldWriter;
typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr;
static int write__artifact__symbol__entry__(JfrCheckpointWriter* writer,
SymbolEntryPtr entry) {
assert(writer != NULL, "invariant");
assert(entry != NULL, "invariant");
ResourceMark rm;
writer->write(CREATE_SYMBOL_ID(entry->id()));
writer->write(entry->value()->as_C_string());
return 1;
}
int write__artifact__symbol__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
assert(e != NULL, "invariant");
return write__artifact__symbol__entry__(writer, (SymbolEntryPtr)e);
}
typedef JfrArtifactWriterImplHost<SymbolEntryPtr, write__artifact__symbol__entry> SymbolEntryWriterImpl;
typedef JfrArtifactWriterHost<SymbolEntryWriterImpl, TYPE_SYMBOL> SymbolEntryWriter;
typedef const JfrSymbolId::CStringEntry* CStringEntryPtr;
static int write__artifact__cstring__entry__(JfrCheckpointWriter* writer, CStringEntryPtr entry) {
assert(writer != NULL, "invariant");
assert(entry != NULL, "invariant");
writer->write(CREATE_SYMBOL_ID(entry->id()));
writer->write(entry->value());
return 1;
}
int write__artifact__cstring__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
assert(e != NULL, "invariant");
return write__artifact__cstring__entry__(writer, (CStringEntryPtr)e);
int write__classloader(JfrCheckpointWriter* writer, const void* c) {
assert(c != NULL, "invariant");
CldPtr cld = (CldPtr)c;
set_serialized(cld);
return write_classloader(writer, cld, false);
}
typedef JfrArtifactWriterImplHost<CStringEntryPtr, write__artifact__cstring__entry> CStringEntryWriterImpl;
typedef JfrArtifactWriterHost<CStringEntryWriterImpl, TYPE_SYMBOL> CStringEntryWriter;
int write__artifact__klass__symbol(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
assert(writer != NULL, "invariant");
assert(artifacts != NULL, "invaiant");
assert(k != NULL, "invariant");
const InstanceKlass* const ik = (const InstanceKlass*)k;
if (ik->is_anonymous()) {
CStringEntryPtr entry =
artifacts->map_cstring(JfrSymbolId::anonymous_klass_name_hash_code(ik));
assert(entry != NULL, "invariant");
return write__artifact__cstring__entry__(writer, entry);
}
SymbolEntryPtr entry = artifacts->map_symbol(JfrSymbolId::regular_klass_name_hash_code(ik));
return write__artifact__symbol__entry__(writer, entry);
int write__classloader__leakp(JfrCheckpointWriter* writer, const void* c) {
assert(c != NULL, "invariant");
CldPtr cld = (CldPtr)c;
CLEAR_LEAKP(cld);
return write_classloader(writer, cld, true);
}
int _compare_traceid_(const traceid& lhs, const traceid& rhs) {
return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
static void do_class_loader_data(ClassLoaderData* cld) {
do_previous_epoch_artifact(_subsystem_callback, cld);
}
template <template <typename> class Predicate>
class KlassSymbolWriterImpl {
private:
JfrCheckpointWriter* _writer;
JfrArtifactSet* _artifacts;
Predicate<KlassPtr> _predicate;
MethodUsedPredicate<true> _method_used_predicate;
MethodFlagPredicate _method_flag_predicate;
UniquePredicate<traceid, _compare_traceid_> _unique_predicate;
int klass_symbols(KlassPtr klass);
// XXX int package_symbols(PkgPtr pkg);
int class_loader_symbols(CldPtr cld);
int method_symbols(KlassPtr klass);
class CldFieldSelector {
public:
typedef KlassPtr Type;
KlassSymbolWriterImpl(JfrCheckpointWriter* writer,
JfrArtifactSet* artifacts,
bool class_unload) : _writer(writer),
_artifacts(artifacts),
_predicate(class_unload),
_method_used_predicate(class_unload),
_method_flag_predicate(class_unload),
_unique_predicate(class_unload) {}
int operator()(KlassPtr klass) {
typedef CldPtr TypePtr;
static TypePtr select(KlassPtr klass) {
assert(klass != NULL, "invariant");
int count = 0;
if (_predicate(klass)) {
count += klass_symbols(klass);
// XXX
// PkgPtr pkg = klass->package();
// if (pkg != NULL) {
// count += package_symbols(pkg);
// }
CldPtr cld = klass->class_loader_data();
assert(cld != NULL, "invariant");
if (!cld->is_anonymous()) {
count += class_loader_symbols(cld);
}
if (_method_used_predicate(klass)) {
count += method_symbols(klass);
}
}
return count;
CldPtr cld = klass->class_loader_data();
return cld->is_anonymous() ? NULL : cld;
}
};
template <template <typename> class Predicate>
int KlassSymbolWriterImpl<Predicate>::klass_symbols(KlassPtr klass) {
assert(klass != NULL, "invariant");
assert(_predicate(klass), "invariant");
const InstanceKlass* const ik = (const InstanceKlass*)klass;
if (ik->is_anonymous()) {
CStringEntryPtr entry =
this->_artifacts->map_cstring(JfrSymbolId::anonymous_klass_name_hash_code(ik));
assert(entry != NULL, "invariant");
return _unique_predicate(entry->id()) ? write__artifact__cstring__entry__(this->_writer, entry) : 0;
}
SymbolEntryPtr entry = this->_artifacts->map_symbol(ik->name());
assert(entry != NULL, "invariant");
return _unique_predicate(entry->id()) ? write__artifact__symbol__entry__(this->_writer, entry) : 0;
}
// XXX
// template <template <typename> class Predicate>
// int KlassSymbolWriterImpl<Predicate>::package_symbols(PkgPtr pkg) {
// assert(pkg != NULL, "invariant");
// SymbolPtr pkg_name = pkg->name();
// assert(pkg_name != NULL, "invariant");
// SymbolEntryPtr package_symbol = this->_artifacts->map_symbol(pkg_name);
// assert(package_symbol != NULL, "invariant");
// return _unique_predicate(package_symbol->id()) ?
// write__artifact__symbol__entry__(this->_writer, package_symbol) : 0;
// }
// XXX
// template <template <typename> class Predicate>
// int KlassSymbolWriterImpl<Predicate>::module_symbols(ModPtr module) {
// assert(module != NULL, "invariant");
// assert(module->is_named(), "invariant");
// int count = 0;
// SymbolPtr sym = module->name();
// SymbolEntryPtr entry = NULL;
// if (sym != NULL) {
// entry = this->_artifacts->map_symbol(sym);
// assert(entry != NULL, "invariant");
// if (_unique_predicate(entry->id())) {
// count += write__artifact__symbol__entry__(this->_writer, entry);
// }
// }
// sym = module->version();
// if (sym != NULL) {
// entry = this->_artifacts->map_symbol(sym);
// assert(entry != NULL, "invariant");
// if (_unique_predicate(entry->id())) {
// count += write__artifact__symbol__entry__(this->_writer, entry);
// }
// }
// sym = module->location();
// if (sym != NULL) {
// entry = this->_artifacts->map_symbol(sym);
// assert(entry != NULL, "invariant");
// if (_unique_predicate(entry->id())) {
// count += write__artifact__symbol__entry__(this->_writer, entry);
// }
// }
// return count;
// }
template <template <typename> class Predicate>
int KlassSymbolWriterImpl<Predicate>::class_loader_symbols(CldPtr cld) {
assert(cld != NULL, "invariant");
assert(!cld->is_anonymous(), "invariant");
int count = 0;
// class loader type
const Klass* class_loader_klass = cld->class_loader() != NULL ? cld->class_loader()->klass() : NULL;
if (class_loader_klass == NULL) {
// (primordial) boot class loader
CStringEntryPtr entry = this->_artifacts->map_cstring(0);
assert(entry != NULL, "invariant");
assert(strncmp(entry->literal(),
BOOTSTRAP_LOADER_NAME,
BOOTSTRAP_LOADER_NAME_LEN) == 0, "invariant");
if (_unique_predicate(entry->id())) {
count += write__artifact__cstring__entry__(this->_writer, entry);
}
} else {
const Symbol* class_loader_name = class_loader_klass->name()/* XXX TODO cld->name()*/;
if (class_loader_name != NULL) {
SymbolEntryPtr entry = this->_artifacts->map_symbol(class_loader_name);
assert(entry != NULL, "invariant");
if (_unique_predicate(entry->id())) {
count += write__artifact__symbol__entry__(this->_writer, entry);
}
}
}
return count;
}
template <template <typename> class Predicate>
int KlassSymbolWriterImpl<Predicate>::method_symbols(KlassPtr klass) {
assert(_predicate(klass), "invariant");
assert(_method_used_predicate(klass), "invariant");
assert(METHOD_AND_CLASS_USED_ANY_EPOCH(klass), "invariant");
int count = 0;
const InstanceKlass* const ik = InstanceKlass::cast((Klass*)klass);
const int len = ik->methods()->length();
for (int i = 0; i < len; ++i) {
MethodPtr method = ik->methods()->at(i);
if (_method_flag_predicate(method)) {
SymbolEntryPtr entry = this->_artifacts->map_symbol(method->name());
assert(entry != NULL, "invariant");
if (_unique_predicate(entry->id())) {
count += write__artifact__symbol__entry__(this->_writer, entry);
}
entry = this->_artifacts->map_symbol(method->signature());
assert(entry != NULL, "invariant");
if (_unique_predicate(entry->id())) {
count += write__artifact__symbol__entry__(this->_writer, entry);
}
}
}
return count;
}
typedef KlassSymbolWriterImpl<LeakPredicate> LeakKlassSymbolWriterImpl;
typedef JfrArtifactWriterHost<LeakKlassSymbolWriterImpl, TYPE_SYMBOL> LeakKlassSymbolWriter;
class ClearKlassAndMethods {
private:
ClearArtifact<KlassPtr> _clear_klass_tag_bits;
ClearArtifact<MethodPtr> _clear_method_flag;
MethodUsedPredicate<false> _method_used_predicate;
class CLDCallback : public CLDClosure {
public:
ClearKlassAndMethods(bool class_unload) : _clear_klass_tag_bits(class_unload),
_clear_method_flag(class_unload),
_method_used_predicate(class_unload) {}
bool operator()(KlassPtr klass) {
if (_method_used_predicate(klass)) {
const InstanceKlass* ik = InstanceKlass::cast((Klass*)klass);
const int len = ik->methods()->length();
for (int i = 0; i < len; ++i) {
MethodPtr method = ik->methods()->at(i);
_clear_method_flag(method);
}
CLDCallback() {}
void do_cld(ClassLoaderData* cld) {
assert(cld != NULL, "invariant");
if (cld->is_anonymous()) {
return;
}
_clear_klass_tag_bits(klass);
return true;
do_class_loader_data(cld);
}
};
typedef CompositeFunctor<KlassPtr,
TagLeakpKlassArtifact,
LeakKlassWriter> LeakpKlassArtifactTagging;
typedef CompositeFunctor<KlassPtr,
LeakpKlassArtifactTagging,
KlassWriter> CompositeKlassWriter;
typedef CompositeFunctor<KlassPtr,
CompositeKlassWriter,
KlassArtifactRegistrator> CompositeKlassWriterRegistration;
typedef CompositeFunctor<KlassPtr,
KlassWriter,
KlassArtifactRegistrator> KlassWriterRegistration;
typedef JfrArtifactCallbackHost<KlassPtr, KlassWriterRegistration> KlassCallback;
typedef JfrArtifactCallbackHost<KlassPtr, CompositeKlassWriterRegistration> CompositeKlassCallback;
/*
* Composite operation
*
* TagLeakpKlassArtifact ->
* LeakpPredicate ->
* LeakpKlassWriter ->
* KlassPredicate ->
* KlassWriter ->
* KlassWriterRegistration
*/
void JfrTypeSet::write_klass_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
assert(!_artifacts->has_klass_entries(), "invariant");
KlassArtifactRegistrator reg(_artifacts);
KlassWriter kw(writer, _artifacts, _class_unload);
KlassWriterRegistration kwr(&kw, &reg);
if (leakp_writer == NULL) {
KlassCallback callback(&kwr);
_subsystem_callback = &callback;
do_klasses();
return;
}
TagLeakpKlassArtifact tagging(_class_unload);
LeakKlassWriter lkw(leakp_writer, _artifacts, _class_unload);
LeakpKlassArtifactTagging lpkat(&tagging, &lkw);
CompositeKlassWriter ckw(&lpkat, &kw);
CompositeKlassWriterRegistration ckwr(&ckw, &reg);
CompositeKlassCallback callback(&ckwr);
_subsystem_callback = &callback;
do_klasses();
static void do_class_loaders() {
CLDCallback cld_cb;
ClassLoaderDataGraph::cld_do(&cld_cb);
}
typedef JfrArtifactWriterImplHost<CStringEntryPtr, write__artifact__package> PackageEntryWriterImpl;
typedef JfrArtifactWriterHost<PackageEntryWriterImpl, TYPE_PACKAGE> PackageEntryWriter;
typedef SerializePredicate<CldPtr> CldPredicate;
typedef JfrPredicatedTypeWriterImplHost<CldPtr, CldPredicate, write__classloader> CldWriterImpl;
typedef JfrTypeWriterHost<CldWriterImpl, TYPE_CLASSLOADER> CldWriter;
typedef CompositeFunctor<CldPtr, CldWriter, ClearArtifact<CldPtr> > CldWriterWithClear;
typedef JfrArtifactCallbackHost<CldPtr, CldWriterWithClear> CldCallback;
typedef KlassToFieldEnvelope<CldFieldSelector, CldWriter> KlassCldWriter;
void JfrTypeSet::write_package_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
assert(_artifacts->has_klass_entries(), "invariant");
assert(writer != NULL, "invariant");
// below jdk9 there is no oop for packages, so nothing to do with leakp_writer
// just write packages
PackageEntryWriter pw(writer, _artifacts, _class_unload);
_artifacts->iterate_packages(pw);
}
typedef LeakPredicate<CldPtr> LeakCldPredicate;
typedef JfrPredicatedTypeWriterImplHost<CldPtr, LeakCldPredicate, write__classloader__leakp> LeakCldWriterImpl;
typedef JfrTypeWriterHost<LeakCldWriterImpl, TYPE_CLASSLOADER> LeakCldWriter;
typedef CompositeFunctor<CldPtr, CldWriter, ClearArtifact<CldPtr> > CldWriterWithClear;
typedef CompositeFunctor<CldPtr, LeakCldWriter, CldWriter> CompositeCldWriter;
typedef KlassToFieldEnvelope<CldFieldSelector, CompositeCldWriter> KlassCompositeCldWriter;
typedef CompositeFunctor<CldPtr, CompositeCldWriter, ClearArtifact<CldPtr> > CompositeCldWriterWithClear;
typedef JfrArtifactCallbackHost<CldPtr, CldWriterWithClear> CldCallback;
typedef JfrArtifactCallbackHost<CldPtr, CompositeCldWriterWithClear> CompositeCldCallback;
class CldFieldSelector {
public:
typedef CldPtr TypePtr;
static TypePtr select(KlassPtr klass) {
assert(klass != NULL, "invariant");
CldPtr cld = klass->class_loader_data();
return cld->is_anonymous() ? NULL : cld;
static void write_classloaders() {
assert(_writer != NULL, "invariant");
CldWriter cldw(_writer, _class_unload);
KlassCldWriter kcw(&cldw);
if (current_epoch()) {
_artifacts->iterate_klasses(kcw);
_artifacts->tally(cldw);
return;
}
};
typedef KlassToFieldEnvelope<CldFieldSelector, CldWriterWithClear> KlassCldWriterWithClear;
typedef KlassToFieldEnvelope<CldFieldSelector, CompositeCldWriterWithClear> KlassCompositeCldWriterWithClear;
/*
* Composite operation
*
* LeakpClassLoaderWriter ->
* ClassLoaderWriter ->
* ClearArtifact<ClassLoaderData>
*/
void JfrTypeSet::write_class_loader_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
assert(_artifacts->has_klass_entries(), "invariant");
ClearArtifact<CldPtr> clear(_class_unload);
CldWriter cldw(writer, _artifacts, _class_unload);
if (leakp_writer == NULL) {
assert(previous_epoch(), "invariant");
if (_leakp_writer == NULL) {
_artifacts->iterate_klasses(kcw);
ClearArtifact<CldPtr> clear;
CldWriterWithClear cldwwc(&cldw, &clear);
KlassCldWriterWithClear kcldwwc(&cldwwc);
_artifacts->iterate_klasses(kcldwwc);
CldCallback callback(&cldwwc);
_subsystem_callback = &callback;
do_class_loaders();
return;
} else {
LeakCldWriter lcldw(_leakp_writer, _class_unload);
CompositeCldWriter ccldw(&lcldw, &cldw);
KlassCompositeCldWriter kccldw(&ccldw);
_artifacts->iterate_klasses(kccldw);
ClearArtifact<CldPtr> clear;
CompositeCldWriterWithClear ccldwwc(&ccldw, &clear);
CompositeCldCallback callback(&ccldwwc);
_subsystem_callback = &callback;
do_class_loaders();
}
LeakCldWriter lcldw(leakp_writer, _artifacts, _class_unload);
CompositeCldWriter ccldw(&lcldw, &cldw);
CompositeCldWriterWithClear ccldwwc(&ccldw, &clear);
KlassCompositeCldWriterWithClear kcclwwc(&ccldwwc);
_artifacts->iterate_klasses(kcclwwc);
CompositeCldCallback callback(&ccldwwc);
_subsystem_callback = &callback;
do_class_loaders();
_artifacts->tally(cldw);
}
// Encode a method's hidden-flag for the event field: 1 == hidden, 0 == visible.
static u1 get_visibility(MethodPtr method) {
  assert(method != NULL, "invariant");
  if (const_cast<Method*>(method)->is_hidden()) {
    return (u1)1;
  }
  return (u1)0;
}
// Tag a Method as already written (serialized) for the current epoch,
// so subsequent writer passes can skip it.
template <>
void set_serialized<Method>(MethodPtr method) {
  assert(method != NULL, "invariant");
  SET_METHOD_SERIALIZED(method);
  assert(IS_METHOD_SERIALIZED(method), "invariant");
}
// Emit one TYPE_METHOD constant: method id, holder klass id, name and
// signature symbol ids (tagged for the leakp subset when requested),
// access flags and visibility. Returns the entry count written (1).
static int write_method(JfrCheckpointWriter* writer, MethodPtr method, bool leakp) {
  assert(writer != NULL, "invariant");
  assert(method != NULL, "invariant");
  assert(_artifacts != NULL, "invariant");
  KlassPtr klass = method->method_holder();
  assert(klass != NULL, "invariant");
  writer->write(method_id(klass, method));
  writer->write(artifact_id(klass));
  writer->write(mark_symbol(method->name(), leakp));
  writer->write(mark_symbol(method->signature(), leakp));
  writer->write((u2)get_flags(method));
  writer->write(get_visibility(method));
  return 1;
}
template <bool predicate_bool, typename MethodFunctor>
// void*-typed adapter used by the writer hosts (regular pass):
// marks the method serialized, then writes it.
int write__method(JfrCheckpointWriter* writer, const void* m) {
  assert(m != NULL, "invariant");
  MethodPtr method = (MethodPtr)m;
  set_serialized(method);
  return write_method(writer, method, false);
}
// void*-typed adapter for the leak-profiler subset: writes the method
// without tagging it serialized (the leakp pass must not consume the
// regular serialization state).
int write__method__leakp(JfrCheckpointWriter* writer, const void* m) {
  assert(m != NULL, "invariant");
  return write_method(writer, (MethodPtr)m, true);
}
template <typename MethodCallback, typename KlassCallback, bool leakp>
class MethodIteratorHost {
private:
MethodFunctor _method_functor;
MethodUsedPredicate<predicate_bool> _method_used_predicate;
MethodFlagPredicate _method_flag_predicate;
MethodCallback _method_cb;
KlassCallback _klass_cb;
MethodUsedPredicate<leakp> _method_used_predicate;
MethodFlagPredicate<leakp> _method_flag_predicate;
public:
MethodIteratorHost(JfrCheckpointWriter* writer,
JfrArtifactSet* artifacts,
bool class_unload,
bool current_epoch = false,
bool class_unload = false,
bool skip_header = false) :
_method_functor(writer, artifacts, class_unload, skip_header),
_method_used_predicate(class_unload),
_method_flag_predicate(class_unload) {}
_method_cb(writer, class_unload, skip_header),
_klass_cb(writer, class_unload, skip_header),
_method_used_predicate(current_epoch),
_method_flag_predicate(current_epoch) {}
bool operator()(KlassPtr klass) {
if (_method_used_predicate(klass)) {
assert(METHOD_AND_CLASS_USED_ANY_EPOCH(klass), "invariant");
const InstanceKlass* ik = InstanceKlass::cast((Klass*)klass);
const InstanceKlass* const ik = InstanceKlass::cast((Klass*)klass);
const int len = ik->methods()->length();
for (int i = 0; i < len; ++i) {
MethodPtr method = ik->methods()->at(i);
if (_method_flag_predicate(method)) {
_method_functor(method);
_method_cb(method);
}
}
}
return true;
return _klass_cb(klass);
}
int count() const { return _method_functor.count(); }
void add(int count) { _method_functor.add(count); }
int count() const { return _method_cb.count(); }
void add(int count) { _method_cb.add(count); }
};
// Adapts a default-constructible functor Impl<T> to the writer-host
// constructor shape (writer, class_unload, skip_header); the extra
// constructor arguments are ignored. Used to plug clearing/stub
// functors into the iteration machinery.
template <typename T, template <typename> class Impl>
class Wrapper {
  Impl<T> _t;
public:
  Wrapper(JfrCheckpointWriter*, bool, bool) : _t() {}
  bool operator()(T const& value) {
    return _t(value);
  }
};
typedef MethodIteratorHost<true /*leakp */, MethodWriterImpl> LeakMethodWriter;
typedef MethodIteratorHost<false, MethodWriterImpl> MethodWriter;
typedef SerializePredicate<MethodPtr> MethodPredicate;
typedef JfrPredicatedTypeWriterImplHost<MethodPtr, MethodPredicate, write__method> MethodWriterImplTarget;
typedef Wrapper<KlassPtr, JfrStub> KlassCallbackStub;
typedef JfrTypeWriterHost<MethodWriterImplTarget, TYPE_METHOD> MethodWriterImpl;
typedef MethodIteratorHost<MethodWriterImpl, KlassCallbackStub, false> MethodWriter;
typedef LeakPredicate<MethodPtr> LeakMethodPredicate;
typedef JfrPredicatedTypeWriterImplHost<MethodPtr, LeakMethodPredicate, write__method__leakp> LeakMethodWriterImplTarget;
typedef JfrTypeWriterHost<LeakMethodWriterImplTarget, TYPE_METHOD> LeakMethodWriterImpl;
typedef MethodIteratorHost<LeakMethodWriterImpl, KlassCallbackStub, true> LeakMethodWriter;
typedef CompositeFunctor<KlassPtr, LeakMethodWriter, MethodWriter> CompositeMethodWriter;
/*
* Composite operation
*
* LeakpMethodWriter ->
* MethodWriter
*/
void JfrTypeSet::write_method_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
assert(_artifacts->has_klass_entries(), "invariant");
MethodWriter mw(writer, _artifacts, _class_unload);
if (leakp_writer == NULL) {
static void write_methods() {
assert(_writer != NULL, "invariant");
MethodWriter mw(_writer, current_epoch(), _class_unload);
if (_leakp_writer == NULL) {
_artifacts->iterate_klasses(mw);
return;
}
LeakMethodWriter lpmw(leakp_writer, _artifacts, _class_unload);
CompositeMethodWriter cmw(&lpmw, &mw);
_artifacts->iterate_klasses(cmw);
}
static void write_symbols_leakp(JfrCheckpointWriter* leakp_writer, JfrArtifactSet* artifacts, bool class_unload) {
assert(leakp_writer != NULL, "invariant");
assert(artifacts != NULL, "invariant");
LeakKlassSymbolWriter lpksw(leakp_writer, artifacts, class_unload);
artifacts->iterate_klasses(lpksw);
}
static void write_symbols(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, JfrArtifactSet* artifacts, bool class_unload) {
assert(writer != NULL, "invariant");
assert(artifacts != NULL, "invariant");
if (leakp_writer != NULL) {
write_symbols_leakp(leakp_writer, artifacts, class_unload);
} else {
LeakMethodWriter lpmw(_leakp_writer, current_epoch(), _class_unload);
CompositeMethodWriter cmw(&lpmw, &mw);
_artifacts->iterate_klasses(cmw);
}
// iterate all registered symbols
SymbolEntryWriter symbol_writer(writer, artifacts, class_unload);
artifacts->iterate_symbols(symbol_writer);
CStringEntryWriter cstring_writer(writer, artifacts, class_unload, true); // skip header
artifacts->iterate_cstrings(cstring_writer);
symbol_writer.add(cstring_writer.count());
_artifacts->tally(mw);
}
bool JfrTypeSet::_class_unload = false;
JfrArtifactSet* JfrTypeSet::_artifacts = NULL;
JfrArtifactClosure* JfrTypeSet::_subsystem_callback = NULL;
void JfrTypeSet::write_symbol_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
assert(writer != NULL, "invariant");
assert(_artifacts->has_klass_entries(), "invariant");
write_symbols(writer, leakp_writer, _artifacts, _class_unload);
template <>
void set_serialized<JfrSymbolId::SymbolEntry>(SymbolEntryPtr ptr) {
assert(ptr != NULL, "invariant");
ptr->set_serialized();
assert(ptr->is_serialized(), "invariant");
}
void JfrTypeSet::do_unloaded_klass(Klass* klass) {
assert(klass != NULL, "invariant");
assert(_subsystem_callback != NULL, "invariant");
if (IS_JDK_JFR_EVENT_SUBKLASS(klass)) {
JfrEventClasses::increment_unloaded_event_class();
}
if (USED_THIS_EPOCH(klass)) { // includes leakp subset
_subsystem_callback->do_artifact(klass);
return;
}
if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
SET_LEAKP_USED_THIS_EPOCH(klass); // tag leakp "safe byte" for subset inclusion
_subsystem_callback->do_artifact(klass);
}
// Tag a CString table entry as already written for this pass.
template <>
void set_serialized<JfrSymbolId::CStringEntry>(CStringEntryPtr ptr) {
  assert(ptr != NULL, "invariant");
  ptr->set_serialized();
  assert(ptr->is_serialized(), "invariant");
}
void JfrTypeSet::do_klass(Klass* klass) {
assert(klass != NULL, "invariant");
assert(_subsystem_callback != NULL, "invariant");
if (USED_PREV_EPOCH(klass)) { // includes leakp subset
_subsystem_callback->do_artifact(klass);
return;
}
if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
SET_LEAKP_USED_PREV_EPOCH(klass); // tag leakp "safe byte" for subset inclusion
_subsystem_callback->do_artifact(klass);
}
// Emit one TYPE_SYMBOL constant from a Symbol-backed entry: id plus the
// symbol rendered as a C string (ResourceMark scopes the conversion).
// leakp is unused here; the parameter keeps a uniform writer signature.
static int write_symbol(JfrCheckpointWriter* writer, SymbolEntryPtr entry, bool leakp) {
  assert(writer != NULL, "invariant");
  assert(entry != NULL, "invariant");
  ResourceMark rm;
  writer->write(create_symbol_id(entry->id()));
  writer->write(entry->value()->as_C_string());
  return 1;
}
void JfrTypeSet::do_klasses() {
if (_class_unload) {
ClassLoaderDataGraph::classes_unloading_do(&do_unloaded_klass);
return;
}
ClassLoaderDataGraph::classes_do(&do_klass);
// void*-typed adapter (regular pass): marks the symbol entry serialized,
// then writes it.
int write__symbol(JfrCheckpointWriter* writer, const void* e) {
  assert(e != NULL, "invariant");
  SymbolEntryPtr entry = (SymbolEntryPtr)e;
  set_serialized(entry);
  return write_symbol(writer, entry, false);
}
// XXX
// void JfrTypeSet::do_unloaded_package(PackageEntry* entry) {
// assert(entry != NULL, "invariant");
// assert(_subsystem_callback != NULL, "invariant");
// if (ANY_USED_THIS_EPOCH(entry)) { // includes leakp subset
// _subsystem_callback->do_artifact(entry);
// }
// }
// void JfrTypeSet::do_package(PackageEntry* entry) {
// assert(_subsystem_callback != NULL, "invariant");
// if (ANY_USED_PREV_EPOCH(entry)) { // includes leakp subset
// _subsystem_callback->do_artifact(entry);
// }
// }
// void JfrTypeSet::do_packages() {
// if (_class_unload) {
// ClassLoaderDataGraph::packages_unloading_do(&do_unloaded_package);
// return;
// }
// ClassLoaderDataGraph::packages_do(&do_package);
// }
void JfrTypeSet::do_unloaded_class_loader_data(ClassLoaderData* cld) {
assert(_subsystem_callback != NULL, "invariant");
if (ANY_USED_THIS_EPOCH(cld)) { // includes leakp subset
_subsystem_callback->do_artifact(cld);
}
// void*-typed adapter for the leak-profiler subset: writes the symbol
// entry without marking it serialized.
int write__symbol__leakp(JfrCheckpointWriter* writer, const void* e) {
  assert(e != NULL, "invariant");
  SymbolEntryPtr entry = (SymbolEntryPtr)e;
  return write_symbol(writer, entry, true);
}
void JfrTypeSet::do_class_loader_data(ClassLoaderData* cld) {
assert(_subsystem_callback != NULL, "invariant");
if (ANY_USED_PREV_EPOCH(cld)) { // includes leakp subset
_subsystem_callback->do_artifact(cld);
}
// Emit one TYPE_SYMBOL constant from a C-string-backed entry.
// leakp is unused here; the parameter keeps a uniform writer signature.
static int write_cstring(JfrCheckpointWriter* writer, CStringEntryPtr entry, bool leakp) {
  assert(writer != NULL, "invariant");
  assert(entry != NULL, "invariant");
  writer->write(create_symbol_id(entry->id()));
  writer->write(entry->value());
  return 1;
}
// CLDClosure that routes each non-anonymous ClassLoaderData to the
// matching JfrTypeSet hook, depending on whether iteration happens
// during class unloading or during a regular epoch shift.
class CLDCallback : public CLDClosure {
 private:
  bool _class_unload;
 public:
  CLDCallback(bool class_unload) : _class_unload(class_unload) {}
  void do_cld(ClassLoaderData* cld) {
    assert(cld != NULL, "invariant");
    if (cld->is_anonymous()) {
      // anonymous CLDs are not written as class loader constants
      return;
    }
    if (_class_unload) {
      JfrTypeSet::do_unloaded_class_loader_data(cld);
      return;
    }
    JfrTypeSet::do_class_loader_data(cld);
  }
};
// void*-typed adapter (regular pass): marks the cstring entry
// serialized, then writes it.
int write__cstring(JfrCheckpointWriter* writer, const void* e) {
  assert(e != NULL, "invariant");
  CStringEntryPtr entry = (CStringEntryPtr)e;
  set_serialized(entry);
  return write_cstring(writer, entry, false);
}
void JfrTypeSet::do_class_loaders() {
CLDCallback cld_cb(_class_unload);
if (_class_unload) {
ClassLoaderDataGraph::cld_unloading_do(&cld_cb);
// void*-typed adapter for the leak-profiler subset: writes the cstring
// entry without marking it serialized.
int write__cstring__leakp(JfrCheckpointWriter* writer, const void* e) {
  assert(e != NULL, "invariant");
  CStringEntryPtr entry = (CStringEntryPtr)e;
  return write_cstring(writer, entry, true);
}
typedef SymbolPredicate<SymbolEntryPtr, false> SymPredicate;
typedef JfrPredicatedTypeWriterImplHost<SymbolEntryPtr, SymPredicate, write__symbol> SymbolEntryWriterImpl;
typedef JfrTypeWriterHost<SymbolEntryWriterImpl, TYPE_SYMBOL> SymbolEntryWriter;
typedef SymbolPredicate<CStringEntryPtr, false> CStringPredicate;
typedef JfrPredicatedTypeWriterImplHost<CStringEntryPtr, CStringPredicate, write__cstring> CStringEntryWriterImpl;
typedef JfrTypeWriterHost<CStringEntryWriterImpl, TYPE_SYMBOL> CStringEntryWriter;
typedef SymbolPredicate<SymbolEntryPtr, true> LeakSymPredicate;
typedef JfrPredicatedTypeWriterImplHost<SymbolEntryPtr, LeakSymPredicate, write__symbol__leakp> LeakSymbolEntryWriterImpl;
typedef JfrTypeWriterHost<LeakSymbolEntryWriterImpl, TYPE_SYMBOL> LeakSymbolEntryWriter;
typedef CompositeFunctor<SymbolEntryPtr, LeakSymbolEntryWriter, SymbolEntryWriter> CompositeSymbolWriter;
typedef SymbolPredicate<CStringEntryPtr, true> LeakCStringPredicate;
typedef JfrPredicatedTypeWriterImplHost<CStringEntryPtr, LeakCStringPredicate, write__cstring__leakp> LeakCStringEntryWriterImpl;
typedef JfrTypeWriterHost<LeakCStringEntryWriterImpl, TYPE_SYMBOL> LeakCStringEntryWriter;
typedef CompositeFunctor<CStringEntryPtr, LeakCStringEntryWriter, CStringEntryWriter> CompositeCStringWriter;
// Write symbol and cstring constants to BOTH the regular writer and the
// leak-profiler writer in a single pass over the tables. CString entries
// go under the same TYPE_SYMBOL header (skip_header == true), so their
// counts are folded into the symbol writers before tallying.
static void write_symbols_with_leakp() {
  assert(_leakp_writer != NULL, "invariant");
  SymbolEntryWriter sw(_writer, _class_unload);
  LeakSymbolEntryWriter lsw(_leakp_writer, _class_unload);
  CompositeSymbolWriter csw(&lsw, &sw);
  _artifacts->iterate_symbols(csw);
  CStringEntryWriter ccsw(_writer, _class_unload, true); // skip header
  LeakCStringEntryWriter lccsw(_leakp_writer, _class_unload, true); // skip header
  CompositeCStringWriter cccsw(&lccsw, &ccsw);
  _artifacts->iterate_cstrings(cccsw);
  sw.add(ccsw.count());
  lsw.add(lccsw.count());
  _artifacts->tally(sw);
}
// Write all symbol and cstring constants for the current pass.
// Delegates to write_symbols_with_leakp() when a leak-profiler writer
// is installed; otherwise writes to the regular writer only. CString
// entries share the TYPE_SYMBOL header (skip_header == true), so their
// count is folded into the symbol writer before tallying.
static void write_symbols() {
  assert(_writer != NULL, "invariant");
  if (_leakp_writer != NULL) {
    write_symbols_with_leakp();
    return;
  }
  // Fix: removed stray interleaved line "ClassLoaderDataGraph::cld_do(&cld_cb);"
  // (diff residue from do_class_loaders) — cld_cb is undefined in this scope.
  SymbolEntryWriter sw(_writer, _class_unload);
  _artifacts->iterate_symbols(sw);
  CStringEntryWriter csw(_writer, _class_unload, true); // skip header
  _artifacts->iterate_cstrings(csw);
  sw.add(csw.count());
  _artifacts->tally(sw);
}
static void clear_artifacts(JfrArtifactSet* artifacts,
bool class_unload) {
assert(artifacts != NULL, "invariant");
assert(artifacts->has_klass_entries(), "invariant");
typedef Wrapper<KlassPtr, ClearArtifact> ClearKlassBits;
typedef Wrapper<MethodPtr, ClearArtifact> ClearMethodFlag;
typedef MethodIteratorHost<ClearMethodFlag, ClearKlassBits, false> ClearKlassAndMethods;
// untag
ClearKlassAndMethods clear(class_unload);
artifacts->iterate_klasses(clear);
artifacts->clear();
static size_t teardown() {
assert(_artifacts != NULL, "invariant");
const size_t total_count = _artifacts->total_count();
if (previous_epoch()) {
assert(_writer != NULL, "invariant");
ClearKlassAndMethods clear(_writer);
_artifacts->iterate_klasses(clear);
_artifacts->clear();
++checkpoint_id;
}
return total_count;
}
/**
* Write all "tagged" (in-use) constant artifacts and their dependencies.
*/
void JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload) {
assert(writer != NULL, "invariant");
ResourceMark rm;
// initialization begin
// Install the per-pass static state for one serialize() invocation:
// target writers, class-unload mode, a bumped checkpoint id, and a
// fresh (or re-initialized) artifact set reused across epochs.
static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload) {
  _writer = writer;
  _leakp_writer = leakp_writer;
  _class_unload = class_unload;
  ++checkpoint_id;
  if (_artifacts == NULL) {
    // first pass: build the artifact set lazily
    _artifacts = new JfrArtifactSet(class_unload);
    _subsystem_callback = NULL;
  } else {
    // subsequent passes: reuse the instance, just reset its state
    _artifacts->initialize(class_unload);
    _subsystem_callback = NULL;
  }
  assert(_artifacts != NULL, "invariant");
  assert(!_artifacts->has_klass_entries(), "invariant");
  assert(_subsystem_callback == NULL, "invariant");
  // initialization complete
}
/**
* Write all "tagged" (in-use) constant artifacts and their dependencies.
*/
size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload) {
assert(writer != NULL, "invariant");
ResourceMark rm;
setup(writer, leakp_writer, class_unload);
// write order is important because an individual write step
// might tag an artifact to be written in a subsequent step
write_klass_constants(writer, leakp_writer);
if (_artifacts->has_klass_entries()) {
write_package_constants(writer, leakp_writer);
write_class_loader_constants(writer, leakp_writer);
write_method_constants(writer, leakp_writer);
write_symbol_constants(writer, leakp_writer);
clear_artifacts(_artifacts, class_unload);
if (!write_klasses()) {
return 0;
}
// write_packages();
write_classloaders();
write_methods();
write_symbols();
return teardown();
}
......@@ -27,42 +27,11 @@
#include "jfr/utilities/jfrAllocation.hpp"
class ClassLoaderData;
class JfrArtifactClosure;
class JfrArtifactSet;
class JfrCheckpointWriter;
class Klass;
// XXX class PackageEntry;
class JfrTypeSet : AllStatic {
friend class CLDCallback;
friend class JfrTypeManager;
friend class TypeSetSerialization;
private:
static JfrArtifactSet* _artifacts;
static JfrArtifactClosure* _subsystem_callback;
static bool _class_unload;
static void do_klass(Klass* k);
static void do_unloaded_klass(Klass* k);
static void do_klasses();
// XXX
// static void do_package(PackageEntry* entry);
// static void do_unloaded_package(PackageEntry* entry);
// static void do_packages();
static void do_class_loader_data(ClassLoaderData* cld);
static void do_unloaded_class_loader_data(ClassLoaderData* cld);
static void do_class_loaders();
static void write_klass_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
static void write_package_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
static void write_class_loader_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
static void write_method_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
static void write_symbol_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
static void serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload);
public:
static size_t serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload);
};
#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESET_HPP
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -28,17 +28,30 @@
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
JfrSymbolId::JfrSymbolId() : _symbol_id_counter(0), _sym_table(new SymbolTable(this)),
_cstring_table(new CStringTable(this)), _pkg_table(new CStringTable(this)) {
static JfrSymbolId::CStringEntry* bootstrap = NULL;
JfrSymbolId::JfrSymbolId() :
_sym_table(new SymbolTable(this)),
_cstring_table(new CStringTable(this)),
_pkg_table(new CStringTable(this)),
_sym_list(NULL),
_cstring_list(NULL),
_symbol_id_counter(1),
_class_unload(false) {
assert(_sym_table != NULL, "invariant");
assert(_cstring_table != NULL, "invariant");
assert(_pkg_table != NULL, "invariant");
initialize();
bootstrap = new CStringEntry(0, (const char*)&BOOTSTRAP_LOADER_NAME);
assert(bootstrap != NULL, "invariant");
bootstrap->set_id(1);
_cstring_list = bootstrap;
}
void JfrSymbolId::initialize() {
JfrSymbolId::~JfrSymbolId() {
clear();
assert(_symbol_id_counter == 0, "invariant");
delete _sym_table;
delete _cstring_table;
delete bootstrap;
}
void JfrSymbolId::clear() {
......@@ -53,117 +66,139 @@ void JfrSymbolId::clear() {
_cstring_table->clear_entries();
}
assert(!_cstring_table->has_entries(), "invariant");
_symbol_id_counter = 0;
assert(_pkg_table != NULL, "invariant");
if (_pkg_table->has_entries()) {
_pkg_table->clear_entries();
}
assert(!_pkg_table->has_entries(), "invariant");
}
JfrSymbolId::~JfrSymbolId() {
delete _sym_table;
delete _cstring_table;
}
traceid JfrSymbolId::mark_anonymous_klass_name(const Klass* k) {
assert(k != NULL, "invariant");
assert(k->oop_is_instance(), "invariant");
assert(is_anonymous_klass(k), "invariant");
uintptr_t anonymous_symbol_hash_code = 0;
const char* const anonymous_symbol =
create_anonymous_klass_symbol((const InstanceKlass*)k, anonymous_symbol_hash_code);
if (anonymous_symbol == NULL) {
return 0;
}
assert(anonymous_symbol_hash_code != 0, "invariant");
traceid symbol_id = mark(anonymous_symbol, anonymous_symbol_hash_code);
assert(mark(anonymous_symbol, anonymous_symbol_hash_code) == symbol_id, "invariant");
return symbol_id;
}
const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(const Symbol* symbol) const {
return _sym_table->lookup_only(symbol, (uintptr_t)const_cast<Symbol*>(symbol)->identity_hash());
}
_sym_list = NULL;
_cstring_list = NULL;
_symbol_id_counter = 1;
const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(uintptr_t hash) const {
return _sym_table->lookup_only(NULL, hash);
assert(bootstrap != NULL, "invariant");
bootstrap->reset();
_cstring_list = bootstrap;
}
const JfrSymbolId::CStringEntry* JfrSymbolId::map_cstring(uintptr_t hash) const {
return _cstring_table->lookup_only(NULL, hash);
void JfrSymbolId::set_class_unload(bool class_unload) {
_class_unload = class_unload;
}
void JfrSymbolId::assign_id(SymbolEntry* entry) {
// Hashtable hook invoked when a new Symbol entry is linked in:
// pin the Symbol via its refcount, assign the next symbol id and
// thread the entry onto the insertion-ordered _sym_list.
void JfrSymbolId::on_link(const SymbolEntry* entry) {
  assert(entry != NULL, "invariant");
  const_cast<Symbol*>(entry->literal())->increment_refcount();
  assert(entry->id() == 0, "invariant");
  entry->set_id(++_symbol_id_counter);
  entry->set_list_next(_sym_list);
  _sym_list = entry;
}
bool JfrSymbolId::equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry) {
// Hashtable hook: entries are considered equal on hash alone here;
// the table has already matched the hash before calling.
bool JfrSymbolId::on_equals(uintptr_t hash, const SymbolEntry* entry) {
  assert(entry != NULL, "invariant");
  assert(entry->hash() == hash, "invariant");
  return true;
}
void JfrSymbolId::assign_id(CStringEntry* entry) {
// Hashtable hook invoked when a Symbol entry is unlinked: release the
// refcount taken in on_link.
void JfrSymbolId::on_unlink(const SymbolEntry* entry) {
  assert(entry != NULL, "invariant");
  const_cast<Symbol*>(entry->literal())->decrement_refcount();
}
// Hashtable hook invoked when a new CString entry is linked in:
// assign the next symbol id and thread the entry onto the
// insertion-ordered _cstring_list.
void JfrSymbolId::on_link(const CStringEntry* entry) {
  assert(entry != NULL, "invariant");
  assert(entry->id() == 0, "invariant");
  entry->set_id(++_symbol_id_counter);
  entry->set_list_next(_cstring_list);
  _cstring_list = entry;
}
bool JfrSymbolId::equals(const char* query, uintptr_t hash, const CStringEntry* entry) {
// query might be NULL
// Hashtable hook: CString entries are considered equal on hash alone;
// the table has already matched the hash before calling.
bool JfrSymbolId::on_equals(uintptr_t hash, const CStringEntry* entry) {
  assert(entry != NULL, "invariant");
  assert(entry->hash() == hash, "invariant");
  return true;
}
traceid JfrSymbolId::mark(const Klass* k) {
assert(k != NULL, "invariant");
traceid symbol_id = 0;
if (is_anonymous_klass(k)) {
symbol_id = mark_anonymous_klass_name(k);
}
if (0 == symbol_id) {
const Symbol* const sym = k->name();
if (sym != NULL) {
symbol_id = mark(sym);
}
// Hashtable hook invoked when a CString entry is unlinked: free the
// C-heap copy of the string the entry owns.
void JfrSymbolId::on_unlink(const CStringEntry* entry) {
  assert(entry != NULL, "invariant");
  // Bug fix: the freed size is strlen(literal()) + 1 (string length plus
  // the NUL terminator). The original computed strlen(literal() + 1) —
  // the length of the string starting at its SECOND character — which
  // under-reports the allocation size by two bytes.
  JfrCHeapObj::free(const_cast<char*>(entry->literal()), strlen(entry->literal()) + 1);
}
// Return the pre-assigned id (1) of the bootstrap class loader name
// entry, tagging it for the leak-profiler subset when requested.
traceid JfrSymbolId::bootstrap_name(bool leakp) {
  assert(bootstrap != NULL, "invariant");
  if (leakp) {
    bootstrap->set_leakp();
  }
  // Fix: removed stray interleaved lines referencing an undefined local
  // "symbol_id" (diff residue from an older overload) — they would not
  // compile and were unreachable in the intended body.
  return 1;
}
traceid JfrSymbolId::mark(const Symbol* symbol) {
// Mark a Symbol for writing: compute its identity hash and delegate to
// the (hash, Symbol*, leakp) overload that owns the caching logic.
traceid JfrSymbolId::mark(const Symbol* symbol, bool leakp) {
  assert(symbol != NULL, "invariant");
  // Fix: removed a stray interleaved "return mark(symbol, hash);" line
  // (old two-argument overload, diff residue) that made this delegation
  // unreachable.
  return mark((uintptr_t)((Symbol*)symbol)->identity_hash(), symbol, leakp);
}
traceid JfrSymbolId::mark(const Symbol* data, uintptr_t hash) {
static unsigned int last_symbol_hash = 0;
static traceid last_symbol_id = 0;
// Mark (intern) a Symbol and return its constant id. A one-element
// cache (last_symbol_hash/last_symbol_id) short-circuits repeated marks
// of the same symbol — the point of JDK-8225797, which reduces the
// checkpoint data generated by OldObjectSample events.
traceid JfrSymbolId::mark(uintptr_t hash, const Symbol* data, bool leakp) {
  assert(data != NULL, "invariant");
  assert(_sym_table != NULL, "invariant");
  // Fix: removed a stray interleaved "return _sym_table->id(data, hash);"
  // line (old implementation, diff residue) that returned before the
  // caching/tagging logic below could ever run.
  if (hash == last_symbol_hash) {
    assert(last_symbol_id != 0, "invariant");
    return last_symbol_id;
  }
  const SymbolEntry& entry = _sym_table->lookup_put(hash, data);
  if (_class_unload) {
    entry.set_unloading();
  }
  if (leakp) {
    entry.set_leakp();
  }
  last_symbol_hash = hash;
  last_symbol_id = entry.id();
  return last_symbol_id;
}
traceid JfrSymbolId::mark(const char* str, uintptr_t hash) {
assert(str != NULL, "invariant");
return _cstring_table->id(str, hash);
}
static unsigned int last_cstring_hash_for_pkg = 0;
static traceid last_cstring_id_for_pkg = 0;
static unsigned int last_cstring_hash = 0;
static traceid last_cstring_id = 0;
// Mark (intern) a package name and return its constant id, with a
// one-element memoization cache keyed by hash (see JDK-8225797).
traceid JfrSymbolId::markPackage(const char* name, uintptr_t hash) {
  assert(name != NULL, "invariant");
  assert(_pkg_table != NULL, "invariant");
  // Fix: removed a stray interleaved "return _pkg_table->id(name, hash);"
  // line (old implementation, diff residue) that made the caching logic
  // below unreachable.
  if (hash == last_cstring_hash_for_pkg) {
    assert(last_cstring_id_for_pkg != 0, "invariant");
    return last_cstring_id_for_pkg;
  }
  const CStringEntry& entry = _pkg_table->lookup_put(hash, name);
  if (_class_unload) {
    entry.set_unloading();
  }
  last_cstring_hash_for_pkg = hash;
  last_cstring_id_for_pkg = entry.id();
  return last_cstring_id_for_pkg;
}
bool JfrSymbolId::is_anonymous_klass(const Klass* k) {
assert(k != NULL, "invariant");
return k->oop_is_instance() && ((const InstanceKlass*)k)->is_anonymous();
// Mark (intern) a C string and return its constant id. A one-element
// cache (last_cstring_hash/last_cstring_id) short-circuits repeated
// marks of the same string.
traceid JfrSymbolId::mark(uintptr_t hash, const char* str, bool leakp) {
  assert(str != NULL, "invariant");
  assert(_cstring_table != NULL, "invariant");
  if (hash == last_cstring_hash) {
    assert(last_cstring_id != 0, "invariant");
    return last_cstring_id;
  }
  const CStringEntry& entry = _cstring_table->lookup_put(hash, str);
  if (_class_unload) {
    entry.set_unloading();
  }
  if (leakp) {
    entry.set_leakp();
  }
  last_cstring_hash = hash;
  last_cstring_id = entry.id();
  return last_cstring_id;
}
/*
......@@ -174,7 +209,7 @@ bool JfrSymbolId::is_anonymous_klass(const Klass* k) {
* caller needs ResourceMark
*/
uintptr_t JfrSymbolId::anonymous_klass_name_hash_code(const InstanceKlass* ik) {
// Hash for an unsafe-anonymous klass name, derived from the identity
// hash of the klass's java mirror (caller needs a ResourceMark for the
// downstream name construction).
uintptr_t JfrSymbolId::unsafe_anonymous_klass_name_hash(const InstanceKlass* ik) {
  assert(ik != NULL, "invariant");
  assert(ik->is_anonymous(), "invariant");
  const oop mirror = ik->java_mirror();
  // NOTE(review): a "......@@" diff hunk marker was embedded here in the
  // pasted source and has been removed; it likely swallowed an
  // assert(mirror != NULL, "invariant") line — confirm against upstream.
  return (uintptr_t)mirror->identity_hash();
}
const char* JfrSymbolId::create_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t& hashcode) {
static const char* create_unsafe_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t hash) {
assert(ik != NULL, "invariant");
assert(ik->is_anonymous(), "invariant");
assert(0 == hashcode, "invariant");
assert(hash != 0, "invariant");
char* anonymous_symbol = NULL;
const oop mirror = ik->java_mirror();
assert(mirror != NULL, "invariant");
char hash_buf[40];
hashcode = anonymous_klass_name_hash_code(ik);
sprintf(hash_buf, "/" UINTX_FORMAT, hashcode);
sprintf(hash_buf, "/" UINTX_FORMAT, hash);
const size_t hash_len = strlen(hash_buf);
const size_t result_len = ik->name()->utf8_length();
anonymous_symbol = NEW_RESOURCE_ARRAY(char, result_len + hash_len + 1);
anonymous_symbol = JfrCHeapObj::new_array<char>(result_len + hash_len + 1);
ik->name()->as_klass_external_name(anonymous_symbol, (int)result_len + 1);
assert(strlen(anonymous_symbol) == result_len, "invariant");
strcpy(anonymous_symbol + result_len, hash_buf);
......@@ -202,74 +236,107 @@ const char* JfrSymbolId::create_anonymous_klass_symbol(const InstanceKlass* ik,
return anonymous_symbol;
}
uintptr_t JfrSymbolId::regular_klass_name_hash_code(const Klass* k) {
bool JfrSymbolId::is_unsafe_anonymous_klass(const Klass* k) {
assert(k != NULL, "invariant");
const Symbol* const sym = k->name();
assert(sym != NULL, "invariant");
return (uintptr_t)const_cast<Symbol*>(sym)->identity_hash();
return k->oop_is_instance() && ((const InstanceKlass*)k)->is_anonymous();
}
static unsigned int last_anonymous_hash = 0;
static traceid last_anonymous_id = 0;
// Mark the synthesized name of an unsafe-anonymous klass. Uses a
// one-element cache keyed by the mirror hash, and only builds the
// (C-heap allocated) synthetic name string when the table has no
// entry for the hash yet.
traceid JfrSymbolId::mark_unsafe_anonymous_klass_name(const InstanceKlass* ik, bool leakp) {
  assert(ik != NULL, "invariant");
  assert(ik->is_anonymous(), "invariant");
  const uintptr_t hash = unsafe_anonymous_klass_name_hash(ik);
  if (hash == last_anonymous_hash) {
    assert(last_anonymous_id != 0, "invariant");
    return last_anonymous_id;
  }
  last_anonymous_hash = hash;
  const CStringEntry* const entry = _cstring_table->lookup_only(hash);
  last_anonymous_id = entry != NULL ? entry->id() : mark(hash, create_unsafe_anonymous_klass_symbol(ik, hash), leakp);
  return last_anonymous_id;
}
// Mark the name of a Klass: unsafe-anonymous klasses get a synthesized
// name; all others are marked via their name Symbol. A handler must
// always produce a non-zero id.
traceid JfrSymbolId::mark(const Klass* k, bool leakp) {
  assert(k != NULL, "invariant");
  traceid symbol_id = 0;
  if (is_unsafe_anonymous_klass(k)) {
    assert(k->oop_is_instance(), "invariant");
    symbol_id = mark_unsafe_anonymous_klass_name((const InstanceKlass*)k, leakp);
  }
  if (0 == symbol_id) {
    Symbol* const sym = k->name();
    if (sym != NULL) {
      symbol_id = mark(sym, leakp);
    }
  }
  assert(symbol_id > 0, "a symbol handler must mark the symbol for writing");
  return symbol_id;
}
// Reset the one-element memoization caches. Must run whenever the
// underlying tables are cleared so stale ids are never returned.
static void reset_symbol_caches() {
  last_anonymous_hash = 0;
  last_symbol_hash = 0;
  last_cstring_hash = 0;
  last_cstring_hash_for_pkg = 0;
}
JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_id(new JfrSymbolId()),
_klass_list(NULL),
_class_unload(class_unload) {
_klass_list(NULL),
_total_count(0) {
initialize(class_unload);
assert(_klass_list != NULL, "invariant");
}
static const size_t initial_class_list_size = 200;
void JfrArtifactSet::initialize(bool class_unload) {
assert(_symbol_id != NULL, "invariant");
_symbol_id->initialize();
assert(!_symbol_id->has_entries(), "invariant");
_symbol_id->mark(BOOTSTRAP_LOADER_NAME, 0); // pre-load "bootstrap"
_class_unload = class_unload;
_symbol_id->set_class_unload(class_unload);
_total_count = 0;
// resource allocation
_klass_list = new GrowableArray<const Klass*>(initial_class_list_size, false, mtTracing);
}
// Clears cached state, then releases the owned symbol id table.
JfrArtifactSet::~JfrArtifactSet() {
  clear();
  delete _symbol_id;
}
// Reset per-pass state: drop the memoization caches and the symbol
// tables; the klass list is resource-allocated and released by the
// caller's ResourceMark.
void JfrArtifactSet::clear() {
  reset_symbol_caches();
  _symbol_id->clear();
  // _klass_list will be cleared by a ResourceMark
}
traceid JfrArtifactSet::mark_anonymous_klass_name(const Klass* klass) {
return _symbol_id->mark_anonymous_klass_name(klass);
}
traceid JfrArtifactSet::mark(const Symbol* sym, uintptr_t hash) {
return _symbol_id->mark(sym, hash);
traceid JfrArtifactSet::bootstrap_name(bool leakp) {
return _symbol_id->bootstrap_name(leakp);
}
traceid JfrArtifactSet::mark(const Klass* klass) {
return _symbol_id->mark(klass);
traceid JfrArtifactSet::mark_unsafe_anonymous_klass_name(const Klass* klass, bool leakp) {
assert(klass->oop_is_instance(), "invariant");
return _symbol_id->mark_unsafe_anonymous_klass_name((const InstanceKlass*)klass, leakp);
}
traceid JfrArtifactSet::mark(const Symbol* symbol) {
return _symbol_id->mark(symbol);
traceid JfrArtifactSet::mark(uintptr_t hash, const Symbol* sym, bool leakp) {
return _symbol_id->mark(hash, sym, leakp);
}
traceid JfrArtifactSet::mark(const char* const str, uintptr_t hash) {
return _symbol_id->mark(str, hash);
traceid JfrArtifactSet::mark(const Klass* klass, bool leakp) {
return _symbol_id->mark(klass, leakp);
}
// Delegate package-name interning to the owned symbol id table.
traceid JfrArtifactSet::markPackage(const char* const name, uintptr_t hash) {
  return _symbol_id->markPackage(name, hash);
}
const JfrSymbolId::SymbolEntry* JfrArtifactSet::map_symbol(const Symbol* symbol) const {
return _symbol_id->map_symbol(symbol);
// Intern a Symbol (hash computed by the table); leakp forwards the
// leak-profiler tag.
traceid JfrArtifactSet::mark(const Symbol* symbol, bool leakp) {
  return _symbol_id->mark(symbol, leakp);
}
// Look up a previously interned symbol entry by hash.
// NOTE(review): miss behavior (NULL vs assert) is defined by
// JfrSymbolId::map_symbol — confirm before relying on it.
const JfrSymbolId::SymbolEntry* JfrArtifactSet::map_symbol(uintptr_t hash) const {
  return _symbol_id->map_symbol(hash);
}
const JfrSymbolId::CStringEntry* JfrArtifactSet::map_cstring(uintptr_t hash) const {
return _symbol_id->map_cstring(hash);
// Intern a C-string (e.g. a generated anonymous-klass name) using a
// precomputed hash; leakp forwards the leak-profiler tag.
traceid JfrArtifactSet::mark(uintptr_t hash, const char* const str, bool leakp) {
  return _symbol_id->mark(hash, str, leakp);
}
bool JfrArtifactSet::has_klass_entries() const {
......@@ -286,3 +353,7 @@ void JfrArtifactSet::register_klass(const Klass* k) {
assert(_klass_list->find(k) == -1, "invariant");
_klass_list->append(k);
}
// Running total accumulated by tally(writer) calls during serialization.
size_t JfrArtifactSet::total_count() const {
  const size_t count = _total_count;
  return count;
}
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -22,8 +22,8 @@
*
*/
#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETUTILS_HPP
#define SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETUTILS_HPP
#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETUTILS_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETUTILS_HPP
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/utilities/jfrAllocation.hpp"
......@@ -81,183 +81,185 @@ class KlassToFieldEnvelope {
};
template <typename T>
void tag_leakp_artifact(T const& value, bool class_unload) {
assert(value != NULL, "invariant");
if (class_unload) {
SET_LEAKP_USED_THIS_EPOCH(value);
assert(LEAKP_USED_THIS_EPOCH(value), "invariant");
} else {
SET_LEAKP_USED_PREV_EPOCH(value);
assert(LEAKP_USED_PREV_EPOCH(value), "invariant");
class ClearArtifact {
public:
bool operator()(T const& value) {
CLEAR_METHOD_AND_CLASS_PREV_EPOCH(value);
CLEAR_SERIALIZED(value);
assert(IS_NOT_SERIALIZED(value), "invariant");
return true;
}
}
};
template <typename T>
class LeakpClearArtifact {
bool _class_unload;
template <>
class ClearArtifact<const Method*> {
public:
LeakpClearArtifact(bool class_unload) : _class_unload(class_unload) {}
bool operator()(T const& value) {
if (_class_unload) {
if (LEAKP_USED_THIS_EPOCH(value)) {
LEAKP_UNUSE_THIS_EPOCH(value);
}
} else {
if (LEAKP_USED_PREV_EPOCH(value)) {
LEAKP_UNUSE_PREV_EPOCH(value);
}
}
bool operator()(const Method* method) {
assert(METHOD_FLAG_USED_PREV_EPOCH(method), "invariant");
CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
CLEAR_METHOD_SERIALIZED(method);
assert(METHOD_NOT_SERIALIZED(method), "invariant");
return true;
}
};
template <typename T>
class ClearArtifact {
class JfrStub {
public:
bool operator()(T const& value) { return true; }
};
template <typename T>
class SerializePredicate {
bool _class_unload;
public:
ClearArtifact(bool class_unload) : _class_unload(class_unload) {}
SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
bool operator()(T const& value) {
if (_class_unload) {
if (LEAKP_USED_THIS_EPOCH(value)) {
LEAKP_UNUSE_THIS_EPOCH(value);
}
if (USED_THIS_EPOCH(value)) {
UNUSE_THIS_EPOCH(value);
}
if (METHOD_USED_THIS_EPOCH(value)) {
UNUSE_METHOD_THIS_EPOCH(value);
}
} else {
if (LEAKP_USED_PREV_EPOCH(value)) {
LEAKP_UNUSE_PREV_EPOCH(value);
}
if (USED_PREV_EPOCH(value)) {
UNUSE_PREV_EPOCH(value);
}
if (METHOD_USED_PREV_EPOCH(value)) {
UNUSE_METHOD_PREV_EPOCH(value);
}
}
return true;
assert(value != NULL, "invariant");
return _class_unload ? true : IS_NOT_SERIALIZED(value);
}
};
template <>
class ClearArtifact<const Method*> {
class SerializePredicate<const Method*> {
bool _class_unload;
public:
ClearArtifact(bool class_unload) : _class_unload(class_unload) {}
SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
bool operator()(const Method* method) {
if (_class_unload) {
if (METHOD_FLAG_USED_THIS_EPOCH(method)) {
CLEAR_METHOD_FLAG_USED_THIS_EPOCH(method);
}
} else {
if (METHOD_FLAG_USED_PREV_EPOCH(method)) {
CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
}
}
return true;
assert(method != NULL, "invariant");
return _class_unload ? true : METHOD_NOT_SERIALIZED(method);
}
};
template <typename T>
class LeakPredicate {
template <typename T, bool leakp>
class SymbolPredicate {
bool _class_unload;
public:
LeakPredicate(bool class_unload) : _class_unload(class_unload) {}
SymbolPredicate(bool class_unload) : _class_unload(class_unload) {}
bool operator()(T const& value) {
return _class_unload ? LEAKP_USED_THIS_EPOCH(value) : LEAKP_USED_PREV_EPOCH(value);
assert(value != NULL, "invariant");
if (_class_unload) {
return leakp ? value->is_leakp() : value->is_unloading();
}
return leakp ? value->is_leakp() : !value->is_serialized();
}
};
template <typename T>
class UsedPredicate {
bool _class_unload;
// Predicate selecting klasses that have had methods tagged as used.
// When leakp is true, selection uses the leak-profiler tag (IS_LEAKP)
// regardless of epoch; otherwise the method-used bit of the requested
// epoch (current vs previous) decides.
template <bool leakp>
class MethodUsedPredicate {
  bool _current_epoch; // true => test the current epoch's bit
 public:
  MethodUsedPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
  bool operator()(const Klass* klass) {
    if (_current_epoch) {
      return leakp ? IS_LEAKP(klass) : METHOD_USED_THIS_EPOCH(klass);
    }
    return leakp ? IS_LEAKP(klass) : METHOD_USED_PREV_EPOCH(klass);
  }
};
template <bool leakp>
class MethodFlagPredicate {
bool _current_epoch;
public:
UsedPredicate(bool class_unload) : _class_unload(class_unload) {}
bool operator()(T const& value) {
return _class_unload ? USED_THIS_EPOCH(value) : USED_PREV_EPOCH(value);
MethodFlagPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
bool operator()(const Method* method) {
if (_current_epoch) {
return leakp ? IS_METHOD_LEAKP_USED(method) : METHOD_FLAG_USED_THIS_EPOCH(method);
}
return leakp ? IS_METHOD_LEAKP_USED(method) : METHOD_FLAG_USED_PREV_EPOCH(method);
}
};
template <typename T, int compare(const T&, const T&)>
class UniquePredicate {
private:
GrowableArray<T> _seen;
template <typename T>
class LeakPredicate {
public:
UniquePredicate(bool) : _seen() {}
LeakPredicate(bool class_unload) {}
bool operator()(T const& value) {
bool not_unique;
_seen.template find_sorted<T, compare>(value, not_unique);
if (not_unique) {
return false;
}
_seen.template insert_sorted<compare>(value);
return true;
return IS_LEAKP(value);
}
};
class MethodFlagPredicate {
bool _class_unload;
template <>
class LeakPredicate<const Method*> {
public:
MethodFlagPredicate(bool class_unload) : _class_unload(class_unload) {}
LeakPredicate(bool class_unload) {}
bool operator()(const Method* method) {
return _class_unload ? METHOD_FLAG_USED_THIS_EPOCH(method) : METHOD_FLAG_USED_PREV_EPOCH(method);
assert(method != NULL, "invariant");
return IS_METHOD_LEAKP_USED(method);
}
};
template <bool leakp>
class MethodUsedPredicate {
bool _class_unload;
template <typename T, typename IdType>
class ListEntry : public JfrHashtableEntry<T, IdType> {
public:
MethodUsedPredicate(bool class_unload) : _class_unload(class_unload) {}
bool operator()(const Klass* klass) {
assert(ANY_USED(klass), "invariant");
if (_class_unload) {
return leakp ? LEAKP_METHOD_USED_THIS_EPOCH(klass) : METHOD_USED_THIS_EPOCH(klass);
}
return leakp ? LEAKP_METHOD_USED_PREV_EPOCH(klass) : METHOD_USED_PREV_EPOCH(klass);
ListEntry(uintptr_t hash, const T& data) : JfrHashtableEntry<T, IdType>(hash, data),
_list_next(NULL), _serialized(false), _unloading(false), _leakp(false) {}
const ListEntry<T, IdType>* list_next() const { return _list_next; }
void reset() const {
_list_next = NULL; _serialized = false; _unloading = false; _leakp = false;
}
void set_list_next(const ListEntry<T, IdType>* next) const { _list_next = next; }
bool is_serialized() const { return _serialized; }
void set_serialized() const { _serialized = true; }
bool is_unloading() const { return _unloading; }
void set_unloading() const { _unloading = true; }
bool is_leakp() const { return _leakp; }
void set_leakp() const { _leakp = true; }
private:
mutable const ListEntry<T, IdType>* _list_next;
mutable bool _serialized;
mutable bool _unloading;
mutable bool _leakp;
};
class JfrSymbolId : public JfrCHeapObj {
template <typename, typename, template<typename, typename> class, typename, size_t>
friend class HashTableHost;
typedef HashTableHost<const Symbol*, traceid, Entry, JfrSymbolId> SymbolTable;
typedef HashTableHost<const char*, traceid, Entry, JfrSymbolId> CStringTable;
typedef HashTableHost<const Symbol*, traceid, ListEntry, JfrSymbolId> SymbolTable;
typedef HashTableHost<const char*, traceid, ListEntry, JfrSymbolId> CStringTable;
friend class JfrArtifactSet;
public:
typedef SymbolTable::HashEntry SymbolEntry;
typedef CStringTable::HashEntry CStringEntry;
private:
traceid _symbol_id_counter;
SymbolTable* _sym_table;
CStringTable* _cstring_table;
CStringTable* _pkg_table;
const SymbolEntry* _sym_list;
const CStringEntry* _cstring_list;
traceid _symbol_id_counter;
bool _class_unload;
// hashtable(s) callbacks
void assign_id(SymbolEntry* entry);
bool equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry);
void assign_id(CStringEntry* entry);
bool equals(const char* query, uintptr_t hash, const CStringEntry* entry);
void on_link(const SymbolEntry* entry);
bool on_equals(uintptr_t hash, const SymbolEntry* entry);
void on_unlink(const SymbolEntry* entry);
void on_link(const CStringEntry* entry);
bool on_equals(uintptr_t hash, const CStringEntry* entry);
void on_unlink(const CStringEntry* entry);
template <typename Functor, typename T>
void iterate(Functor& functor, const T* list) {
const T* symbol = list;
while (symbol != NULL) {
const T* next = symbol->list_next();
functor(symbol);
symbol = next;
}
}
public:
static bool is_anonymous_klass(const Klass* k);
static const char* create_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t& hashcode);
static uintptr_t anonymous_klass_name_hash_code(const InstanceKlass* ik);
static uintptr_t regular_klass_name_hash_code(const Klass* k);
traceid mark_unsafe_anonymous_klass_name(const InstanceKlass* k, bool leakp);
bool is_unsafe_anonymous_klass(const Klass* k);
uintptr_t unsafe_anonymous_klass_name_hash(const InstanceKlass* ik);
public:
JfrSymbolId();
~JfrSymbolId();
void initialize();
void clear();
void set_class_unload(bool class_unload);
traceid mark_anonymous_klass_name(const Klass* k);
traceid mark(const Symbol* sym, uintptr_t hash);
traceid mark(const Klass* k);
traceid mark(const Symbol* symbol);
traceid mark(const char* str, uintptr_t hash);
traceid markPackage(const char* name, uintptr_t hash);
template <typename T>
......@@ -265,46 +267,25 @@ class JfrSymbolId : public JfrCHeapObj {
_pkg_table->iterate_entry(functor);
}
const SymbolEntry* map_symbol(const Symbol* symbol) const;
const SymbolEntry* map_symbol(uintptr_t hash) const;
const CStringEntry* map_cstring(uintptr_t hash) const;
traceid mark(uintptr_t hash, const Symbol* sym, bool leakp);
traceid mark(const Klass* k, bool leakp);
traceid mark(const Symbol* symbol, bool leakp);
traceid mark(uintptr_t hash, const char* str, bool leakp);
traceid bootstrap_name(bool leakp);
template <typename T>
void symbol(T& functor, const Klass* k) {
if (is_anonymous_klass(k)) {
return;
}
functor(map_symbol(regular_klass_name_hash_code(k)));
}
template <typename T>
void symbol(T& functor, const Method* method) {
assert(method != NULL, "invariant");
functor(map_symbol((uintptr_t)method->name()->identity_hash()));
functor(map_symbol((uintptr_t)method->signature()->identity_hash()));
}
template <typename T>
void cstring(T& functor, const Klass* k) {
if (!is_anonymous_klass(k)) {
return;
}
functor(map_cstring(anonymous_klass_name_hash_code((const InstanceKlass*)k)));
}
template <typename T>
void iterate_symbols(T& functor) {
_sym_table->iterate_entry(functor);
template <typename Functor>
void iterate_symbols(Functor& functor) {
iterate(functor, _sym_list);
}
template <typename T>
void iterate_cstrings(T& functor) {
_cstring_table->iterate_entry(functor);
template <typename Functor>
void iterate_cstrings(Functor& functor) {
iterate(functor, _cstring_list);
}
bool has_entries() const { return has_symbol_entries() || has_cstring_entries(); }
bool has_symbol_entries() const { return _sym_table->has_entries(); }
bool has_cstring_entries() const { return _cstring_table->has_entries(); }
bool has_symbol_entries() const { return _sym_list != NULL; }
bool has_cstring_entries() const { return _cstring_list != NULL; }
};
/**
......@@ -325,7 +306,7 @@ class JfrArtifactSet : public JfrCHeapObj {
private:
JfrSymbolId* _symbol_id;
GrowableArray<const Klass*>* _klass_list;
bool _class_unload;
size_t _total_count;
public:
JfrArtifactSet(bool class_unload);
......@@ -335,11 +316,13 @@ class JfrArtifactSet : public JfrCHeapObj {
void initialize(bool class_unload);
void clear();
traceid mark(const Symbol* sym, uintptr_t hash);
traceid mark(const Klass* klass);
traceid mark(const Symbol* symbol);
traceid mark(const char* const str, uintptr_t hash);
traceid mark_anonymous_klass_name(const Klass* klass);
traceid mark(uintptr_t hash, const Symbol* sym, bool leakp);
traceid mark(const Klass* klass, bool leakp);
traceid mark(const Symbol* symbol, bool leakp);
traceid mark(uintptr_t hash, const char* const str, bool leakp);
traceid mark_unsafe_anonymous_klass_name(const Klass* klass, bool leakp);
traceid bootstrap_name(bool leakp);
traceid markPackage(const char* const name, uintptr_t hash);
......@@ -349,6 +332,7 @@ class JfrArtifactSet : public JfrCHeapObj {
bool has_klass_entries() const;
int entries() const;
size_t total_count() const;
void register_klass(const Klass* k);
template <typename Functor>
......@@ -374,6 +358,12 @@ class JfrArtifactSet : public JfrCHeapObj {
void iterate_packages(T& functor) {
_symbol_id->iterate_packages(functor);
}
template <typename Writer>
void tally(Writer& writer) {
_total_count += writer.count();
}
};
class KlassArtifactRegistrator {
......@@ -392,4 +382,4 @@ class KlassArtifactRegistrator {
}
};
#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETUTILS_HPP
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETUTILS_HPP
......@@ -130,7 +130,7 @@ void JfrTraceId::remove(const Klass* k) {
// This mechanism will retain the event specific flags
// in the archive, allowing for event flag restoration
// when renewing the traceid on klass revival.
k->set_trace_id(EVENT_FLAGS_MASK(k));
k->set_trace_id(EVENT_KLASS_MASK(k));
}
// used by CDS / APPCDS as part of "restore_unshareable_info"
......@@ -152,12 +152,12 @@ traceid JfrTraceId::get(jclass jc) {
return get(java_lang_Class::as_Klass(my_oop));
}
traceid JfrTraceId::use(jclass jc, bool leakp /* false */) {
traceid JfrTraceId::use(jclass jc) {
assert(jc != NULL, "invariant");
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
const oop my_oop = JNIHandles::resolve(jc);
assert(my_oop != NULL, "invariant");
return use(java_lang_Class::as_Klass(my_oop), leakp);
return use(java_lang_Class::as_Klass(my_oop));
}
bool JfrTraceId::in_visible_set(const jclass jc) {
......
......@@ -87,11 +87,15 @@ class JfrTraceId : public AllStatic {
static traceid get(const Thread* thread);
// tag construct as used, returns pre-tagged traceid
static traceid use(const Klass* klass, bool leakp = false);
static traceid use(jclass jc, bool leakp = false);
static traceid use(const Method* method, bool leakp = false);
// XXX static traceid use(const PackageEntry* package, bool leakp = false);
static traceid use(const ClassLoaderData* cld, bool leakp = false);
static traceid use(const Klass* klass);
static traceid use(jclass jc);
static traceid use(const Method* method);
static traceid use(const Klass* klass, const Method* method);
// XXX static traceid use(const PackageEntry* package);
static traceid use(const ClassLoaderData* cld);
// leak profiler
static void set_leakp(const Method* method);
static void remove(const Klass* klass);
static void restore(const Klass* klass);
......
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -22,12 +22,15 @@
*
*/
#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTYPEIDS_INLINE_HPP
#define SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTYPEIDS_INLINE_HPP
#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_INLINE_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_INLINE_HPP
#include "classfile/classLoaderData.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
#include "jfr/support/jfrKlassExtension.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/klass.hpp"
#include "oops/instanceKlass.hpp"
......@@ -36,21 +39,11 @@
#include "utilities/debug.hpp"
template <typename T>
inline traceid set_used_and_get(const T* type, bool leakp) {
inline traceid set_used_and_get(const T* type) {
assert(type != NULL, "invariant");
if (leakp) {
SET_LEAKP_USED_THIS_EPOCH(type);
assert(LEAKP_USED_THIS_EPOCH(type), "invariant");
}
SET_USED_THIS_EPOCH(type);
assert(USED_THIS_EPOCH(type), "invariant");
return TRACE_ID_MASKED_PTR(type);
}
template <typename T>
inline traceid set_used_and_get_shifted(const T* type, bool leakp) {
assert(type != NULL, "invariant");
return set_used_and_get(type, leakp) >> TRACE_ID_SHIFT;
return TRACE_ID(type);
}
inline traceid JfrTraceId::get(const Klass* klass) {
......@@ -63,34 +56,45 @@ inline traceid JfrTraceId::get(const Thread* t) {
return TRACE_ID_RAW(t->jfr_thread_local());
}
inline traceid JfrTraceId::use(const Klass* klass, bool leakp /* false */) {
inline traceid JfrTraceId::use(const Klass* klass) {
assert(klass != NULL, "invariant");
return set_used_and_get_shifted(klass, leakp);
return set_used_and_get(klass);
}
inline traceid JfrTraceId::use(const Method* method, bool leakp /* false */) {
inline traceid JfrTraceId::use(const Method* method) {
assert(method != NULL, "invariant");
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
const Klass* const klass = method->method_holder();
return use(method->method_holder(), method);
}
inline traceid JfrTraceId::use(const Klass* klass, const Method* method) {
assert(klass != NULL, "invariant");
if (leakp) {
SET_LEAKP_USED_THIS_EPOCH(klass);
assert(LEAKP_USED_THIS_EPOCH(klass), "invariant");
}
assert(method != NULL, "invariant");
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
return (METHOD_ID(klass, method));
}
// XXX
//inline traceid JfrTraceId::use(const PackageEntry* package, bool leakp /* false */) {
//inline traceid JfrTraceId::use(const PackageEntry* package) {
// assert(package != NULL, "invariant");
// return set_used_and_get_shifted(package, leakp);
// return set_used_and_get(package);
//}
inline traceid JfrTraceId::use(const ClassLoaderData* cld, bool leakp /* false */) {
inline traceid JfrTraceId::use(const ClassLoaderData* cld) {
assert(cld != NULL, "invariant");
return cld->is_anonymous() ? 0 : set_used_and_get_shifted(cld, leakp);
return cld->is_anonymous() ? 0 : set_used_and_get(cld);
}
// Leak profiler: additionally tag an already-used method and its holder
// klass so they are selected for leak-profiler checkpoints.
inline void JfrTraceId::set_leakp(const Method* method) {
  assert(method != NULL, "invariant");
  const Klass* const klass = method->method_holder();
  assert(klass != NULL, "invariant");
  // Precondition: both the klass and the method flag must already carry
  // the "used this epoch" tags before the leakp tag is layered on top.
  assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
  assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
  SET_LEAKP(klass);
  SET_METHOD_LEAKP(method);
}
inline bool JfrTraceId::in_visible_set(const Klass* klass) {
......@@ -106,10 +110,10 @@ inline bool JfrTraceId::is_jdk_jfr_event(const Klass* k) {
inline void JfrTraceId::tag_as_jdk_jfr_event(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(IS_NOT_AN_EVENT_KLASS(klass), "invariant");
SET_TAG(klass, JDK_JFR_EVENT_KLASS);
// assert(IS_NOT_AN_EVENT_KLASS(klass), "invariant");
SET_JDK_JFR_EVENT_KLASS(klass);
assert(IS_JDK_JFR_EVENT_KLASS(klass), "invariant");
assert(IS_NOT_AN_EVENT_SUB_KLASS(klass), "invariant");
// assert(IS_NOT_AN_EVENT_SUB_KLASS(klass), "invariant");
}
inline bool JfrTraceId::is_jdk_jfr_event_sub(const Klass* k) {
......@@ -119,8 +123,8 @@ inline bool JfrTraceId::is_jdk_jfr_event_sub(const Klass* k) {
inline void JfrTraceId::tag_as_jdk_jfr_event_sub(const Klass* k) {
assert(k != NULL, "invariant");
if (IS_NOT_AN_EVENT_KLASS(k)) {
SET_TAG(k, JDK_JFR_EVENT_SUBKLASS);
if (IS_NOT_AN_EVENT_SUB_KLASS(k)) {
SET_JDK_JFR_EVENT_SUBKLASS(k);
}
assert(IS_JDK_JFR_EVENT_SUBKLASS(k), "invariant");
}
......@@ -141,8 +145,8 @@ inline bool JfrTraceId::is_event_host(const Klass* k) {
inline void JfrTraceId::tag_as_event_host(const Klass* k) {
assert(k != NULL, "invariant");
SET_TAG(k, EVENT_HOST_KLASS);
SET_EVENT_HOST_KLASS(k);
assert(IS_EVENT_HOST_KLASS(k), "invariant");
}
#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTYPEIDS_INLINE_HPP
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_INLINE_HPP
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -22,8 +22,8 @@
*
*/
#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
#define SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
#include "jfr/utilities/jfrTypes.hpp"
#include "runtime/atomic.inline.hpp"
......@@ -31,10 +31,10 @@
#ifdef VM_LITTLE_ENDIAN
static const int low_offset = 0;
static const int leakp_offset = low_offset + 1;
static const int meta_offset = low_offset + 1;
#else
static const int low_offset = 7;
static const int leakp_offset = low_offset - 1;
static const int meta_offset = low_offset - 1;
#endif
inline void set_bits(jbyte bits, jbyte* const dest) {
......@@ -92,17 +92,28 @@ inline void set_traceid_mask(jbyte mask, traceid* dest) {
set_mask(mask, ((jbyte*)dest) + low_offset);
}
inline void set_leakp_traceid_bits(jbyte bits, traceid* dest) {
set_bits(bits, ((jbyte*)dest) + leakp_offset);
// OR the given bits into *dest. Plain (non-atomic) store — callers must
// provide any required synchronization.
inline void set_meta_bits(jbyte bits, jbyte* const dest) {
  assert(dest != NULL, "invariant");
  *dest |= bits;
}
// Set meta bits in the meta byte of a traceid word; the byte is selected
// by the endian-dependent meta_offset.
inline void set_traceid_meta_bits(jbyte bits, traceid* dest) {
  set_meta_bits(bits, ((jbyte*)dest) + meta_offset);
}
inline void set_leakp_traceid_bits_cas(jbyte bits, traceid* dest) {
set_bits_cas(bits, ((jbyte*)dest) + leakp_offset);
// AND *dest with mask, i.e. clear every bit that is zero in mask.
// Plain (non-atomic) store — callers must provide any synchronization.
inline void set_meta_mask(jbyte mask, jbyte* const dest) {
  assert(dest != NULL, "invariant");
  *dest &= mask;
}
inline void set_leakp_traceid_mask(jbyte mask, traceid* dest) {
set_mask(mask, ((jbyte*)dest) + leakp_offset);
// Apply a clearing mask to the meta byte of a traceid word; the byte is
// selected by the endian-dependent meta_offset.
inline void set_traceid_meta_mask(jbyte mask, traceid* dest) {
  set_meta_mask(mask, ((jbyte*)dest) + meta_offset);
}
#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
// only used by a single thread with no visibility requirements
// NOTE(review): XOR only clears bits that are currently set in *dest;
// callers presumably guarantee 'bits' are set before calling — confirm,
// since an unset bit would be toggled ON instead of cleared.
inline void clear_meta_bits(jbyte bits, jbyte* const dest) {
  assert(dest != NULL, "invariant");
  *dest ^= bits;
}
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
......@@ -32,12 +32,8 @@
#define METHOD_USED_BIT (USED_BIT << 2)
#define EPOCH_1_SHIFT 0
#define EPOCH_2_SHIFT 1
#define LEAKP_SHIFT 8
#define USED_EPOCH_1_BIT (USED_BIT << EPOCH_1_SHIFT)
#define USED_EPOCH_2_BIT (USED_BIT << EPOCH_2_SHIFT)
#define LEAKP_USED_EPOCH_1_BIT (USED_EPOCH_1_BIT << LEAKP_SHIFT)
#define LEAKP_USED_EPOCH_2_BIT (USED_EPOCH_2_BIT << LEAKP_SHIFT)
#define METHOD_USED_EPOCH_1_BIT (METHOD_USED_BIT << EPOCH_1_SHIFT)
#define METHOD_USED_EPOCH_2_BIT (METHOD_USED_BIT << EPOCH_2_SHIFT)
#define METHOD_AND_CLASS_IN_USE_BITS (METHOD_USED_BIT | USED_BIT)
......@@ -75,14 +71,6 @@ class JfrTraceIdEpoch : AllStatic {
return _epoch_state ? USED_EPOCH_1_BIT : USED_EPOCH_2_BIT;
}
static traceid leakp_in_use_this_epoch_bit() {
return _epoch_state ? LEAKP_USED_EPOCH_2_BIT : LEAKP_USED_EPOCH_1_BIT;
}
static traceid leakp_in_use_prev_epoch_bit() {
return _epoch_state ? LEAKP_USED_EPOCH_1_BIT : LEAKP_USED_EPOCH_2_BIT;
}
static traceid method_in_use_this_epoch_bit() {
return _epoch_state ? METHOD_USED_EPOCH_2_BIT : METHOD_USED_EPOCH_1_BIT;
}
......
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -22,13 +22,8 @@
*
*/
#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
#define SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/support/jfrKlassExtension.hpp"
#include "utilities/globalDefinitions.hpp"
#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
/**
*
......@@ -38,150 +33,111 @@
*
* LeakProfiler subsystem gets its own byte and uses the same tagging scheme but is shifted up 8.
*
* We also tag the individual method by using the TraceFlag field,
* We also tag individual methods by using the _trace_flags field,
* (see jfr/support/jfrTraceIdExtension.hpp for details)
*
*/
// these are defined in jfr/support/jfrKlassExtension.hpp
// the following are defined in jfr/support/jfrKlassExtension.hpp
//
// #define JDK_JFR_EVENT_SUBKLASS 16
// #define JDK_JFR_EVENT_KLASS 32
// #define EVENT_HOST_KLASS 64
#define IS_JDK_JFR_EVENT_SUBKLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_SUBKLASS)) != 0)
#define ANY_USED_BITS (USED_EPOCH_2_BIT | \
USED_EPOCH_1_BIT | \
METHOD_USED_EPOCH_2_BIT | \
METHOD_USED_EPOCH_1_BIT | \
LEAKP_USED_EPOCH_2_BIT | \
LEAKP_USED_EPOCH_1_BIT)
#define TRACE_ID_META_BITS (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS | ANY_USED_BITS)
#define ANY_EVENT (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
#define IS_JDK_JFR_EVENT_KLASS(ptr) (((ptr)->trace_id() & JDK_JFR_EVENT_KLASS) != 0)
#define IS_EVENT_HOST_KLASS(ptr) (((ptr)->trace_id() & EVENT_HOST_KLASS) != 0)
#define IS_NOT_AN_EVENT_KLASS(ptr) (!IS_EVENT_KLASS(ptr))
#define IS_NOT_AN_EVENT_SUB_KLASS(ptr) (!IS_JDK_JFR_EVENT_SUBKLASS(ptr))
#define IS_NOT_JDK_JFR_EVENT_KLASS(ptr) (!IS_JDK_JFR_EVENT_KLASS(ptr))
#define EVENT_FLAGS_MASK(ptr) (((ptr)->trace_id() & ANY_EVENT) != 0)
#define UNEVENT(ptr) ((ptr)->set_trace_id(((ptr)->trace_id()) & ~ANY_EVENT))
#define TRACE_ID_SHIFT 16
#define TRACE_ID_MASKED(id) (id & ~TRACE_ID_META_BITS)
#define TRACE_ID_VALUE(id) (TRACE_ID_MASKED(id) >> TRACE_ID_SHIFT)
#define TRACE_ID_MASKED_PTR(ptr) (TRACE_ID_MASKED((ptr)->trace_id()))
#define TRACE_ID_RAW(ptr) ((ptr)->trace_id())
#define TRACE_ID(ptr) (TRACE_ID_MASKED_PTR(ptr) >> TRACE_ID_SHIFT)
#define METHOD_ID(kls, meth) (TRACE_ID_MASKED_PTR(kls) | (meth)->method_idnum())
#define SET_TAG(ptr, tag) (set_traceid_bits(tag, (ptr)->trace_id_addr()))
#define SET_LEAKP_TAG(ptr, tag) (set_leakp_traceid_bits(tag, (ptr)->trace_id_addr()))
#define SET_TAG_CAS(ptr, tag) (set_traceid_bits_cas(tag, (ptr)->trace_id_addr()))
#define SET_LEAKP_TAG_CAS(ptr, tag) (set_leakp_traceid_bits_cas(tag, (ptr)->trace_id_addr()))
#define IN_USE_THIS_EPOCH_BIT (JfrTraceIdEpoch::in_use_this_epoch_bit())
#define IN_USE_PREV_EPOCH_BIT (JfrTraceIdEpoch::in_use_prev_epoch_bit())
#define LEAKP_IN_USE_THIS_EPOCH_BIT (JfrTraceIdEpoch::leakp_in_use_this_epoch_bit())
#define LEAKP_IN_USE_PREV_EPOCH_BIT (JfrTraceIdEpoch::leakp_in_use_prev_epoch_bit())
#define METHOD_IN_USE_THIS_EPOCH_BIT (JfrTraceIdEpoch::method_in_use_this_epoch_bit())
#define METHOD_IN_USE_PREV_EPOCH_BIT (JfrTraceIdEpoch::method_in_use_prev_epoch_bit())
#define METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_this_epoch_bits())
#define METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_prev_epoch_bits())
#define UNUSE_THIS_EPOCH_MASK (~(IN_USE_THIS_EPOCH_BIT))
#define UNUSE_PREV_EPOCH_MASK (~(IN_USE_PREV_EPOCH_BIT))
#define LEAKP_UNUSE_THIS_EPOCH_MASK UNUSE_THIS_EPOCH_MASK
#define LEAKP_UNUSE_PREV_EPOCH_MASK UNUSE_PREV_EPOCH_MASK
#define UNUSE_METHOD_THIS_EPOCH_MASK (~(METHOD_IN_USE_THIS_EPOCH_BIT))
#define UNUSE_METHOD_PREV_EPOCH_MASK (~(METHOD_IN_USE_PREV_EPOCH_BIT))
#define LEAKP_UNUSE_METHOD_THIS_EPOCH_MASK (~(UNUSE_METHOD_THIS_EPOCH_MASK))
#define LEAKP_UNUSE_METHOD_PREV_EPOCH_MASK (~UNUSE_METHOD_PREV_EPOCH_MASK))
#define UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK (~(METHOD_IN_USE_THIS_EPOCH_BIT | IN_USE_THIS_EPOCH_BIT))
#define UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK (~(METHOD_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT))
#define SET_USED_THIS_EPOCH(ptr) (SET_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
#define SET_USED_PREV_EPOCH(ptr) (SET_TAG_CAS(ptr, IN_USE_PREV_EPOCH_BIT))
#define SET_LEAKP_USED_THIS_EPOCH(ptr) (SET_LEAKP_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
#define SET_LEAKP_USED_PREV_EPOCH(ptr) (SET_LEAKP_TAG_CAS(ptr, IN_USE_PREV_EPOCH_BIT))
#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (SET_TAG(kls, METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS))
#define USED_THIS_EPOCH(ptr) (((ptr)->trace_id() & IN_USE_THIS_EPOCH_BIT) != 0)
#define NOT_USED_THIS_EPOCH(ptr) (!USED_THIS_EPOCH(ptr))
#define USED_PREV_EPOCH(ptr) (((ptr)->trace_id() & IN_USE_PREV_EPOCH_BIT) != 0)
#define NOT_USED_PREV_EPOCH(ptr) (!USED_PREV_EPOCH(ptr))
#define USED_ANY_EPOCH(ptr) (((ptr)->trace_id() & (USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)) != 0)
#define NOT_USED_ANY_EPOCH(ptr) (!USED_ANY_EPOCH(ptr))
#define LEAKP_USED_THIS_EPOCH(ptr) (((ptr)->trace_id() & LEAKP_IN_USE_THIS_EPOCH_BIT) != 0)
#define LEAKP_NOT_USED_THIS_EPOCH(ptr) (!LEAKP_USED_THIS_EPOCH(ptr))
#define LEAKP_USED_PREV_EPOCH(ptr) (((ptr)->trace_id() & LEAKP_IN_USE_PREV_EPOCH_BIT) != 0)
#define LEAKP_NOT_USED_PREV_EPOCH(ptr) (!LEAKP_USED_PREV_EPOCH(ptr))
#define LEAKP_USED_ANY_EPOCH(ptr) (((ptr)->trace_id() & (LEAKP_USED_EPOCH_2_BIT | LEAKP_USED_EPOCH_1_BIT)) != 0)
#define LEAKP_NOT_USED_ANY_EPOCH(ptr) (!LEAKP_USED_ANY_EPOCH(ptr))
#define ANY_USED_THIS_EPOCH(ptr) (((ptr)->trace_id() & (LEAKP_IN_USE_THIS_EPOCH_BIT | IN_USE_THIS_EPOCH_BIT)) != 0)
#define ANY_NOT_USED_THIS_EPOCH(ptr) (!ANY_USED_THIS_EPOCH(ptr))
#define ANY_USED_PREV_EPOCH(ptr) (((ptr)->trace_id() & (LEAKP_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT)) != 0)
#define ANY_NOT_USED_PREV_EPOCH(ptr) (!ANY_USED_PREV_EPOCH(ptr))
#define METHOD_USED_THIS_EPOCH(kls) (((kls)->trace_id() & METHOD_IN_USE_THIS_EPOCH_BIT) != 0)
#define METHOD_NOT_USED_THIS_EPOCH(kls) (!METHOD_USED_THIS_EPOCH(kls))
#define METHOD_USED_PREV_EPOCH(kls) (((kls)->trace_id() & METHOD_IN_USE_PREV_EPOCH_BIT) != 0)
#define METHOD_NOT_USED_PREV_EPOCH(kls) (!METHOD_USED_PREV_EPOCH(kls))
#define METHOD_USED_ANY_EPOCH(kls) (((kls)->trace_id() & (METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)) != 0)
#define METHOD_NOT_USED_ANY_EPOCH(kls) (!METHOD_USED_ANY_EPOCH(kls))
#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls) ((((kls)->trace_id() & METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS) == \
METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS) != 0)
#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls) ((((kls)->trace_id() & METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS) == \
METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS) != 0)
#define METHOD_AND_CLASS_USED_ANY_EPOCH(kls) ((METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls)) != 0)
#define METHOD_AND_CLASS_NOT_USED_ANY_EPOCH(kls) (!METHOD_AND_CLASS_USED_ANY_EPOCH(kls))
#define LEAKP_METHOD_IN_USE_THIS_EPOCH (LEAKP_IN_USE_THIS_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)
#define LEAKP_METHOD_IN_USE_PREV_EPOCH (LEAKP_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_PREV_EPOCH_BIT)
#define LEAKP_METHOD_USED_THIS_EPOCH(ptr) ((((ptr)->trace_id() & LEAKP_METHOD_IN_USE_THIS_EPOCH) == \
LEAKP_METHOD_IN_USE_THIS_EPOCH) != 0)
#define LEAKP_METHOD_NOT_USED_THIS_EPOCH(kls) (!LEAKP_METHOD_USED_THIS_EPOCH(kls))
#define LEAKP_METHOD_USED_PREV_EPOCH(ptr) ((((ptr)->trace_id() & LEAKP_METHOD_IN_USE_PREV_EPOCH) == \
LEAKP_METHOD_IN_USE_PREV_EPOCH) != 0)
#define LEAKP_METHOD_NOT_USED_PREV_EPOCH(kls) (!LEAKP_METHOD_USED_PREV_EPOCH(kls))
#define UNUSE_THIS_EPOCH(ptr) (set_traceid_mask(UNUSE_THIS_EPOCH_MASK, (ptr)->trace_id_addr()))
#define UNUSE_PREV_EPOCH(ptr) (set_traceid_mask(UNUSE_PREV_EPOCH_MASK, (ptr)->trace_id_addr()))
#define UNUSE_METHOD_THIS_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
#define UNUSE_METHOD_PREV_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
#define LEAKP_UNUSE_THIS_EPOCH(ptr) (set_leakp_traceid_mask(UNUSE_THIS_EPOCH_MASK, (ptr)->trace_id_addr()))
#define LEAKP_UNUSE_PREV_EPOCH(ptr) (set_leakp_traceid_mask(UNUSE_PREV_EPOCH_MASK, (ptr)->trace_id_addr()))
#define LEAKP_UNUSE_METHOD_THIS_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
#define LEAKP_UNUSE_METHOD_PREV_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
#define ANY_USED(ptr) (((ptr)->trace_id() & ANY_USED_BITS) != 0)
#define ANY_NOT_USED(ptr) (!ANY_USED(ptr))
#define UNUSE_METHOD_AND_CLASS_THIS_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
#define LEAKP_UNUSE_METHOD_AND_CLASS_THIS_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
#define UNUSE_METHOD_AND_CLASS_PREV_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
#define LEAKP_UNUSE_METHODS_AND_CLASS_PREV_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
#define METHOD_FLAG_USED_THIS_EPOCH(m) ((m)->is_trace_flag_set((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit()))
#define METHOD_FLAG_NOT_USED_THIS_EPOCH(m) (!METHOD_FLAG_USED_THIS_EPOCH(m))
#define SET_METHOD_FLAG_USED_THIS_EPOCH(m) ((m)->set_trace_flag((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit()))
#define METHOD_FLAG_USED_PREV_EPOCH(m) ((m)->is_trace_flag_set((jbyte)JfrTraceIdEpoch::in_use_prev_epoch_bit()))
#define METHOD_FLAG_NOT_USED_PREV_EPOCH(m) (!METHOD_FLAG_USED_PREV_EPOCH(m))
#define METHOD_FLAG_USED_ANY_EPOCH(m) ((METHOD_FLAG_USED_THIS_EPOCH(m) || METHOD_FLAG_USED_PREV_EPOCH(m)) != 0)
#define METHOD_FLAG_NOT_USED_ANY_EPOCH(m) ((METHOD_FLAG_NOT_USED_THIS_EPOCH(m) && METHOD_FLAG_NOT_USED_PREV_EPOCH(m)) != 0)
#define CLEAR_METHOD_FLAG_USED_THIS_EPOCH(m) (clear_bits_cas((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit(), (m)->trace_flags_addr()))
#define CLEAR_METHOD_FLAG_USED_PREV_EPOCH(m) (clear_bits_cas((jbyte)JfrTraceIdEpoch::in_use_prev_epoch_bit(), (m)->trace_flags_addr()))
#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
// static bits
#define META_SHIFT 8
#define LEAKP_META_BIT USED_BIT
#define LEAKP_BIT (LEAKP_META_BIT << META_SHIFT)
#define TRANSIENT_META_BIT (USED_BIT << 1)
#define TRANSIENT_BIT (TRANSIENT_META_BIT << META_SHIFT)
#define SERIALIZED_META_BIT (USED_BIT << 2)
#define SERIALIZED_BIT (SERIALIZED_META_BIT << META_SHIFT)
#define TRACE_ID_SHIFT 16
#define METHOD_ID_NUM_MASK ((1 << TRACE_ID_SHIFT) - 1)
#define META_BITS (SERIALIZED_BIT | TRANSIENT_BIT | LEAKP_BIT)
#define EVENT_BITS (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
#define USED_BITS (METHOD_USED_EPOCH_2_BIT | METHOD_USED_EPOCH_1_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)
#define ALL_BITS (META_BITS | EVENT_BITS | USED_BITS)
#define ALL_BITS_MASK (~(ALL_BITS))
// epoch relative bits
#define IN_USE_THIS_EPOCH_BIT (JfrTraceIdEpoch::in_use_this_epoch_bit())
#define IN_USE_PREV_EPOCH_BIT (JfrTraceIdEpoch::in_use_prev_epoch_bit())
#define METHOD_IN_USE_THIS_EPOCH_BIT (JfrTraceIdEpoch::method_in_use_this_epoch_bit())
#define METHOD_IN_USE_PREV_EPOCH_BIT (JfrTraceIdEpoch::method_in_use_prev_epoch_bit())
#define METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_this_epoch_bits())
#define METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_prev_epoch_bits())
#define METHOD_FLAG_IN_USE_THIS_EPOCH_BIT ((jbyte)IN_USE_THIS_EPOCH_BIT)
#define METHOD_FLAG_IN_USE_PREV_EPOCH_BIT ((jbyte)IN_USE_PREV_EPOCH_BIT)
// operators
#define TRACE_ID_RAW(ptr) ((ptr)->trace_id())
#define TRACE_ID(ptr) (TRACE_ID_RAW(ptr) >> TRACE_ID_SHIFT)
#define TRACE_ID_MASKED(ptr) (TRACE_ID_RAW(ptr) & ALL_BITS_MASK)
#define TRACE_ID_PREDICATE(ptr, bits) ((TRACE_ID_RAW(ptr) & bits) != 0)
#define TRACE_ID_TAG(ptr, bits) (set_traceid_bits(bits, (ptr)->trace_id_addr()))
#define TRACE_ID_TAG_CAS(ptr, bits) (set_traceid_bits_cas(bits, (ptr)->trace_id_addr()))
#define TRACE_ID_CLEAR(ptr, bits) (set_traceid_mask(bits, (ptr)->trace_id_addr()))
#define TRACE_ID_META_TAG(ptr, bits) (set_traceid_meta_bits(bits, (ptr)->trace_id_addr()))
#define TRACE_ID_META_CLEAR(ptr, bits) (set_traceid_meta_mask(bits, (ptr)->trace_id_addr()))
#define METHOD_ID(kls, method) (TRACE_ID_MASKED(kls) | (method)->orig_method_idnum())
#define METHOD_FLAG_PREDICATE(method, bits) ((method)->is_trace_flag_set(bits))
#define METHOD_FLAG_TAG(method, bits) (set_bits(bits, (method)->trace_flags_addr()))
#define METHOD_META_TAG(method, bits) (set_meta_bits(bits, (method)->trace_meta_addr()))
#define METHOD_FLAG_CLEAR(method, bits) (clear_bits_cas(bits, (method)->trace_flags_addr()))
#define METHOD_META_CLEAR(method, bits) (set_meta_mask(bits, (method)->trace_meta_addr()))
// predicates
#define USED_THIS_EPOCH(ptr) (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_THIS_EPOCH_BIT)))
#define NOT_USED_THIS_EPOCH(ptr) (!(USED_THIS_EPOCH(ptr)))
#define USED_PREV_EPOCH(ptr) (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_PREV_EPOCH_BIT)))
#define USED_ANY_EPOCH(ptr) (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)))
#define METHOD_USED_THIS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_THIS_EPOCH_BIT)))
#define METHOD_NOT_USED_THIS_EPOCH(kls) (!(METHOD_USED_THIS_EPOCH(kls)))
#define METHOD_USED_PREV_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_PREV_EPOCH_BIT)))
#define METHOD_USED_ANY_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)))
#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS)))
#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS)))
#define METHOD_AND_CLASS_USED_ANY_EPOCH(kls) (METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls))
#define METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (METHOD_FLAG_IN_USE_THIS_EPOCH_BIT)))
#define METHOD_FLAG_NOT_USED_THIS_EPOCH(method) (!(METHOD_FLAG_USED_THIS_EPOCH(method)))
#define METHOD_FLAG_USED_PREV_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (METHOD_FLAG_IN_USE_PREV_EPOCH_BIT)))
// setters
#define SET_USED_THIS_EPOCH(ptr) (TRACE_ID_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_TAG(kls, METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS))
#define SET_METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_TAG(method, METHOD_FLAG_IN_USE_THIS_EPOCH_BIT))
#define CLEAR_METHOD_AND_CLASS_PREV_EPOCH_MASK (~(METHOD_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT))
#define CLEAR_METHOD_AND_CLASS_PREV_EPOCH(kls) (TRACE_ID_CLEAR(kls, CLEAR_METHOD_AND_CLASS_PREV_EPOCH_MASK))
#define CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method) (METHOD_FLAG_CLEAR(method, METHOD_FLAG_IN_USE_PREV_EPOCH_BIT))
// types
#define IS_JDK_JFR_EVENT_KLASS(kls) (TRACE_ID_PREDICATE(kls, JDK_JFR_EVENT_KLASS))
#define IS_JDK_JFR_EVENT_SUBKLASS(kls) (TRACE_ID_PREDICATE(kls, JDK_JFR_EVENT_SUBKLASS))
#define IS_NOT_AN_EVENT_SUB_KLASS(kls) (!(IS_JDK_JFR_EVENT_SUBKLASS(kls)))
#define IS_EVENT_HOST_KLASS(kls) (TRACE_ID_PREDICATE(kls, EVENT_HOST_KLASS))
#define SET_JDK_JFR_EVENT_KLASS(kls) (TRACE_ID_TAG(kls, JDK_JFR_EVENT_KLASS))
#define SET_JDK_JFR_EVENT_SUBKLASS(kls) (TRACE_ID_TAG(kls, JDK_JFR_EVENT_SUBKLASS))
#define SET_EVENT_HOST_KLASS(kls) (TRACE_ID_TAG(kls, EVENT_HOST_KLASS))
#define EVENT_KLASS_MASK(kls) (TRACE_ID_RAW(kls) & EVENT_BITS)
// meta
#define META_MASK (~(SERIALIZED_META_BIT | TRANSIENT_META_BIT | LEAKP_META_BIT))
#define SET_LEAKP(ptr) (TRACE_ID_META_TAG(ptr, LEAKP_META_BIT))
#define IS_LEAKP(ptr) (TRACE_ID_PREDICATE(ptr, LEAKP_BIT))
#define SET_TRANSIENT(ptr) (TRACE_ID_META_TAG(ptr, TRANSIENT_META_BIT))
#define IS_SERIALIZED(ptr) (TRACE_ID_PREDICATE(ptr, SERIALIZED_BIT))
#define IS_NOT_SERIALIZED(ptr) (!(IS_SERIALIZED(ptr)))
#define SHOULD_TAG(ptr) (NOT_USED_THIS_EPOCH(ptr))
#define SHOULD_TAG_KLASS_METHOD(ptr) (METHOD_NOT_USED_THIS_EPOCH(ptr))
#define SET_SERIALIZED(ptr) (TRACE_ID_META_TAG(ptr, SERIALIZED_META_BIT))
#define CLEAR_SERIALIZED(ptr) (TRACE_ID_META_CLEAR(ptr, META_MASK))
#define IS_METHOD_SERIALIZED(method) (METHOD_FLAG_PREDICATE(method, SERIALIZED_BIT))
#define IS_METHOD_LEAKP_USED(method) (METHOD_FLAG_PREDICATE(method, LEAKP_BIT))
#define METHOD_NOT_SERIALIZED(method) (!(IS_METHOD_SERIALIZED(method)))
#define SET_METHOD_LEAKP(method) (METHOD_META_TAG(method, LEAKP_META_BIT))
#define SET_METHOD_SERIALIZED(method) (METHOD_META_TAG(method, SERIALIZED_META_BIT))
#define CLEAR_METHOD_SERIALIZED(method) (METHOD_META_CLEAR(method, META_MASK))
#define CLEAR_LEAKP(ptr) (TRACE_ID_META_CLEAR(ptr, (~(LEAKP_META_BIT))))
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
......@@ -38,7 +38,7 @@ JfrChunkState::JfrChunkState() :
_start_nanos(0),
_previous_start_ticks(0),
_previous_start_nanos(0),
_previous_checkpoint_offset(0) {}
_last_checkpoint_offset(0) {}
JfrChunkState::~JfrChunkState() {
reset();
......@@ -49,15 +49,15 @@ void JfrChunkState::reset() {
JfrCHeapObj::free(_path, strlen(_path) + 1);
_path = NULL;
}
set_previous_checkpoint_offset(0);
set_last_checkpoint_offset(0);
}
void JfrChunkState::set_previous_checkpoint_offset(jlong offset) {
_previous_checkpoint_offset = offset;
void JfrChunkState::set_last_checkpoint_offset(int64_t offset) {
_last_checkpoint_offset = offset;
}
jlong JfrChunkState::previous_checkpoint_offset() const {
return _previous_checkpoint_offset;
int64_t JfrChunkState::last_checkpoint_offset() const {
return _last_checkpoint_offset;
}
jlong JfrChunkState::previous_start_ticks() const {
......
......@@ -37,7 +37,7 @@ class JfrChunkState : public JfrCHeapObj {
jlong _start_nanos;
jlong _previous_start_ticks;
jlong _previous_start_nanos;
jlong _previous_checkpoint_offset;
int64_t _last_checkpoint_offset;
void update_start_ticks();
void update_start_nanos();
......@@ -47,8 +47,8 @@ class JfrChunkState : public JfrCHeapObj {
JfrChunkState();
~JfrChunkState();
void reset();
jlong previous_checkpoint_offset() const;
void set_previous_checkpoint_offset(jlong offset);
int64_t last_checkpoint_offset() const;
void set_last_checkpoint_offset(int64_t offset);
jlong previous_start_ticks() const;
jlong previous_start_nanos() const;
jlong last_chunk_duration() const;
......
......@@ -87,7 +87,7 @@ void JfrChunkWriter::write_header(intptr_t metadata_offset) {
// Chunk size
this->write_be_at_offset((jlong)size_written(), CHUNK_SIZE_OFFSET);
// initial checkpoint event offset
this->write_be_at_offset(_chunkstate->previous_checkpoint_offset(), CHUNK_SIZE_OFFSET + (1 * FILEHEADER_SLOT_SIZE));
this->write_be_at_offset(_chunkstate->last_checkpoint_offset(), CHUNK_SIZE_OFFSET + (1 * FILEHEADER_SLOT_SIZE));
// metadata event offset
this->write_be_at_offset((jlong)metadata_offset, CHUNK_SIZE_OFFSET + (2 * FILEHEADER_SLOT_SIZE));
// start of chunk in nanos since epoch
......@@ -106,12 +106,12 @@ intptr_t JfrChunkWriter::size_written() const {
return this->is_valid() ? this->current_offset() : 0;
}
intptr_t JfrChunkWriter::previous_checkpoint_offset() const {
return _chunkstate->previous_checkpoint_offset();
int64_t JfrChunkWriter::last_checkpoint_offset() const {
return _chunkstate->last_checkpoint_offset();
}
void JfrChunkWriter::set_previous_checkpoint_offset(intptr_t offset) {
_chunkstate->set_previous_checkpoint_offset(offset);
void JfrChunkWriter::set_last_checkpoint_offset(int64_t offset) {
_chunkstate->set_last_checkpoint_offset(offset);
}
void JfrChunkWriter::time_stamp_chunk_now() {
......
......@@ -49,8 +49,8 @@ class JfrChunkWriter : public JfrChunkWriterBase {
JfrChunkWriter();
bool initialize();
intptr_t size_written() const;
intptr_t previous_checkpoint_offset() const;
void set_previous_checkpoint_offset(intptr_t offset);
int64_t last_checkpoint_offset() const;
void set_last_checkpoint_offset(int64_t offset);
void time_stamp_chunk_now();
};
......
......@@ -106,10 +106,6 @@ u4 JfrOptionSet::stackdepth() {
return _stack_depth;
}
static const u4 STACK_DEPTH_DEFAULT = 64;
static const u4 MIN_STACK_DEPTH = 1;
static const u4 MAX_STACK_DEPTH = 2048;
void JfrOptionSet::set_stackdepth(u4 depth) {
if (depth < MIN_STACK_DEPTH) {
_stack_depth = MIN_STACK_DEPTH;
......
......@@ -132,13 +132,13 @@ class RotationLock : public StackObj {
};
static intptr_t write_checkpoint_event_prologue(JfrChunkWriter& cw, u8 type_id) {
const intptr_t prev_cp_offset = cw.previous_checkpoint_offset();
const intptr_t prev_cp_relative_offset = 0 == prev_cp_offset ? 0 : prev_cp_offset - cw.current_offset();
const int64_t last_cp_offset = cw.last_checkpoint_offset();
const int64_t delta_to_last_checkpoint = 0 == last_cp_offset ? 0 : last_cp_offset - cw.current_offset();
cw.reserve(sizeof(u4));
cw.write<u8>(EVENT_CHECKPOINT);
cw.write(JfrTicks::now());
cw.write<jlong>((jlong)0);
cw.write<jlong>((jlong)prev_cp_relative_offset); // write previous checkpoint offset delta
cw.write((int64_t)0); // duration
cw.write(delta_to_last_checkpoint);
cw.write<bool>(false); // flushpoint
cw.write<u4>((u4)1); // nof types in this checkpoint
cw.write<u8>(type_id);
......@@ -177,7 +177,7 @@ class WriteCheckpointEvent : public StackObj {
_cw.write_padded_at_offset<u4>(number_of_elements, num_elements_offset);
_cw.write_padded_at_offset<u4>((u4)_cw.current_offset() - current_cp_offset, current_cp_offset);
// update writer with last checkpoint position
_cw.set_previous_checkpoint_offset(current_cp_offset);
_cw.set_last_checkpoint_offset(current_cp_offset);
return true;
}
};
......@@ -316,19 +316,16 @@ void JfrRecorderService::rotate(int msgs) {
vm_error = true;
prepare_for_vm_error_rotation();
}
if (msgs & (MSGBIT(MSG_STOP))) {
stop();
}
// action determined by chunkwriter state
if (!_chunkwriter.is_valid()) {
if (!_storage.control().to_disk()) {
in_memory_rotation();
return;
}
if (vm_error) {
} else if (vm_error) {
vm_error_rotation();
return;
} else {
chunk_rotation();
}
if (msgs & (MSGBIT(MSG_STOP))) {
stop();
}
chunk_rotation();
}
void JfrRecorderService::prepare_for_vm_error_rotation() {
......@@ -399,12 +396,6 @@ static void write_stacktrace_checkpoint(JfrStackTraceRepository& stack_trace_rep
WriteStackTraceCheckpoint write_stack_trace_checkpoint(chunkwriter, TYPE_STACKTRACE, write_stacktrace_repo);
write_stack_trace_checkpoint.process();
}
static void write_object_sample_stacktrace(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repository) {
WriteObjectSampleStacktrace object_sample_stacktrace(sampler, stack_trace_repository);
object_sample_stacktrace.process();
}
static void write_stringpool_checkpoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
WriteStringPool write_string_pool(string_pool);
WriteStringPoolCheckpoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
......@@ -439,9 +430,7 @@ void JfrRecorderService::pre_safepoint_write() {
if (LeakProfiler::is_running()) {
// Exclusive access to the object sampler instance.
// The sampler is released (unlocked) later in post_safepoint_write.
ObjectSampler* const sampler = ObjectSampler::acquire();
assert(sampler != NULL, "invariant");
write_object_sample_stacktrace(sampler, _stack_trace_repository);
ObjectSampleCheckpoint::on_rotation(ObjectSampler::acquire(), _stack_trace_repository);
}
_storage.write();
}
......
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/vframe.hpp"
// Deep-copies 'length' stack frames from rhs_frames into a freshly
// allocated C-heap array and publishes it through *lhs_frames.
// For an empty sequence nothing is allocated and *lhs_frames is left
// untouched.
static void copy_frames(JfrStackFrame** lhs_frames, u4 length, const JfrStackFrame* rhs_frames) {
  assert(lhs_frames != NULL, "invariant");
  assert(rhs_frames != NULL, "invariant");
  if (length == 0) {
    return;
  }
  JfrStackFrame* const dest = NEW_C_HEAP_ARRAY(JfrStackFrame, length, mtTracing);
  memcpy(dest, rhs_frames, length * sizeof(JfrStackFrame));
  *lhs_frames = dest;
}
// Frame captured with its Method* retained; the source line is resolved
// lazily (see resolve_lineno()), hence _line starts at 0.
JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, const Method* method) :
  _method(method), _methodid(id), _line(0), _bci(bci), _type(type) {}

// Frame with an already-resolved line number; no Method* is retained.
JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, int lineno) :
  _method(NULL), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
// Wraps a caller-supplied frame buffer of capacity 'max_frames';
// this instance does not own the frames (_frames_ownership == false).
JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
  _next(NULL),
  _frames(frames),
  _id(0),
  _hash(0),
  _nr_of_frames(0),
  _max_frames(max_frames),
  _frames_ownership(false),
  _reached_root(false),
  _lineno(false),
  _written(false) {}

// Interning copy constructor: deep-copies the frames of 'trace'
// (so this instance owns them) and links the new entry into an
// externally managed chain via 'next' (a hash-bucket list).
JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next) :
  _next(next),
  _frames(NULL),
  _id(id),
  _hash(trace._hash),
  _nr_of_frames(trace._nr_of_frames),
  _max_frames(trace._max_frames),
  _frames_ownership(true),
  _reached_root(trace._reached_root),
  _lineno(trace._lineno),
  _written(false) {
  copy_frames(&_frames, trace._nr_of_frames, trace._frames);
}
// Release the frame array, but only when this instance owns it
// (deep copies created by the interning constructor do; wrappers of
// caller-supplied buffers do not).
JfrStackTrace::~JfrStackTrace() {
  if (!_frames_ownership) {
    return;
  }
  FREE_C_HEAP_ARRAY(JfrStackFrame, _frames, mtTracing);
}
template <typename Writer>
static void write_stacktrace(Writer& w, traceid id, bool reached_root, u4 nr_of_frames, const JfrStackFrame* frames) {
w.write((u8)id);
w.write((u1)!reached_root);
w.write(nr_of_frames);
for (u4 i = 0; i < nr_of_frames; ++i) {
frames[i].write(w);
}
}
// Serialize this trace to the chunk writer. A trace is written to a
// chunk at most once; _written enforces that invariant.
void JfrStackTrace::write(JfrChunkWriter& sw) const {
  assert(!_written, "invariant");
  write_stacktrace(sw, _id, _reached_root, _nr_of_frames, _frames);
  _written = true;
}
// Serialize this trace to a checkpoint writer. Note that, unlike the
// chunk-writer overload, no _written bookkeeping is maintained here.
void JfrStackTrace::write(JfrCheckpointWriter& cpw) const {
  write_stacktrace(cpw, _id, _reached_root, _nr_of_frames, _frames);
}
// Two frames are equal when method id, bytecode index and frame type
// all agree; the resolved line number is derived state and not compared.
bool JfrStackFrame::equals(const JfrStackFrame& rhs) const {
  if (_methodid != rhs._methodid) {
    return false;
  }
  if (_bci != rhs._bci) {
    return false;
  }
  return _type == rhs._type;
}
bool JfrStackTrace::equals(const JfrStackTrace& rhs) const {
if (_reached_root != rhs._reached_root || _nr_of_frames != rhs._nr_of_frames || _hash != rhs._hash) {
return false;
}
for (u4 i = 0; i < _nr_of_frames; ++i) {
if (!_frames[i].equals(rhs._frames[i])) {
return false;
}
}
return true;
}
// Emits one frame as (method id, line, bci, type) using the writer's
// primitive write operations; field widths match the JFR frame layout
// used by write() below.
template <typename Writer>
static void write_frame(Writer& writer, traceid methodid, int line, int bci, u1 type) {
  writer.write((u8)methodid);
  writer.write((u4)line);
  writer.write((u4)bci);
  writer.write((u8)type);
}
// Serialize this frame to the chunk writer.
void JfrStackFrame::write(JfrChunkWriter& cw) const {
  write_frame(cw, _methodid, _line, _bci, _type);
}
// Serialize this frame to a checkpoint writer (same layout as the
// chunk-writer overload).
void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
  write_frame(cpw, _methodid, _line, _bci, _type);
}
// vframeStream variant for asynchronous sampling: it is seeded from an
// explicit frame (rather than the thread's last Java frame) and walks
// sender frames defensively via samples_next(), since the sampled
// thread's stack may be in an inconsistent state.
class vframeStreamSamples : public vframeStreamCommon {
 public:
  // constructor that starts with sender of frame fr (top_frame)
  vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub) : vframeStreamCommon(jt) {
    _stop_at_java_call_stub = stop_at_java_call_stub;
    _frame = fr;
    // We must always have a valid frame to start filling
    bool filled_in = fill_from_frame();
    assert(filled_in, "invariant");
  }
  void samples_next();
  void stop() {}
};
// Solaris SPARC Compiler1 needs an additional check on the grandparent
// of the top_frame when the parent of the top_frame is interpreted and
// the grandparent is compiled. However, in this method we do not know
// the relationship of the current _frame relative to the top_frame so
// we implement a more broad sanity check. When the previous callee is
// interpreted and the current sender is compiled, we verify that the
// current sender is also walkable. If it is not walkable, then we mark
// the current vframeStream as at the end.
// Advance to the next vframe, tolerating a stack that may be unsafe to
// walk (asynchronous sample). Marks the stream at_end instead of
// crashing when a sender frame is unwalkable or the loop bound is hit.
void vframeStreamSamples::samples_next() {
  // handle frames with inlining
  if (_mode == compiled_mode &&
      vframeStreamCommon::fill_in_compiled_inlined_sender()) {
    return;
  }
  // handle general case
  u4 loop_count = 0;
  // Hard cap on the walk: twice the maximum recordable stack depth.
  u4 loop_max = MAX_STACK_DEPTH * 2;
  do {
    loop_count++;
    // By the time we get here we should never see unsafe but better safe then segv'd
    if (loop_count > loop_max || !_frame.safe_for_sender(_thread)) {
      _mode = at_end_mode;
      return;
    }
    _frame = _frame.sender(&_reg_map);
  } while (!fill_from_frame());
}
// Records the stack of 'thread' starting at 'frame' (asynchronous
// thread-sampling path). Returns false when an invalid Method* is
// encountered, in which case the entire partially gathered sample must
// be discarded by the caller. Line numbers are resolved eagerly here,
// and _lineno is set accordingly, so resolve_linenos() is not needed
// (and will not run) for traces recorded through this path.
bool JfrStackTrace::record_thread(JavaThread& thread, frame& frame) {
  vframeStreamSamples st(&thread, frame, false);
  u4 count = 0;
  _reached_root = true;
  while (!st.at_end()) {
    if (count >= _max_frames) {
      _reached_root = false;
      break;
    }
    const Method* method = st.method();
    if (!method->is_valid_method()) {
      // we throw away everything we've gathered in this sample since
      // none of it is safe
      return false;
    }
    const traceid mid = JfrTraceId::use(method);
    int type = st.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
    int bci = 0;
    if (method->is_native()) {
      type = JfrStackFrame::FRAME_NATIVE;
    } else {
      bci = st.bci();
    }
    const int lineno = method->line_number_from_bci(bci);
    // Can we determine if it's inlined?
    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
    // Store the frame with its already-resolved line number. The
    // previous code computed 'lineno' but then built the frame with the
    // Method* constructor (leaving _line == 0) while still setting
    // _lineno = true below, so resolve_linenos() never ran and sampled
    // traces were written with line number 0 for every frame.
    _frames[count] = JfrStackFrame(mid, bci, type, lineno);
    st.samples_next();
    count++;
  }
  _lineno = true;
  _nr_of_frames = count;
  return true;
}
// Resolve the source line from the retained Method* and this frame's
// bci. Only legal for frames created with a Method* and not yet
// resolved (_line == 0).
void JfrStackFrame::resolve_lineno() const {
  assert(_method, "no method pointer");
  assert(_line == 0, "already have linenumber");
  _line = _method->line_number_from_bci(_bci);
}
// Resolve the line number of every recorded frame from its retained
// Method*, then mark this trace as carrying line information.
void JfrStackTrace::resolve_linenos() const {
  const unsigned int count = _nr_of_frames;
  for (unsigned int idx = 0; idx != count; ++idx) {
    _frames[idx].resolve_lineno();
  }
  _lineno = true;
}
// Record the current thread's stack, seeding the vframeStream according
// to 'mode' (see StackWalkMode), skipping the first 'skip' frames.
// Returns the result of fill_in(); unknown modes are a fatal error.
bool JfrStackTrace::record_safe(JavaThread* thread, int skip, StackWalkMode mode) {
  if (mode == WALK_BY_DEFAULT) {
    vframeStream vfs(thread);
    return fill_in(vfs, skip, mode);
  }
  if (mode == WALK_BY_CURRENT_FRAME) {
    vframeStream vfs(thread, os::current_frame());
    return fill_in(vfs, skip, mode);
  }
  ShouldNotReachHere();
  return false;
}
// Walks 'vfs' after skipping 'skip' frames, recording up to _max_frames
// frames into _frames and accumulating _hash. The top (first visited)
// frame may carry an invalid bci when walking WALK_BY_CURRENT_FRAME
// (JIT runtime leaf call); in that case the bci cached in the thread's
// JFR thread-local is substituted. Line numbers are NOT resolved here
// (_lineno stays untouched); callers resolve them later via
// resolve_linenos(). Always returns true.
bool JfrStackTrace::fill_in(vframeStream& vfs, int skip, StackWalkMode mode) {
  u4 count = 0;
  _reached_root = true;
  // Indicates whether the top frame is visited in this frames iteration.
  // Top frame bci may be invalid and fill_in() will fix the top frame bci in a conservative way.
  bool top_frame_visited = false;
  for (int i = 0; i < skip; i++) {
    if (vfs.at_end()) {
      break;
    }
    // The top frame is in skip list.
    // Mark top_frame_visited to avoid unnecessary top frame bci fixing.
    if (!top_frame_visited) {
      top_frame_visited = true;
    }
    vfs.next();
  }
  while (!vfs.at_end()) {
    if (count >= _max_frames) {
      // Truncated: more frames remain below the recorded ones.
      _reached_root = false;
      break;
    }
    const Method* method = vfs.method();
    const traceid mid = JfrTraceId::use(method);
    int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
    int bci = 0;
    if (method->is_native()) {
      type = JfrStackFrame::FRAME_NATIVE;
      // The top frame is in native.
      // Mark top_frame_visited to avoid unnecessary top frame bci fixing.
      if (!top_frame_visited) {
        top_frame_visited = true;
      }
    }
    else {
      bci = vfs.bci();
      // Hit the top frame and fix bci here.
      if (!top_frame_visited) {
        if (mode == WALK_BY_CURRENT_FRAME) {
          // Only fix opto fast path allocation.
          // All fast path allocations do not have cached event id.
          if (!vfs.thread_ref()->jfr_thread_local()->has_cached_event_id()) {
            assert(vfs.thread_ref()->jfr_thread_local()->has_cached_top_frame_bci(), "Invariant");
            bci = vfs.thread_ref()->jfr_thread_local()->cached_top_frame_bci();
          }
        }
        top_frame_visited = true;
      }
    }
    // Can we determine if it's inlined?
    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
    _frames[count] = JfrStackFrame(mid, bci, type, method);
    vfs.next();
    count++;
  }
  _nr_of_frames = count;
  return true;
}
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
#define SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrTypes.hpp"
class frame;
class JavaThread;
class JfrCheckpointWriter;
class JfrChunkWriter;
class Method;
// Selects how the vframeStream is seeded when recording a stack trace
// (see JfrStackTrace::record_safe()).
enum StackWalkMode {
  // walk stack by vframeStream vfs(thread).
  WALK_BY_DEFAULT = 0,
  // walk stack by vframeStream vfs(thread, os::current_frame()).
  // It is only used in JIT runtime leaf call. In JIT runtime leaf call,
  // last_java_sp is not maintained and WALK_BY_DEFAULT can not walk stack.
  WALK_BY_CURRENT_FRAME
};
// One frame of a recorded stack trace, identified by (method id, bci,
// frame type). The source line is either supplied at construction or
// resolved lazily from a retained Method* (see resolve_lineno()).
class JfrStackFrame {
  friend class ObjectSampleCheckpoint;
 private:
  const Method* _method;  // retained for lazy line resolution; NULL when _line was supplied
  traceid _methodid;      // JFR trace id of the method
  mutable int _line;      // source line number; 0 until resolved
  int _bci;               // bytecode index (left 0 for native frames)
  u1 _type;               // one of the FRAME_* constants below
 public:
  JfrStackFrame(const traceid& id, int bci, int type, const Method* method);
  JfrStackFrame(const traceid& id, int bci, int type, int lineno);
  bool equals(const JfrStackFrame& rhs) const;
  void write(JfrChunkWriter& cw) const;
  void write(JfrCheckpointWriter& cpw) const;
  void resolve_lineno() const;
  // Frame kinds as serialized in the _type field.
  enum {
    FRAME_INTERPRETER = 0,
    FRAME_JIT,
    FRAME_INLINE,
    FRAME_NATIVE,
    NUM_FRAME_TYPES
  };
};
// A recorded stack trace: an array of JfrStackFrames (owned or
// borrowed, per _frames_ownership) plus identity (_id), a precomputed
// _hash for interning, and serialization bookkeeping. All mutation and
// serialization is reserved for the befriended JFR components; external
// code can only query hash() and id().
class JfrStackTrace : public JfrCHeapObj {
  friend class JfrNativeSamplerCallback;
  friend class JfrStackTraceRepository;
  friend class ObjectSampleCheckpoint;
  friend class ObjectSampler;
  friend class OSThreadSampler;
  friend class StackTraceResolver;
 private:
  const JfrStackTrace* _next;  // hash-bucket chain link for interned traces
  JfrStackFrame* _frames;      // frame storage; owned iff _frames_ownership
  traceid _id;                 // unique trace id
  unsigned int _hash;          // accumulated over (method id, bci, type) of all frames
  u4 _nr_of_frames;            // frames actually recorded
  u4 _max_frames;              // capacity of _frames
  bool _frames_ownership;      // true when _frames was allocated by this instance
  bool _reached_root;          // false when the walk was truncated at _max_frames
  mutable bool _lineno;        // true once line numbers are resolved for all frames
  mutable bool _written;       // true once serialized to the current chunk
  const JfrStackTrace* next() const { return _next; }
  bool should_write() const { return !_written; }
  void write(JfrChunkWriter& cw) const;
  void write(JfrCheckpointWriter& cpw) const;
  bool equals(const JfrStackTrace& rhs) const;
  void set_id(traceid id) { _id = id; }
  void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
  void set_hash(unsigned int hash) { _hash = hash; }
  void set_reached_root(bool reached_root) { _reached_root = reached_root; }
  void resolve_linenos() const;
  bool record_thread(JavaThread& thread, frame& frame);
  bool record_safe(JavaThread* thread, int skip, StackWalkMode stack_walk_mode);
  bool have_lineno() const { return _lineno; }
  bool full_stacktrace() const { return _reached_root; }
  JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next);
  JfrStackTrace(JfrStackFrame* frames, u4 max_frames);
  ~JfrStackTrace();
  bool fill_in(vframeStream& vfs, int skip, StackWalkMode mode);
 public:
  unsigned int hash() const { return _hash; }
  traceid id() const { return _id; }
};
#endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
/*
* Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -24,66 +24,18 @@
#include "precompiled.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allocation.inline.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/task.hpp"
#include "runtime/vframe.hpp"
class vframeStreamSamples : public vframeStreamCommon {
public:
// constructor that starts with sender of frame fr (top_frame)
vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub);
void samples_next();
void stop() {}
};
vframeStreamSamples::vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub) : vframeStreamCommon(jt) {
_stop_at_java_call_stub = stop_at_java_call_stub;
_frame = fr;
// We must always have a valid frame to start filling
bool filled_in = fill_from_frame();
assert(filled_in, "invariant");
}
// Solaris SPARC Compiler1 needs an additional check on the grandparent
// of the top_frame when the parent of the top_frame is interpreted and
// the grandparent is compiled. However, in this method we do not know
// the relationship of the current _frame relative to the top_frame so
// we implement a more broad sanity check. When the previous callee is
// interpreted and the current sender is compiled, we verify that the
// current sender is also walkable. If it is not walkable, then we mark
// the current vframeStream as at the end.
void vframeStreamSamples::samples_next() {
// handle frames with inlining
if (_mode == compiled_mode &&
vframeStreamCommon::fill_in_compiled_inlined_sender()) {
return;
}
static JfrStackTraceRepository* _instance = NULL;
// handle general case
int loop_count = 0;
int loop_max = MaxJavaStackTraceDepth * 2;
do {
loop_count++;
// By the time we get here we should never see unsafe but better safe then segv'd
if (loop_count > loop_max || !_frame.safe_for_sender(_thread)) {
_mode = at_end_mode;
return;
}
_frame = _frame.sender(&_reg_map);
} while (!fill_from_frame());
JfrStackTraceRepository::JfrStackTraceRepository() : _next_id(0), _entries(0) {
memset(_table, 0, sizeof(_table));
}
static JfrStackTraceRepository* _instance = NULL;
// Returns the singleton repository; create() must have been called first.
JfrStackTraceRepository& JfrStackTraceRepository::instance() {
  return *_instance;
}
......@@ -94,15 +46,6 @@ JfrStackTraceRepository* JfrStackTraceRepository::create() {
return _instance;
}
// Tears down the singleton repository (fixes assert message typo "invarinat").
void JfrStackTraceRepository::destroy() {
  assert(_instance != NULL, "invariant");
  delete _instance;
  _instance = NULL;
}
// Starts with an empty bucket table and the id counter at zero.
JfrStackTraceRepository::JfrStackTraceRepository() : _next_id(0), _entries(0) {
  memset(_table, 0, sizeof(_table));
}
class JfrFrameType : public JfrSerializer {
public:
void serialize(JfrCheckpointWriter& writer) {
......@@ -122,100 +65,10 @@ bool JfrStackTraceRepository::initialize() {
return JfrSerializer::register_serializer(TYPE_FRAMETYPE, false, true, new JfrFrameType());
}
// Deletes every interned stack trace and empties the bucket table.
// Returns the number of entries removed. Guarded by JfrStacktrace_lock.
size_t JfrStackTraceRepository::clear() {
  MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
  if (_entries == 0) {
    return 0;
  }
  // Walk each bucket chain, deleting nodes.
  for (u4 i = 0; i < TABLE_SIZE; ++i) {
    JfrStackTraceRepository::StackTrace* stacktrace = _table[i];
    while (stacktrace != NULL) {
      JfrStackTraceRepository::StackTrace* next = stacktrace->next();
      delete stacktrace;
      stacktrace = next;
    }
  }
  memset(_table, 0, sizeof(_table));
  const size_t processed = _entries;
  _entries = 0;
  return processed;
}
// Interns a stack trace: returns the id of an existing equal entry, or
// inserts a heap copy at the head of its bucket chain and returns a fresh
// id. Returns 0 when the trace has no resolved line numbers yet -- the
// caller resolves them and retries (see add()). Guarded by JfrStacktrace_lock.
traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
  MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
  const size_t index = stacktrace._hash % TABLE_SIZE;
  const StackTrace* table_entry = _table[index];
  while (table_entry != NULL) {
    if (table_entry->equals(stacktrace)) {
      return table_entry->id();
    }
    table_entry = table_entry->next();
  }
  // Only fully resolved traces are stored.
  if (!stacktrace.have_lineno()) {
    return 0;
  }
  traceid id = ++_next_id;
  _table[index] = new StackTrace(id, stacktrace, _table[index]);
  ++_entries;
  return id;
}
// Interns 'stacktrace', resolving line numbers on demand: the first attempt
// can be rejected by add_trace() (returns 0) when line numbers are missing,
// in which case they are resolved and the insert retried.
traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
  traceid id = instance().add_trace(stacktrace);
  if (id != 0) {
    return id;
  }
  stacktrace.resolve_linenos();
  id = instance().add_trace(stacktrace);
  assert(id != 0, "invariant");
  return id;
}
// Records the current thread's stack and returns the interned trace id.
// Short-circuits to the cached id when one is attached to the thread-local
// state; returns 0 for non-Java or hidden threads, or when the thread-local
// frame buffer could not be allocated.
traceid JfrStackTraceRepository::record(Thread* thread, int skip, StackWalkMode mode) {
  assert(thread == Thread::current(), "invariant");
  JfrThreadLocal* const tl = thread->jfr_thread_local();
  assert(tl != NULL, "invariant");
  if (tl->has_cached_stack_trace()) {
    return tl->cached_stack_trace_id();
  }
  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
    return 0;
  }
  JfrStackFrame* frames = tl->stackframes();
  if (frames == NULL) {
    // pending oom
    return 0;
  }
  assert(frames != NULL, "invariant");
  assert(tl->stackframes() == frames, "invariant");
  return instance().record_for((JavaThread*)thread, skip, mode, frames, tl->stackdepth());
}
// Walks 'thread''s stack into the caller-supplied 'frames' buffer (at most
// 'max_frames') and interns the result; returns 0 on walk failure.
// 'false' is the leakp argument of record_safe: the regular recording path.
traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, StackWalkMode mode, JfrStackFrame *frames, u4 max_frames) {
  JfrStackTrace stacktrace(frames, max_frames);
  return stacktrace.record_safe(thread, skip, false, mode) ? add(stacktrace) : 0;
}
// Interns an already-captured stack trace on behalf of 'thread'.
// The thread argument is only asserted on here.
traceid JfrStackTraceRepository::add(const JfrStackTrace* stacktrace, JavaThread* thread) {
  assert(stacktrace != NULL, "invariant");
  assert(thread != NULL, "invariant");
  assert(stacktrace->hash() != 0, "invariant");
  return add(*stacktrace);
}
// Fills in *stacktrace for the current thread. If a trace hash is already
// cached in the thread-local state, only the hash is installed and no stack
// walk is performed; otherwise a leak-profiler walk (leakp == true) is done.
// Returns true on success.
// Note: restores the function-closing brace lost in the merge (the next
// definition previously began before this function was closed).
bool JfrStackTraceRepository::fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip, StackWalkMode mode) {
  assert(thread == Thread::current(), "invariant");
  assert(stacktrace != NULL, "invariant");
  JfrThreadLocal* const tl = thread->jfr_thread_local();
  assert(tl != NULL, "invariant");
  const unsigned int cached_stacktrace_hash = tl->cached_stack_trace_hash();
  if (cached_stacktrace_hash != 0) {
    stacktrace->set_hash(cached_stacktrace_hash);
    return true;
  }
  return stacktrace->record_safe(thread, skip, true, mode);
}
// Tears down the singleton repository (fixes assert message typo "invarinat").
void JfrStackTraceRepository::destroy() {
  assert(_instance != NULL, "invariant");
  delete _instance;
  _instance = NULL;
}
size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
......@@ -223,9 +76,9 @@ size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
assert(_entries > 0, "invariant");
int count = 0;
for (u4 i = 0; i < TABLE_SIZE; ++i) {
JfrStackTraceRepository::StackTrace* stacktrace = _table[i];
JfrStackTrace* stacktrace = _table[i];
while (stacktrace != NULL) {
JfrStackTraceRepository::StackTrace* next = stacktrace->next();
JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
if (stacktrace->should_write()) {
stacktrace->write(sw);
++count;
......@@ -249,7 +102,7 @@ size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) {
traceid JfrStackTraceRepository::write(JfrCheckpointWriter& writer, traceid id, unsigned int hash) {
assert(JfrStacktrace_lock->owned_by_self(), "invariant");
const StackTrace* const trace = resolve_entry(hash, id);
const JfrStackTrace* const trace = lookup(hash, id);
assert(trace != NULL, "invariant");
assert(trace->hash() == hash, "invariant");
assert(trace->id() == id, "invariant");
......@@ -257,84 +110,105 @@ traceid JfrStackTraceRepository::write(JfrCheckpointWriter& writer, traceid id,
return id;
}
// Heap-resident copy of a captured stack trace, linked into a bucket chain.
// Makes a deep copy of the frame array so the caller's stack-allocated
// frame buffer can be reused.
JfrStackTraceRepository::StackTrace::StackTrace(traceid id, const JfrStackTrace& trace, JfrStackTraceRepository::StackTrace* next) :
  _next(next),
  _frames(NULL),
  _id(id),
  _nr_of_frames(trace._nr_of_frames),
  _hash(trace._hash),
  _reached_root(trace._reached_root),
  _written(false) {
  if (_nr_of_frames > 0) {
    _frames = NEW_C_HEAP_ARRAY(JfrStackFrame, _nr_of_frames, mtTracing);
    memcpy(_frames, trace._frames, _nr_of_frames * sizeof(JfrStackFrame));
  }
}
// Releases the deep copy of the frame array made by the constructor.
// Note: restores the function-closing brace lost in the merge.
JfrStackTraceRepository::StackTrace::~StackTrace() {
  if (_frames != NULL) {
    FREE_C_HEAP_ARRAY(JfrStackFrame, _frames, mtTracing);
  }
}
// Serializes the FRAMETYPE constant pool into the checkpoint.
void JfrStackTraceRepository::write_metadata(JfrCheckpointWriter& writer) {
  JfrFrameType fct;
  writer.write_type(TYPE_FRAMETYPE);
  fct.serialize(writer);
}
bool JfrStackTraceRepository::StackTrace::equals(const JfrStackTrace& rhs) const {
if (_reached_root != rhs._reached_root || _nr_of_frames != rhs._nr_of_frames || _hash != rhs._hash) {
return false;
size_t JfrStackTraceRepository::clear() {
MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
if (_entries == 0) {
return 0;
}
for (u4 i = 0; i < _nr_of_frames; ++i) {
if (!_frames[i].equals(rhs._frames[i])) {
return false;
for (u4 i = 0; i < TABLE_SIZE; ++i) {
JfrStackTrace* stacktrace = _table[i];
while (stacktrace != NULL) {
JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
delete stacktrace;
stacktrace = next;
}
}
return true;
memset(_table, 0, sizeof(_table));
const size_t processed = _entries;
_entries = 0;
return processed;
}
template <typename Writer>
static void write_stacktrace(Writer& w, traceid id, bool reached_root, u4 nr_of_frames, const JfrStackFrame* frames) {
w.write((u8)id);
w.write((u1)!reached_root);
w.write(nr_of_frames);
for (u4 i = 0; i < nr_of_frames; ++i) {
frames[i].write(w);
traceid JfrStackTraceRepository::record(Thread* thread, int skip, StackWalkMode mode) {
assert(thread == Thread::current(), "invariant");
JfrThreadLocal* const tl = thread->jfr_thread_local();
assert(tl != NULL, "invariant");
if (tl->has_cached_stack_trace()) {
return tl->cached_stack_trace_id();
}
if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
return 0;
}
JfrStackFrame* frames = tl->stackframes();
if (frames == NULL) {
// pending oom
return 0;
}
assert(frames != NULL, "invariant");
assert(tl->stackframes() == frames, "invariant");
return instance().record_for((JavaThread*)thread, skip, mode, frames, tl->stackdepth());
}
void JfrStackTraceRepository::StackTrace::write(JfrChunkWriter& sw) const {
assert(!_written, "invariant");
write_stacktrace(sw, _id, _reached_root, _nr_of_frames, _frames);
_written = true;
// Walks 'thread''s stack into 'frames' and interns the result; 0 on failure.
// NOTE(review): calls a three-argument record_safe(thread, skip, mode),
// while the record_safe implementation visible in this file takes four
// arguments (with leakp) -- two revisions appear interleaved here; confirm
// against the header which signature is current.
traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, StackWalkMode mode, JfrStackFrame *frames, u4 max_frames) {
  JfrStackTrace stacktrace(frames, max_frames);
  return stacktrace.record_safe(thread, skip, mode) ? add(stacktrace) : 0;
}
void JfrStackTraceRepository::StackTrace::write(JfrCheckpointWriter& cpw) const {
write_stacktrace(cpw, _id, _reached_root, _nr_of_frames, _frames);
traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
traceid tid = instance().add_trace(stacktrace);
if (tid == 0) {
stacktrace.resolve_linenos();
tid = instance().add_trace(stacktrace);
}
assert(tid != 0, "invariant");
return tid;
}
// JfrStackFrame
bool JfrStackFrame::equals(const JfrStackFrame& rhs) const {
return _methodid == rhs._methodid && _bci == rhs._bci && _type == rhs._type;
// Records the thread's stack and caches the resulting trace id and hash in
// the thread-local state so subsequent events on this thread can reuse it
// instead of re-walking the stack. Nothing is cached when the walk produced
// an empty trace (hash == 0).
void JfrStackTraceRepository::record_and_cache(JavaThread* thread, int skip, StackWalkMode mode) {
  assert(thread != NULL, "invariant");
  JfrThreadLocal* const tl = thread->jfr_thread_local();
  assert(tl != NULL, "invariant");
  assert(!tl->has_cached_stack_trace(), "invariant");
  JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
  stacktrace.record_safe(thread, skip, mode);
  const unsigned int hash = stacktrace.hash();
  if (hash != 0) {
    tl->set_cached_stack_trace_id(instance().add(stacktrace), hash);
  }
}
// Writes one stack frame (method id, line, bci, frame type), shared between
// chunk and checkpoint writers via the Writer template parameter.
// The u1 type tag is widened to u8 on the wire.
template <typename Writer>
static void write_frame(Writer& w, traceid methodid, int line, int bci, u1 type) {
  w.write((u8)methodid);
  w.write((u4)line);
  w.write((u4)bci);
  w.write((u8)type);
}
traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
MutexLockerEx lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
const size_t index = stacktrace._hash % TABLE_SIZE;
const JfrStackTrace* table_entry = _table[index];
// Serializes this frame into the chunk.
void JfrStackFrame::write(JfrChunkWriter& cw) const {
  write_frame(cw, _methodid, _line, _bci, _type);
}
while (table_entry != NULL) {
if (table_entry->equals(stacktrace)) {
return table_entry->id();
}
table_entry = table_entry->next();
}
void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
write_frame(cpw, _methodid, _line, _bci, _type);
if (!stacktrace.have_lineno()) {
return 0;
}
traceid id = ++_next_id;
_table[index] = new JfrStackTrace(id, stacktrace, _table[index]);
++_entries;
return id;
}
// invariant is that the entry to be resolved actually exists in the table
const JfrStackTraceRepository::StackTrace* JfrStackTraceRepository::resolve_entry(unsigned int hash, traceid id) const {
const JfrStackTrace* JfrStackTraceRepository::lookup(unsigned int hash, traceid id) const {
const size_t index = (hash % TABLE_SIZE);
const StackTrace* trace = _table[index];
const JfrStackTrace* trace = _table[index];
while (trace != NULL && trace->id() != id) {
trace = trace->next();
}
......@@ -343,149 +217,3 @@ const JfrStackTraceRepository::StackTrace* JfrStackTraceRepository::resolve_entr
assert(trace->id() == id, "invariant");
return trace;
}
// Resolves the source line number from the bci and drops the Method* so the
// frame no longer references method metadata. Both fields are mutable, so
// this may be called on logically-const frames.
void JfrStackFrame::resolve_lineno() const {
  assert(_method, "no method pointer");
  assert(_line == 0, "already have linenumber");
  _line = _method->line_number_from_bci(_bci);
  _method = NULL;
}
// Stores 'frame' at position 'frame_pos' in the caller-supplied buffer.
void JfrStackTrace::set_frame(u4 frame_pos, JfrStackFrame& frame) {
  assert(frame_pos < _max_frames, "illegal frame_pos");
  _frames[frame_pos] = frame;
}
// Resolves the line number of every captured frame and marks this trace as
// having line numbers ('_lineno' is mutable, hence the const qualifier).
void JfrStackTrace::resolve_linenos() const {
  unsigned int idx = 0;
  while (idx < _nr_of_frames) {
    _frames[idx].resolve_lineno();
    ++idx;
  }
  _lineno = true;
}
// Walks 'thread''s stack using the strategy selected by 'mode' and fills in
// this trace. The target must be safepoint-safe or the current thread so
// its stack is walkable. Returns fill_in()'s result.
bool JfrStackTrace::record_safe(JavaThread* thread, int skip, bool leakp, StackWalkMode mode) {
  assert(SafepointSynchronize::safepoint_safe(thread, thread->thread_state())
         || thread == Thread::current(), "Thread stack needs to be walkable");
  bool success = false;
  switch(mode) {
    case WALK_BY_DEFAULT:
      {
        vframeStream vfs(thread);
        success = fill_in(vfs, skip, leakp, mode);
        break;
      }
    case WALK_BY_CURRENT_FRAME:
      {
        // JIT runtime leaf calls do not maintain last_java_sp, so start
        // from the current native frame instead.
        vframeStream vfs(thread, os::current_frame());
        success = fill_in(vfs, skip, leakp, mode);
        break;
      }
    default:
      ShouldNotReachHere();
  }
  return success;
}
// Core stack-walk: iterates 'vfs', skipping the first 'skip' frames, tagging
// each method id via JfrTraceId::use, hashing (mid, bci, type) into _hash
// and storing frames until the buffer is full (_reached_root = false) or the
// walk ends. In WALK_BY_CURRENT_FRAME mode the bci of the first non-native,
// non-skipped (top) frame may be replaced by the thread-local cached bci.
// Always returns true.
bool JfrStackTrace::fill_in(vframeStream& vfs, int skip, bool leakp, StackWalkMode mode) {
  u4 count = 0;
  _reached_root = true;
  // Indicates whether the top frame is visited in this frames iteration.
  // Top frame bci may be invalid and fill_in() will fix the top frame bci in a conservative way.
  bool top_frame_visited = false;
  for(int i = 0; i < skip; i++) {
    if (vfs.at_end()) {
      break;
    }
    // The top frame is in skip list.
    // Mark top_frame_visited to avoid unnecessary top frame bci fixing.
    if (!top_frame_visited) {
      top_frame_visited = true;
    }
    vfs.next();
  }
  while (!vfs.at_end()) {
    if (count >= _max_frames) {
      // Buffer exhausted before reaching the root frame.
      _reached_root = false;
      break;
    }
    const Method* method = vfs.method();
    const traceid mid = JfrTraceId::use(method, leakp);
    int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
    int bci = 0;
    if (method->is_native()) {
      type = JfrStackFrame::FRAME_NATIVE;
      // The top frame is in native.
      // Mark top_frame_visited to avoid unnecessary top frame bci fixing.
      if (!top_frame_visited) {
        top_frame_visited = true;
      }
    } else {
      bci = vfs.bci();
      // Hit the top frame and fix bci here.
      if (!top_frame_visited) {
        if (mode == WALK_BY_CURRENT_FRAME) {
          // Only fix opto fast path allocation.
          // All fast path allocations do not have cached event id.
          if (!vfs.thread_ref()->jfr_thread_local()->has_cached_event_id()) {
            assert(vfs.thread_ref()->jfr_thread_local()->has_cached_top_frame_bci(), "Invariant");
            bci = vfs.thread_ref()->jfr_thread_local()->cached_top_frame_bci();
          }
        }
        top_frame_visited = true;
      }
    }
    // Can we determine if it's inlined?
    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
    _frames[count] = JfrStackFrame(mid, bci, type, method);
    vfs.next();
    count++;
  }
  _nr_of_frames = count;
  return true;
}
// Fills in this trace by sampling 'thread' starting at 'frame' (thread
// sampler path, where the target is not the current thread). Bails out with
// false as soon as an invalid Method* is seen, since nothing gathered from
// a racing stack can be trusted. Line numbers are resolved eagerly here.
bool JfrStackTrace::record_thread(JavaThread& thread, frame& frame) {
  vframeStreamSamples st(&thread, frame, false);
  u4 count = 0;
  _reached_root = true;
  while (!st.at_end()) {
    if (count >= _max_frames) {
      _reached_root = false;
      break;
    }
    const Method* method = st.method();
    if (!method->is_valid_method()) {
      // we throw away everything we've gathered in this sample since
      // none of it is safe
      return false;
    }
    const traceid mid = JfrTraceId::use(method);
    int type = st.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
    int bci = 0;
    if (method->is_native()) {
      type = JfrStackFrame::FRAME_NATIVE;
    } else {
      bci = st.bci();
    }
    const int lineno = method->line_number_from_bci(bci);
    // Can we determine if it's inlined?
    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
    _frames[count] = JfrStackFrame(mid, bci, type, lineno);
    st.samples_next();
    count++;
  }
  _lineno = true;
  _nr_of_frames = count;
  return true;
}
void JfrStackTraceRepository::write_metadata(JfrCheckpointWriter& writer) {
JfrFrameType fct;
writer.write_type(TYPE_FRAMETYPE);
fct.serialize(writer);
}
/*
* Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -22,147 +22,53 @@
*
*/
#ifndef SHARE_VM_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
#define SHARE_VM_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
#ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
#define SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrTypes.hpp"
class frame;
class JavaThread;
class JfrCheckpointWriter;
class JfrChunkWriter;
class Method;
class vframeStream;
// One frame of a captured stack trace. A frame is constructed either with a
// live Method* (line number resolved lazily, see resolve_lineno()) or with
// an already-resolved line number (thread-sampling path).
class JfrStackFrame {
 private:
  mutable const Method* _method;  // cleared once the line number is resolved
  traceid _methodid;
  mutable int _line;              // 0 until resolved (first ctor form)
  int _bci;
  u1 _type;                       // one of the FRAME_* constants below
 public:
  enum {
    FRAME_INTERPRETER = 0,
    FRAME_JIT,
    FRAME_INLINE,
    FRAME_NATIVE,
    NUM_FRAME_TYPES
  };
  JfrStackFrame(const traceid& id, int bci, int type, const Method* method) :
    _method(method), _methodid(id), _line(0), _bci(bci), _type(type) {}
  JfrStackFrame(const traceid& id, int bci, int type, int lineno) :
    _method(NULL), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
  bool equals(const JfrStackFrame& rhs) const;
  void write(JfrChunkWriter& cw) const;
  void write(JfrCheckpointWriter& cpw) const;
  void resolve_lineno() const;
};
// Strategy used to start a stack walk.
enum StackWalkMode {
  // walk stack by vframeStream vfs(thread).
  WALK_BY_DEFAULT = 0,
  // walk stack by vframeStream vfs(thread, os::current_frame()).
  // It is only used in JIT runtime leaf call. In JIT runtime leaf call,
  // last_java_sp is not maintained and WALK_BY_DEFAULT can not walk stack.
  WALK_BY_CURRENT_FRAME
};
// A stack trace captured into a caller-supplied frame buffer (stack
// allocated; heap-resident copies are made by the repository when interned).
class JfrStackTrace : public StackObj {
  friend class JfrStackTraceRepository;
 private:
  JfrStackFrame* _frames;   // caller-owned buffer of capacity _max_frames
  traceid _id;
  u4 _nr_of_frames;
  unsigned int _hash;       // incrementally built while walking
  const u4 _max_frames;
  bool _reached_root;       // false when the buffer filled before the root
  mutable bool _lineno;     // true once line numbers are resolved
  bool fill_in(vframeStream& vfs, int skip, bool leakp, StackWalkMode mode);
 public:
  // Initializer list reordered to match member declaration order
  // (_max_frames before _reached_root): members are initialized in
  // declaration order regardless of list order, so the previous ordering
  // triggered -Wreorder while behaving the same.
  JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
                                                        _id(0),
                                                        _nr_of_frames(0),
                                                        _hash(0),
                                                        _max_frames(max_frames),
                                                        _reached_root(false),
                                                        _lineno(false) {}
  bool record_thread(JavaThread& thread, frame& frame);
  bool record_safe(JavaThread* thread, int skip, bool leakp, StackWalkMode stack_walk_mode);
  void resolve_linenos() const;
  void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
  void set_hash(unsigned int hash) { _hash = hash; }
  unsigned int hash() const { return _hash; }
  void set_frame(u4 frame_pos, JfrStackFrame& frame);
  void set_reached_root(bool reached_root) { _reached_root = reached_root; }
  bool full_stacktrace() const { return _reached_root; }
  bool have_lineno() const { return _lineno; }
};
// Global repository interning JFR stack traces in a fixed-size hash table.
// NOTE(review): this declaration interleaves two revisions of the class (a
// diff-merge artifact): both the nested-StackTrace version and the flat
// JfrStackTrace version are present, including two conflicting declarations
// of _table and several duplicated member declarations. As written it
// cannot compile; confirm against the 8225797 change which copy survives.
class JfrStackTraceRepository : public JfrCHeapObj {
  friend class JfrRecorder;
  friend class JfrRecorderService;
  friend class JfrThreadSampleClosure;
  friend class ObjectSampleCheckpoint;
  friend class ObjectSampler;
  friend class WriteObjectSampleStacktrace;
  // Heap-resident node: a deep copy of a JfrStackTrace, chained per bucket.
  class StackTrace : public JfrCHeapObj {
    friend class JfrStackTrace;
    friend class JfrStackTraceRepository;
   private:
    StackTrace* _next;
    JfrStackFrame* _frames;
    const traceid _id;
    u4 _nr_of_frames;
    unsigned int _hash;
    bool _reached_root;
    mutable bool _written;   // set once emitted to the current chunk
    unsigned int hash() const { return _hash; }
    bool should_write() const { return !_written; }
   public:
    StackTrace(traceid id, const JfrStackTrace& trace, StackTrace* next);
    ~StackTrace();
    traceid id() const { return _id; }
    StackTrace* next() const { return _next; }
    void write(JfrChunkWriter& cw) const;
    void write(JfrCheckpointWriter& cpw) const;
    bool equals(const JfrStackTrace& rhs) const;
  };
  friend class StackTraceBlobInstaller;
  friend class WriteStackTraceRepository;
 private:
  static const u4 TABLE_SIZE = 2053;
  // NOTE(review): duplicate conflicting _table declarations (old and new).
  StackTrace* _table[TABLE_SIZE];
  JfrStackTrace* _table[TABLE_SIZE];
  traceid _next_id;
  u4 _entries;
  traceid add_trace(const JfrStackTrace& stacktrace);
  static traceid add(const JfrStackTrace* stacktrace, JavaThread* thread);
  traceid record_for(JavaThread* thread, int skip, StackWalkMode mode, JfrStackFrame* frames, u4 max_frames);
  size_t write_impl(JfrChunkWriter& cw, bool clear);
  const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
  static void write_metadata(JfrCheckpointWriter& cpw);
  static bool fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip, StackWalkMode mode);
  JfrStackTraceRepository();
  // NOTE(review): instance() declared twice below (merge artifact).
  static JfrStackTraceRepository& instance();
  static JfrStackTraceRepository* create();
  bool initialize();
  static void destroy();
  static JfrStackTraceRepository& instance();
 public:
  static traceid add(const JfrStackTrace& stacktrace);
  static traceid record(Thread* thread, int skip, StackWalkMode mode);
  // NOTE(review): the following duplicate several declarations from the
  // private section above (merge artifact).
  size_t write_impl(JfrChunkWriter& cw, bool clear);
  static void write_metadata(JfrCheckpointWriter& cpw);
  traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
  size_t write(JfrChunkWriter& cw, bool clear);
  size_t clear();
  traceid add_trace(const JfrStackTrace& stacktrace);
  static traceid add(const JfrStackTrace& stacktrace);
  traceid record_for(JavaThread* thread, int skip, StackWalkMode mode, JfrStackFrame* frames, u4 max_frames);
  const JfrStackTrace* lookup(unsigned int hash, traceid id) const;
 public:
  static traceid record(Thread* thread, int skip, StackWalkMode mode);
  static void record_and_cache(JavaThread* thread, int skip, StackWalkMode mode);
};
#endif // SHARE_VM_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
#endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
......@@ -27,10 +27,6 @@
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrDoublyLinkedList.hpp"
#include "jfr/utilities/jfrIterator.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
template <typename T, template <typename> class RetrievalType, typename Callback>
class JfrMemorySpace : public JfrCHeapObj {
......@@ -107,62 +103,4 @@ class JfrMemorySpace : public JfrCHeapObj {
debug_only(bool in_free_list(const Type* t) const { return _free.in_list(t); })
};
// allocations are even multiples of the mspace min size
// Rounds 'requested_size' up to the next power-of-two multiple of
// 'min_elem_size', which must itself be page aligned.
inline u8 align_allocation_size(u8 requested_size, size_t min_elem_size) {
  assert((int)min_elem_size % os::vm_page_size() == 0, "invariant");
  u8 alloc_size_bytes = min_elem_size;
  while (requested_size > alloc_size_bytes) {
    alloc_size_bytes <<= 1;
  }
  assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant");
  return alloc_size_bytes;
}
// Allocates one buffer element of at least 'size' bytes (rounded up, see
// align_allocation_size). The element header T is placement-constructed in
// front of its storage. Returns NULL on allocation or initialization failure.
template <typename T, template <typename> class RetrievalType, typename Callback>
T* JfrMemorySpace<T, RetrievalType, Callback>::allocate(size_t size) {
  const u8 aligned_size_bytes = align_allocation_size(size, _min_elem_size);
  void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(T));
  if (allocation == NULL) {
    return NULL;
  }
  T* const t = new (allocation) T;
  assert(t != NULL, "invariant");
  if (!t->initialize(sizeof(T), aligned_size_bytes)) {
    JfrCHeapObj::free(t, aligned_size_bytes + sizeof(T));
    return NULL;
  }
  return t;
}
// Returns a fully detached buffer element to the C heap. The element must
// not be linked on either the free or the full list.
// (Removed a duplicated 'assert(t != NULL, ...)'.)
template <typename T, template <typename> class RetrievalType, typename Callback>
void JfrMemorySpace<T, RetrievalType, Callback>::deallocate(T* t) {
  assert(t != NULL, "invariant");
  assert(!_free.in_list(t), "invariant");
  assert(!_full.in_list(t), "invariant");
  JfrCHeapObj::free(t, t->total_size());
}
// RAII lock for an mspace: locks in the constructor, unlocks in the destructor.
template <typename Mspace>
class MspaceLock {
 private:
  Mspace* _mspace;
 public:
  MspaceLock(Mspace* mspace) : _mspace(mspace) { _mspace->lock(); }
  ~MspaceLock() { _mspace->unlock(); }
};
// Iteration operation releasing buffers back to an mspace; '_release_full'
// controls whether elements on the full list are released as well.
// process() is defined out of line.
template <typename Mspace>
class ReleaseOp : public StackObj {
 private:
  Mspace* _mspace;
  Thread* _thread;
  bool _release_full;
 public:
  typedef typename Mspace::Type Type;
  ReleaseOp(Mspace* mspace, Thread* thread, bool release_full = true) : _mspace(mspace), _thread(thread), _release_full(release_full) {}
  bool process(Type* t);
  size_t processed() const { return 0; }
};
#endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP
......@@ -26,6 +26,7 @@
#define SHARE_VM_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_INLINE_HPP
#include "jfr/recorder/storage/jfrMemorySpace.hpp"
#include "runtime/os.hpp"
template <typename T, template <typename> class RetrievalType, typename Callback>
JfrMemorySpace<T, RetrievalType, Callback>::
......@@ -69,6 +70,42 @@ bool JfrMemorySpace<T, RetrievalType, Callback>::initialize() {
return true;
}
// allocations are even multiples of the mspace min size
static inline size_t align_allocation_size(size_t requested_size, size_t min_elem_size) {
assert((int)min_elem_size % os::vm_page_size() == 0, "invariant");
u8 alloc_size_bytes = min_elem_size;
while (requested_size > alloc_size_bytes) {
alloc_size_bytes <<= 1;
}
assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant");
return (size_t)alloc_size_bytes;
}
template <typename T, template <typename> class RetrievalType, typename Callback>
inline T* JfrMemorySpace<T, RetrievalType, Callback>::allocate(size_t size) {
const size_t aligned_size_bytes = align_allocation_size(size, _min_elem_size);
void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(T));
if (allocation == NULL) {
return NULL;
}
T* const t = new (allocation) T;
assert(t != NULL, "invariant");
if (!t->initialize(sizeof(T), aligned_size_bytes)) {
JfrCHeapObj::free(t, aligned_size_bytes + sizeof(T));
return NULL;
}
return t;
}
// Returns a fully detached buffer element to the C heap. The element must
// not be linked on either the free or the full list.
// (Removed a duplicated 'assert(t != NULL, ...)'.)
template <typename T, template <typename> class RetrievalType, typename Callback>
inline void JfrMemorySpace<T, RetrievalType, Callback>::deallocate(T* t) {
  assert(t != NULL, "invariant");
  assert(!_free.in_list(t), "invariant");
  assert(!_full.in_list(t), "invariant");
  JfrCHeapObj::free(t, t->total_size());
}
template <typename T, template <typename> class RetrievalType, typename Callback>
inline void JfrMemorySpace<T, RetrievalType, Callback>::release_full(T* t) {
assert(is_locked(), "invariant");
......@@ -122,6 +159,15 @@ inline void JfrMemorySpace<T, RetrievalType, Callback>
}
}
// Allocates and initializes a new mspace.
// NOTE(review): the boolean result of initialize() is ignored, so a
// non-NULL but uninitialized mspace can be returned -- confirm callers
// tolerate or check this.
template <typename Mspace, typename Callback>
static inline Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, Callback* cb) {
  Mspace* const mspace = new Mspace(buffer_size, limit, cache_count, cb);
  if (mspace != NULL) {
    mspace->initialize();
  }
  return mspace;
}
template <typename Mspace>
inline size_t size_adjustment(size_t size, Mspace* mspace) {
assert(mspace != NULL, "invariant");
......@@ -173,6 +219,15 @@ inline typename Mspace::Type* mspace_allocate_to_full(size_t size, Mspace* mspac
return t;
}
template <typename Mspace>
class MspaceLock {
private:
Mspace* _mspace;
public:
MspaceLock(Mspace* mspace) : _mspace(mspace) { _mspace->lock(); }
~MspaceLock() { _mspace->unlock(); }
};
template <typename Mspace>
inline typename Mspace::Type* mspace_allocate_transient_to_full(size_t size, Mspace* mspace, Thread* thread) {
typename Mspace::Type* const t = mspace_allocate_transient(size, mspace, thread);
......@@ -343,6 +398,20 @@ inline void process_free_list(Processor& processor, Mspace* mspace, jfr_iter_dir
process_free_list_iterator_control<Processor, Mspace, typename Mspace::Iterator>(processor, mspace, direction);
}
template <typename Mspace>
class ReleaseOp : public StackObj {
private:
Mspace* _mspace;
Thread* _thread;
bool _release_full;
public:
typedef typename Mspace::Type Type;
ReleaseOp(Mspace* mspace, Thread* thread, bool release_full = true) :
_mspace(mspace), _thread(thread), _release_full(release_full) {}
bool process(Type* t);
size_t processed() const { return 0; }
};
template <typename Mspace>
inline bool ReleaseOp<Mspace>::process(typename Mspace::Type* t) {
assert(t != NULL, "invariant");
......
......@@ -36,6 +36,7 @@
#define JDK_JFR_EVENT_SUBKLASS 16
#define JDK_JFR_EVENT_KLASS 32
#define EVENT_HOST_KLASS 64
#define EVENT_RESERVED 128
#define IS_EVENT_KLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)) != 0)
#define ON_KLASS_CREATION(k, p, t) if (IS_EVENT_KLASS(k)) JfrEventClassTransformer::on_klass_creation(k, p, t)
......
......@@ -25,13 +25,13 @@
#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/storage/jfrStorage.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
......@@ -46,7 +46,7 @@ JfrThreadLocal::JfrThreadLocal() :
_shelved_buffer(NULL),
_stackframes(NULL),
_trace_id(JfrTraceId::assign_thread_id()),
_thread_cp(),
_thread(),
_data_lost(0),
_stack_trace_id(max_julong),
_user_time(0),
......@@ -66,17 +66,17 @@ u8 JfrThreadLocal::add_data_lost(u8 value) {
return _data_lost;
}
bool JfrThreadLocal::has_thread_checkpoint() const {
return _thread_cp.valid();
bool JfrThreadLocal::has_thread_blob() const {
return _thread.valid();
}
void JfrThreadLocal::set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) {
assert(!_thread_cp.valid(), "invariant");
_thread_cp = ref;
void JfrThreadLocal::set_thread_blob(const JfrBlobHandle& ref) {
assert(!_thread.valid(), "invariant");
_thread = ref;
}
const JfrCheckpointBlobHandle& JfrThreadLocal::thread_checkpoint() const {
return _thread_cp;
const JfrBlobHandle& JfrThreadLocal::thread_blob() const {
return _thread;
}
static void send_java_thread_start_event(JavaThread* jt) {
......@@ -99,10 +99,12 @@ static void send_java_thread_end_events(traceid id, JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(Thread::current() == jt, "invariant");
assert(jt->jfr_thread_local()->trace_id() == id, "invariant");
EventThreadEnd event;
event.set_thread(id);
event.commit();
JfrThreadCPULoadEvent::send_event_for_thread(jt);
if (JfrRecorder::is_recording()) {
EventThreadEnd event;
event.set_thread(id);
event.commit();
JfrThreadCPULoadEvent::send_event_for_thread(jt);
}
}
void JfrThreadLocal::release(JfrThreadLocal* tl, Thread* t) {
......@@ -131,10 +133,10 @@ void JfrThreadLocal::on_exit(Thread* t) {
assert(t != NULL, "invariant");
JfrThreadLocal * const tl = t->jfr_thread_local();
assert(!tl->is_dead(), "invariant");
if (JfrRecorder::is_recording()) {
if (t->is_Java_thread()) {
send_java_thread_end_events(tl->thread_id(), (JavaThread*)t);
}
if (t->is_Java_thread()) {
JavaThread* const jt = (JavaThread*)t;
ObjectSampleCheckpoint::on_thread_exit(jt);
send_java_thread_end_events(tl->thread_id(), jt);
}
release(tl, Thread::current()); // because it could be that Thread::current() != t
}
......
......@@ -25,7 +25,7 @@
#ifndef SHARE_VM_JFR_SUPPORT_JFRTHREADLOCAL_HPP
#define SHARE_VM_JFR_SUPPORT_JFRTHREADLOCAL_HPP
#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
#include "jfr/utilities/jfrBlob.hpp"
#include "jfr/utilities/jfrTypes.hpp"
class JavaThread;
......@@ -41,7 +41,7 @@ class JfrThreadLocal {
JfrBuffer* _shelved_buffer;
mutable JfrStackFrame* _stackframes;
mutable traceid _trace_id;
JfrCheckpointBlobHandle _thread_cp;
JfrBlobHandle _thread;
u8 _data_lost;
traceid _stack_trace_id;
jlong _user_time;
......@@ -276,9 +276,9 @@ class JfrThreadLocal {
_cached_event_id = MaxJfrEventId;
}
bool has_thread_checkpoint() const;
void set_thread_checkpoint(const JfrCheckpointBlobHandle& handle);
const JfrCheckpointBlobHandle& thread_checkpoint() const;
bool has_thread_blob() const;
void set_thread_blob(const JfrBlobHandle& handle);
const JfrBlobHandle& thread_blob() const;
static void on_start(Thread* t);
static void on_exit(Thread* t);
......
......@@ -43,39 +43,46 @@
class JfrTraceFlag {
private:
mutable jbyte _flags;
mutable jshort _flags;
public:
JfrTraceFlag() : _flags(0) {}
explicit JfrTraceFlag(jbyte flags) : _flags(flags) {}
void set_flag(jbyte flag) const {
_flags |= flag;
bool is_set(jshort flag) const {
return (_flags & flag) != 0;
}
void clear_flag(jbyte flag) const {
_flags &= (~flag);
jshort flags() const {
return _flags;
}
jbyte flags() const { return _flags; }
bool is_set(jbyte flag) const {
return (_flags & flag) != 0;
void set_flags(jshort flags) const {
_flags = flags;
}
jbyte* const flags_addr() const {
return &_flags;
jbyte* flags_addr() const {
return (jbyte*)&_flags;
}
jbyte* meta_addr() const {
return ((jbyte*)&_flags) + 1;
}
};
#define DEFINE_TRACE_FLAG mutable JfrTraceFlag _trace_flags
#define DEFINE_TRACE_FLAG_ACCESSOR \
void set_trace_flag(jbyte flag) const { \
_trace_flags.set_flag(flag); \
bool is_trace_flag_set(jshort flag) const { \
return _trace_flags.is_set(flag); \
} \
jbyte trace_flags() const { \
jshort trace_flags() const { \
return _trace_flags.flags(); \
} \
bool is_trace_flag_set(jbyte flag) const { \
return _trace_flags.is_set(flag); \
void set_trace_flags(jshort flags) const { \
_trace_flags.set_flags(flags); \
} \
jbyte* const trace_flags_addr() const { \
jbyte* trace_flags_addr() const { \
return _trace_flags.flags_addr(); \
} \
jbyte* trace_meta_addr() const { \
return _trace_flags.meta_addr(); \
}
#define ARRAY_OBJECT_SIZE_PLACE_HOLDER 0x1111baba
......
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -23,58 +23,32 @@
*/
#include "precompiled.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/utilities/jfrBlob.hpp"
JfrCheckpointBlob::JfrCheckpointBlob(const u1* checkpoint, size_t size) :
_checkpoint(JfrCHeapObj::new_array<u1>(size)),
_size(size),
JfrBlob::JfrBlob(const u1* checkpoint, size_t size) :
_data(JfrCHeapObj::new_array<u1>(size)),
_next(),
_size(size),
_written(false) {
assert(checkpoint != NULL, "invariant");
assert(_checkpoint != NULL, "invariant");
memcpy(const_cast<u1*>(_checkpoint), checkpoint, size);
}
JfrCheckpointBlob::~JfrCheckpointBlob() {
JfrCHeapObj::free(const_cast<u1*>(_checkpoint), _size);
assert(_data != NULL, "invariant");
memcpy(const_cast<u1*>(_data), checkpoint, size);
}
const JfrCheckpointBlobHandle& JfrCheckpointBlob::next() const {
return _next;
// Destructor: releases the heap copy of the serialized data.
// _data was allocated with JfrCHeapObj::new_array<u1>(size) in the
// constructor; const_cast is required because the member is declared
// "const u1* const".
JfrBlob::~JfrBlob() {
JfrCHeapObj::free(const_cast<u1*>(_data), _size);
}
void JfrCheckpointBlob::write_this(JfrCheckpointWriter& writer) const {
writer.bytes(_checkpoint, _size);
}
void JfrCheckpointBlob::exclusive_write(JfrCheckpointWriter& writer) const {
void JfrBlob::reset_write_state() const {
if (!_written) {
write_this(writer);
_written = true;
}
if (_next.valid()) {
_next->exclusive_write(writer);
}
}
void JfrCheckpointBlob::write(JfrCheckpointWriter& writer) const {
write_this(writer);
if (_next.valid()) {
_next->write(writer);
}
}
void JfrCheckpointBlob::reset_write_state() const {
if (_written) {
_written = false;
return;
}
_written = false;
if (_next.valid()) {
_next->reset_write_state();
}
}
void JfrCheckpointBlob::set_next(const JfrCheckpointBlobHandle& ref) {
void JfrBlob::set_next(const JfrBlobHandle& ref) {
if (_next == ref) {
return;
}
......@@ -86,8 +60,8 @@ void JfrCheckpointBlob::set_next(const JfrCheckpointBlobHandle& ref) {
_next = ref;
}
JfrCheckpointBlobHandle JfrCheckpointBlob::make(const u1* checkpoint, size_t size) {
const JfrCheckpointBlob* cp_blob = new JfrCheckpointBlob(checkpoint, size);
assert(cp_blob != NULL, "invariant");
return JfrCheckpointBlobReference::make(cp_blob);
// Factory: copies 'size' bytes from 'data' into a new heap-allocated JfrBlob
// and wraps it in a reference-counted handle (JfrBlobHandle). The blob is
// destroyed when the last handle referencing it goes away.
JfrBlobHandle JfrBlob::make(const u1* data, size_t size) {
const JfrBlob* const blob = new JfrBlob(data, size);
assert(blob != NULL, "invariant");
return JfrBlobReference::make(blob);
}
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -22,38 +22,50 @@
*
*/
#ifndef SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
#define SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
#ifndef SHARE_JFR_UTILITIES_JFRBLOB_HPP
#define SHARE_JFR_UTILITIES_JFRBLOB_HPP
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrRefCountPointer.hpp"
class JfrCheckpointBlob;
class JfrCheckpointWriter;
class JfrBlob;
typedef RefCountPointer<JfrBlob, MultiThreadedRefCounter> JfrBlobReference;
typedef RefCountHandle<JfrBlobReference> JfrBlobHandle;
typedef RefCountPointer<JfrCheckpointBlob, MultiThreadedRefCounter> JfrCheckpointBlobReference;
typedef RefCountHandle<JfrCheckpointBlobReference> JfrCheckpointBlobHandle;
class JfrCheckpointBlob : public JfrCHeapObj {
class JfrBlob : public JfrCHeapObj {
template <typename, typename>
friend class RefCountPointer;
private:
const u1* _checkpoint;
const u1* const _data;
JfrBlobHandle _next;
const size_t _size;
JfrCheckpointBlobHandle _next;
mutable bool _written;
JfrCheckpointBlob(const u1* checkpoint, size_t size);
~JfrCheckpointBlob();
const JfrCheckpointBlobHandle& next() const;
void write_this(JfrCheckpointWriter& writer) const;
JfrBlob(const u1* data, size_t size);
~JfrBlob();
public:
void write(JfrCheckpointWriter& writer) const;
void exclusive_write(JfrCheckpointWriter& writer) const;
void set_next(const JfrBlobHandle& ref);
void reset_write_state() const;
void set_next(const JfrCheckpointBlobHandle& ref);
static JfrCheckpointBlobHandle make(const u1* checkpoint, size_t size);
static JfrBlobHandle make(const u1* data, size_t size);
// Unconditionally writes this blob's bytes to 'writer', then recurses down
// the chain of linked blobs (_next). Unlike exclusive_write(), this ignores
// the _written de-duplication flag.
template <typename Writer>
void write(Writer& writer) const {
writer.bytes(_data, _size);
if (_next.valid()) {
_next->write(writer);
}
}
// Writes this blob (and its chain) at most once: the mutable _written flag
// guards against emitting the same shared blob repeatedly when multiple
// owners serialize overlapping chains. reset_write_state() re-arms the flag.
template <typename Writer>
void exclusive_write(Writer& writer) const {
if (_written) {
// already emitted in this serialization pass; skip self and chain
return;
}
writer.bytes(_data, _size);
_written = true;
if (_next.valid()) {
_next->exclusive_write(writer);
}
}
};
#endif // SHARE_VM_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
#endif // SHARE_JFR_UTILITIES_JFRBLOB_HPP
......@@ -25,13 +25,13 @@
#ifndef SHARE_VM_JFR_UTILITIES_JFRHASHTABLE_HPP
#define SHARE_VM_JFR_UTILITIES_JFRHASHTABLE_HPP
#include "memory/allocation.inline.hpp"
#include "jfr/utilities/jfrAllocation.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
template <typename T>
class JfrBasicHashtableEntry {
class JfrBasicHashtableEntry : public JfrCHeapObj {
private:
typedef JfrBasicHashtableEntry<T> Entry;
Entry* _next;
......@@ -39,8 +39,8 @@ class JfrBasicHashtableEntry {
uintptr_t _hash;
public:
JfrBasicHashtableEntry(uintptr_t hash, const T& data) : _next(NULL), _literal(data), _hash(hash) {}
uintptr_t hash() const { return _hash; }
void set_hash(uintptr_t hash) { _hash = hash; }
T literal() const { return _literal; }
T* literal_addr() { return &_literal; }
void set_literal(T s) { _literal = s; }
......@@ -92,10 +92,7 @@ class JfrBasicHashtable : public CHeapObj<mtTracing> {
--_number_of_entries;
}
void free_buckets() {
if (NULL != _buckets) {
FREE_C_HEAP_ARRAY(Bucket, _buckets, mtTracing);
_buckets = NULL;
}
FREE_C_HEAP_ARRAY(Bucket, _buckets, mtTracing);
}
TableEntry* bucket(size_t i) { return _buckets[i].get_entry();}
TableEntry** bucket_addr(size_t i) { return _buckets[i].entry_addr(); }
......@@ -110,18 +107,18 @@ class JfrBasicHashtable : public CHeapObj<mtTracing> {
};
template <typename IdType, typename Entry, typename T>
class AscendingId : public CHeapObj<mtTracing> {
class AscendingId : public JfrCHeapObj {
private:
IdType _id;
public:
AscendingId() : _id(0) {}
// callbacks
void assign_id(Entry* entry) {
// Callback invoked by the hashtable host when a new entry is linked in:
// assigns the next id from a strictly ascending sequence (ids start at 1,
// so id() == 0 means "not yet linked").
void on_link(Entry* entry) {
assert(entry != NULL, "invariant");
assert(entry->id() == 0, "invariant");
entry->set_id(++_id);
}
bool equals(const T& data, uintptr_t hash, const Entry* entry) {
// Equality callback: identity is determined by the hash alone (the caller
// has already matched entry->hash() == hash), so always report a match.
bool on_equals(uintptr_t hash, const Entry* entry) {
assert(entry->hash() == hash, "invariant");
return true;
}
......@@ -129,18 +126,16 @@ class AscendingId : public CHeapObj<mtTracing> {
// IdType must be scalar
template <typename T, typename IdType>
class Entry : public JfrBasicHashtableEntry<T> {
class JfrHashtableEntry : public JfrBasicHashtableEntry<T> {
public:
JfrHashtableEntry(uintptr_t hash, const T& data) : JfrBasicHashtableEntry<T>(hash, data), _id(0) {}
typedef IdType ID;
void init() { _id = 0; }
ID id() const { return _id; }
void set_id(ID id) { _id = id; }
void set_value(const T& value) { this->set_literal(value); }
T& value() const { return *const_cast<Entry*>(this)->literal_addr();}
const T* value_addr() const { return const_cast<Entry*>(this)->literal_addr(); }
void set_id(ID id) const { _id = id; }
T& value() const { return *const_cast<JfrHashtableEntry*>(this)->literal_addr();}
const T* value_addr() const { return const_cast<JfrHashtableEntry*>(this)->literal_addr(); }
private:
ID _id;
mutable ID _id;
};
template <typename T, typename IdType, template <typename, typename> class Entry,
......@@ -149,29 +144,28 @@ template <typename T, typename IdType, template <typename, typename> class Entry
class HashTableHost : public JfrBasicHashtable<T> {
public:
typedef Entry<T, IdType> HashEntry;
HashTableHost() : _callback(new Callback()) {}
HashTableHost(Callback* cb) : JfrBasicHashtable<T>(TABLE_SIZE, sizeof(HashEntry)), _callback(cb) {}
HashTableHost(size_t size = 0) : JfrBasicHashtable<T>(size == 0 ? TABLE_SIZE : size, sizeof(HashEntry)), _callback(new Callback()) {}
HashTableHost(Callback* cb, size_t size = 0) : JfrBasicHashtable<T>(size == 0 ? TABLE_SIZE : size, sizeof(HashEntry)), _callback(cb) {}
~HashTableHost() {
this->clear_entries();
this->free_buckets();
}
// direct insert assumes non-existing entry
HashEntry& put(const T& data, uintptr_t hash);
HashEntry& put(uintptr_t hash, const T& data);
// lookup entry, will put if not found
HashEntry& lookup_put(const T& data, uintptr_t hash) {
HashEntry* entry = lookup_only(data, hash);
return entry == NULL ? put(data, hash) : *entry;
// Looks up the entry for 'hash'; if absent, inserts a new entry holding
// 'data' and returns it. Always returns a valid entry reference.
HashEntry& lookup_put(uintptr_t hash, const T& data) {
HashEntry* entry = lookup_only(hash);
return entry == NULL ? put(hash, data) : *entry;
}
// read-only lookup
HashEntry* lookup_only(const T& query, uintptr_t hash);
HashEntry* lookup_only(uintptr_t hash);
// id retrieval
IdType id(const T& data, uintptr_t hash) {
IdType id(uintptr_t hash, const T& data) {
assert(data != NULL, "invariant");
const HashEntry& entry = lookup_put(data, hash);
const HashEntry& entry = lookup_put(hash, data);
assert(entry.id() > 0, "invariant");
return entry.id();
}
......@@ -190,34 +184,35 @@ class HashTableHost : public JfrBasicHashtable<T> {
void free_entry(HashEntry* entry) {
assert(entry != NULL, "invariant");
JfrBasicHashtable<T>::unlink_entry(entry);
FREE_C_HEAP_ARRAY(char, entry, mtTracing);
_callback->on_unlink(entry);
delete entry;
}
private:
Callback* _callback;
size_t index_for(uintptr_t hash) { return this->hash_to_index(hash); }
HashEntry* new_entry(const T& data, uintptr_t hash);
HashEntry* new_entry(uintptr_t hash, const T& data);
void add_entry(size_t index, HashEntry* new_entry) {
assert(new_entry != NULL, "invariant");
_callback->assign_id(new_entry);
_callback->on_link(new_entry);
assert(new_entry->id() > 0, "invariant");
JfrBasicHashtable<T>::add_entry(index, new_entry);
}
};
template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
Entry<T, IdType>& HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::put(const T& data, uintptr_t hash) {
assert(lookup_only(data, hash) == NULL, "use lookup_put()");
HashEntry* const entry = new_entry(data, hash);
Entry<T, IdType>& HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::put(uintptr_t hash, const T& data) {
assert(lookup_only(hash) == NULL, "use lookup_put()");
HashEntry* const entry = new_entry(hash, data);
add_entry(index_for(hash), entry);
return *entry;
}
template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::lookup_only(const T& query, uintptr_t hash) {
Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::lookup_only(uintptr_t hash) {
HashEntry* entry = (HashEntry*)this->bucket(index_for(hash));
while (entry != NULL) {
if (entry->hash() == hash && _callback->equals(query, hash, entry)) {
if (entry->hash() == hash && _callback->on_equals(hash, entry)) {
return entry;
}
entry = (HashEntry*)entry->next();
......@@ -269,13 +264,10 @@ void HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::clear_entries() {
}
template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::new_entry(const T& data, uintptr_t hash) {
Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::new_entry(uintptr_t hash, const T& data) {
assert(sizeof(HashEntry) == this->entry_size(), "invariant");
HashEntry* const entry = (HashEntry*) NEW_C_HEAP_ARRAY2(char, this->entry_size(), mtTracing, CURRENT_PC);
entry->init();
entry->set_hash(hash);
entry->set_value(data);
entry->set_next(NULL);
HashEntry* const entry = new HashEntry(hash, data);
assert(entry != NULL, "invariant");
assert(0 == entry->id(), "invariant");
return entry;
}
......
......@@ -26,13 +26,22 @@
#define SHARE_VM_JFR_UTILITIES_JFRTYPES_HPP
#include "jfrfiles/jfrEventIds.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
typedef u8 traceid;
typedef int fio_fd;
const int invalid_fd = -1;
const jlong invalid_offset = -1;
const u4 STACK_DEPTH_DEFAULT = 64;
const u4 MIN_STACK_DEPTH = 1;
const u4 MAX_STACK_DEPTH = 2048;
// Three-way comparison of two trace ids: returns 1 if lhs > rhs,
// -1 if lhs < rhs, and 0 when they are equal.
inline int compare_traceid(const traceid& lhs, const traceid& rhs) {
  if (lhs > rhs) {
    return 1;
  }
  if (lhs < rhs) {
    return -1;
  }
  return 0;
}
// Pointer-based comparator adapter around compare_traceid(), for sort
// routines that hand the comparator pointers to the elements
// (qsort-style ordering callback).
inline int sort_traceid(traceid* lhs, traceid* rhs) {
return compare_traceid(*lhs, *rhs);
}
enum EventStartTime {
UNTIMED,
......
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -22,31 +22,30 @@
*
*/
#ifndef SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
#define SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
#ifndef SHARE_JFR_WRITERS_JFRTYPEWRITERHOST_HPP
#define SHARE_JFR_WRITERS_JFRTYPEWRITERHOST_HPP
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allocation.hpp"
template <typename WriterImpl, u4 ID>
class JfrArtifactWriterHost : public StackObj {
class JfrTypeWriterHost : public StackObj {
private:
WriterImpl _impl;
JfrCheckpointWriter* _writer;
JfrCheckpointContext _ctx;
jlong _count_offset;
int64_t _count_offset;
int _count;
bool _skip_header;
public:
JfrArtifactWriterHost(JfrCheckpointWriter* writer,
JfrArtifactSet* artifacts,
bool class_unload,
bool skip_header = false) : _impl(writer, artifacts, class_unload),
_writer(writer),
_ctx(writer->context()),
_count(0),
_skip_header(skip_header) {
JfrTypeWriterHost(JfrCheckpointWriter* writer,
bool class_unload = false,
bool skip_header = false) : _impl(writer, class_unload),
_writer(writer),
_ctx(writer->context()),
_count(0),
_skip_header(skip_header) {
assert(_writer != NULL, "invariant");
if (!_skip_header) {
_writer->write_type((JfrTypeId)ID);
......@@ -54,7 +53,7 @@ class JfrArtifactWriterHost : public StackObj {
}
}
~JfrArtifactWriterHost() {
~JfrTypeWriterHost() {
if (_count == 0) {
// nothing written, restore context for rewind
_writer->set_context(_ctx);
......@@ -75,34 +74,31 @@ class JfrArtifactWriterHost : public StackObj {
void add(int count) { _count += count; }
};
typedef int(*artifact_write_operation)(JfrCheckpointWriter*, JfrArtifactSet*, const void*);
typedef int(*type_write_operation)(JfrCheckpointWriter*, const void*);
template <typename T, artifact_write_operation op>
class JfrArtifactWriterImplHost {
template <typename T, type_write_operation op>
class JfrTypeWriterImplHost {
private:
JfrCheckpointWriter* _writer;
JfrArtifactSet* _artifacts;
bool _class_unload;
public:
typedef T Type;
JfrArtifactWriterImplHost(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, bool class_unload) :
_writer(writer), _artifacts(artifacts), _class_unload(class_unload) {}
JfrTypeWriterImplHost(JfrCheckpointWriter* writer, bool class_unload = false) : _writer(writer) {}
int operator()(T const& value) {
return op(this->_writer, this->_artifacts, value);
return op(this->_writer, value);
}
};
template <typename T, typename Predicate, artifact_write_operation op>
class JfrPredicatedArtifactWriterImplHost : public JfrArtifactWriterImplHost<T, op> {
template <typename T, typename Predicate, type_write_operation op>
class JfrPredicatedTypeWriterImplHost : public JfrTypeWriterImplHost<T, op> {
private:
Predicate _predicate;
typedef JfrArtifactWriterImplHost<T, op> Parent;
typedef JfrTypeWriterImplHost<T, op> Parent;
public:
JfrPredicatedArtifactWriterImplHost(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, bool class_unload) :
Parent(writer, artifacts, class_unload), _predicate(class_unload) {}
JfrPredicatedTypeWriterImplHost(JfrCheckpointWriter* writer, bool class_unload = false) :
Parent(writer), _predicate(class_unload) {}
// Writes 'value' only if the predicate accepts it; returns the number of
// entries written by the parent writer, or 0 when the value is filtered out.
int operator()(T const& value) {
return _predicate(value) ? Parent::operator()(value) : 0;
}
};
#endif // SHARE_VM_JFR_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
#endif // SHARE_JFR_WRITERS_JFRTYPEWRITERHOST_HPP
......@@ -170,7 +170,7 @@ inline u1* WriterHost<BE, IE, WriterPolicyImpl>::ensure_size(size_t requested) {
}
if (this->available_size() < requested + size_safety_cushion) {
if (!this->accommodate(this->used_size(), requested + size_safety_cushion)) {
this->cancel();
assert(!this->is_valid(), "invariant");
return NULL;
}
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册