From 6585eb9ceefe7395469fdf618514149b368bc34b Mon Sep 17 00:00:00 2001 From: Denghui Dong Date: Wed, 15 Jul 2020 19:10:32 +0800 Subject: [PATCH] Revert "[JFR] add support for opto object allocations sampling" This reverts commit 7a01e5e279da3491a94d39f936c776de3a4ac263. --- src/share/vm/gc_interface/allocTracer.cpp | 1 + src/share/vm/gc_interface/allocTracer.hpp | 10 +- .../vm/gc_interface/allocTracer.inline.hpp | 120 ----------- src/share/vm/gc_interface/collectedHeap.hpp | 10 - .../vm/gc_interface/collectedHeap.inline.hpp | 14 +- src/share/vm/jfr/dcmd/jfrDcmds.cpp | 26 +-- src/share/vm/jfr/dcmd/jfrDcmds.hpp | 10 - src/share/vm/jfr/jni/jfrJniMethod.cpp | 20 +- src/share/vm/jfr/jni/jfrJniMethod.hpp | 4 - .../vm/jfr/jni/jfrJniMethodRegistration.cpp | 2 - .../leakprofiler/sampling/objectSampler.cpp | 2 +- .../vm/jfr/objectprofiler/objectProfiler.cpp | 89 -------- .../vm/jfr/objectprofiler/objectProfiler.hpp | 43 ---- src/share/vm/jfr/recorder/access/jfrFlush.cpp | 4 +- src/share/vm/jfr/recorder/access/jfrFlush.hpp | 6 +- .../vm/jfr/recorder/access/jfrOptionSet.cpp | 46 ----- .../vm/jfr/recorder/access/jfrOptionSet.hpp | 6 - .../jfr/recorder/access/jfrStackTraceMark.cpp | 11 +- .../vm/jfr/recorder/access/jfrThreadData.cpp | 6 +- .../vm/jfr/recorder/access/jfrThreadData.hpp | 72 +------ .../checkpoint/constant/jfrTagSet.cpp | 15 +- src/share/vm/jfr/recorder/jfrEventSetting.cpp | 10 - src/share/vm/jfr/recorder/jfrEventSetting.hpp | 2 - .../vm/jfr/recorder/service/jfrEvent.hpp | 3 +- .../stacktrace/jfrStackTraceRepository.cpp | 66 +----- .../stacktrace/jfrStackTraceRepository.hpp | 21 +- src/share/vm/opto/macro.cpp | 126 +----------- src/share/vm/opto/macro.hpp | 8 - src/share/vm/opto/runtime.cpp | 44 ---- src/share/vm/opto/runtime.hpp | 5 - src/share/vm/precompiled/precompiled.hpp | 2 - src/share/vm/runtime/thread.cpp | 1 - src/share/vm/runtime/vframe.hpp | 2 - src/share/vm/trace/traceBackend.hpp | 2 - src/share/vm/trace/traceMacros.hpp | 28 --- src/share/vm/trace/traceevents.xml | 13 +- src/share/vm/trace/tracetypes.xml | 1 - .../TestOptoObjectAllocationsSampling.java | 192 ------------------ .../java/testlibrary/jfr/EventNames.java | 2 - 39 files changed, 44 insertions(+), 1001 deletions(-) delete mode 100644 src/share/vm/gc_interface/allocTracer.inline.hpp delete mode 100644 src/share/vm/jfr/objectprofiler/objectProfiler.cpp delete mode 100644 src/share/vm/jfr/objectprofiler/objectProfiler.hpp delete mode 100644 test/jfr/event/objectsprofiling/TestOptoObjectAllocationsSampling.java diff --git a/src/share/vm/gc_interface/allocTracer.cpp b/src/share/vm/gc_interface/allocTracer.cpp index f7a73d2c7..55cc6e0ef 100644 --- a/src/share/vm/gc_interface/allocTracer.cpp +++ b/src/share/vm/gc_interface/allocTracer.cpp @@ -22,6 +22,7 @@ * */ +#include "precompiled.hpp" #include "gc_implementation/shared/gcId.hpp" #include "gc_interface/allocTracer.hpp" #include "trace/tracing.hpp" diff --git a/src/share/vm/gc_interface/allocTracer.hpp b/src/share/vm/gc_interface/allocTracer.hpp index b519b1be4..7b9dcbd75 100644 --- a/src/share/vm/gc_interface/allocTracer.hpp +++ b/src/share/vm/gc_interface/allocTracer.hpp @@ -25,22 +25,14 @@ #ifndef SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP #define SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP -#include "gc_implementation/shared/gcId.hpp" +#include "memory/allocation.hpp" #include "runtime/handles.hpp" -#include "utilities/globalDefinitions.hpp" class AllocTracer : AllStatic { - private: - static void send_opto_array_allocation_event(KlassHandle klass, oop obj,size_t alloc_size, 
Thread* thread); - static void send_opto_instance_allocation_event(KlassHandle klass, oop obj, Thread* thread); public: static void send_allocation_outside_tlab_event(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread); static void send_allocation_in_new_tlab_event(KlassHandle klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread); static void send_allocation_requiring_gc_event(size_t size, const GCId& gcId); - static void opto_slow_allocation_enter(bool is_array, Thread* thread); - static void opto_slow_allocation_leave(bool is_array, Thread* thread); - static void send_slow_allocation_event(KlassHandle klass, oop obj,size_t alloc_size, Thread* thread); - static void send_opto_fast_allocation_event(KlassHandle klass, oop obj, size_t alloc_size, Thread* thread); }; #endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP */ diff --git a/src/share/vm/gc_interface/allocTracer.inline.hpp b/src/share/vm/gc_interface/allocTracer.inline.hpp deleted file mode 100644 index 0f72b0d94..000000000 --- a/src/share/vm/gc_interface/allocTracer.inline.hpp +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2019 Alibaba Group Holding Limited. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Alibaba designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - */ -#ifndef SHARE_VM_GC_INTERFACE_ALLOCTRACER_INLINE_HPP -#define SHARE_VM_GC_INTERFACE_ALLOCTRACER_INLINE_HPP - -#include "trace/tracing.hpp" -#include "gc_implementation/shared/gcId.hpp" -#include "runtime/handles.hpp" -#include "utilities/globalDefinitions.hpp" -#include "gc_interface/allocTracer.hpp" -#include "trace/traceMacros.hpp" - -inline void AllocTracer::opto_slow_allocation_enter(bool is_array, Thread* thread) { - if (EnableJFR && - JfrOptionSet::sample_object_allocations() && - ObjectProfiler::enabled()) { - assert(thread != NULL, "Invariant"); - assert(thread->is_Java_thread(), "Invariant"); - thread->trace_data()->incr_alloc_count(1); - if (is_array) { - thread->trace_data()->set_cached_event_id(TraceOptoArrayObjectAllocationEvent); - } else { - thread->trace_data()->set_cached_event_id(TraceOptoInstanceObjectAllocationEvent); - } - } -} - -inline void AllocTracer::opto_slow_allocation_leave(bool is_array, Thread* thread) { -#ifndef PRODUCT - if (EnableJFR && - JfrOptionSet::sample_object_allocations()) { - TRACE_DATA* trace_data = thread->trace_data(); - assert(!trace_data->has_cached_event_id(), "Invariant"); - assert(trace_data->alloc_count_until_sample() >= trace_data->alloc_count(), "Invariant"); - } -#endif -} - -inline void AllocTracer::send_opto_array_allocation_event(KlassHandle klass, oop obj, size_t alloc_size, Thread* thread) { - EventOptoArrayObjectAllocation event; - if (event.should_commit()) { - event.set_objectClass(klass()); - event.set_address(cast_from_oop(obj)); - event.set_allocationSize(alloc_size); - event.commit(); - } -} - -inline void AllocTracer::send_opto_instance_allocation_event(KlassHandle klass, oop obj, Thread* thread) { - EventOptoInstanceObjectAllocation event; - if (event.should_commit()) { - event.set_objectClass(klass()); - event.set_address(cast_from_oop(obj)); - event.commit(); - } -} - -inline void AllocTracer::send_slow_allocation_event(KlassHandle klass, oop obj, size_t alloc_size, Thread* thread) { - if (EnableJFR && - JfrOptionSet::sample_object_allocations()) { - assert(thread != NULL, "Illegal parameter: thread is NULL"); - assert(thread == Thread::current(), "Invariant"); - if (thread->trace_data()->has_cached_event_id()) { - assert(thread->is_Java_thread(), "Only allow to be called from java thread"); - jlong alloc_count = thread->trace_data()->alloc_count(); - jlong alloc_count_until_sample = thread->trace_data()->alloc_count_until_sample(); - assert(alloc_count > 0 || alloc_count <= alloc_count_until_sample, "Invariant"); - if (alloc_count == alloc_count_until_sample) { - TraceEventId event_id = thread->trace_data()->cached_event_id(); - if (event_id ==TraceOptoArrayObjectAllocationEvent) { - send_opto_array_allocation_event(klass, obj, alloc_size, thread); - } else if(event_id == TraceOptoInstanceObjectAllocationEvent) { - send_opto_instance_allocation_event(klass, obj, thread); - } else { - ShouldNotReachHere(); - } - jlong interval = JfrOptionSet::object_allocations_sampling_interval(); - thread->trace_data()->incr_alloc_count_until_sample(interval); - } - thread->trace_data()->clear_cached_event_id(); - } - } -} - -inline void AllocTracer::send_opto_fast_allocation_event(KlassHandle klass, oop obj, size_t alloc_size, Thread* thread) { - assert(EnableJFR && JfrOptionSet::sample_object_allocations(), "Invariant"); - assert(thread != NULL, "Invariant"); - assert(thread->is_Java_thread(), "Invariant"); - assert(!thread->trace_data()->has_cached_event_id(), "Invariant"); - - Klass* k = klass(); - if (k->oop_is_array()) 
{ - send_opto_array_allocation_event(klass, obj, alloc_size, thread); - } else { - send_opto_instance_allocation_event(klass, obj, thread); - } - jlong interval = JfrOptionSet::object_allocations_sampling_interval(); - thread->trace_data()->incr_alloc_count_until_sample(interval); -} - -#endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_INLINE_HPP */ diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp index 5ee4f8818..b53536568 100644 --- a/src/share/vm/gc_interface/collectedHeap.hpp +++ b/src/share/vm/gc_interface/collectedHeap.hpp @@ -26,7 +26,6 @@ #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP #include "gc_interface/gcCause.hpp" -#include "gc_interface/allocTracer.hpp" #include "gc_implementation/shared/gcWhen.hpp" #include "memory/allocation.hpp" #include "memory/barrierSet.hpp" @@ -324,15 +323,6 @@ class CollectedHeap : public CHeapObj { inline static void check_array_size(int size, int length, TRAPS); public: - // Implicit Jfr inline methods. - static void trace_slow_allocation(KlassHandle klass, oop obj, size_t alloc_size, Thread* thread) { - AllocTracer::send_slow_allocation_event(klass, obj, alloc_size, thread); - } - - static void trace_allocation_outside_tlab(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread) { - AllocTracer::send_allocation_outside_tlab_event(klass, obj, alloc_size, thread); - } - inline static void post_allocation_install_obj_klass(KlassHandle klass, oop obj); diff --git a/src/share/vm/gc_interface/collectedHeap.inline.hpp b/src/share/vm/gc_interface/collectedHeap.inline.hpp index 7c8ccb7dc..39ff06164 100644 --- a/src/share/vm/gc_interface/collectedHeap.inline.hpp +++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP +#include "gc_interface/allocTracer.hpp" #include "gc_interface/collectedHeap.hpp" #include "memory/threadLocalAllocBuffer.inline.hpp" #include "memory/universe.hpp" @@ -34,8 +35,6 @@ #include "runtime/thread.inline.hpp" #include "services/lowMemoryDetector.hpp" #include "utilities/copy.hpp" -#include "jfr/recorder/access/jfrOptionSet.hpp" -#include "jfr/objectprofiler/objectProfiler.hpp" // Inline allocation implementations. 
@@ -70,7 +69,7 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass, "missing klass"); } -// Support for jvmti, jfr and dtrace +// Support for jvmti and dtrace inline void post_allocation_notify(KlassHandle klass, oop obj, int size) { // support low memory notifications (no-op if not enabled) LowMemoryDetector::detect_low_memory_for_collected_pools(); @@ -84,9 +83,6 @@ inline void post_allocation_notify(KlassHandle klass, oop obj, int size) { SharedRuntime::dtrace_object_alloc(obj, size); } } - - // support for jfr - CollectedHeap::trace_slow_allocation(klass, obj, size * HeapWordSize, Thread::current()); } void CollectedHeap::post_allocation_setup_obj(KlassHandle klass, @@ -95,7 +91,7 @@ void CollectedHeap::post_allocation_setup_obj(KlassHandle klass, post_allocation_setup_common(klass, obj); assert(Universe::is_bootstrapping() || !((oop)obj)->is_array(), "must not be an array"); - // notify jvmti, jfr and dtrace + // notify jvmti and dtrace post_allocation_notify(klass, (oop)obj, size); } @@ -110,7 +106,7 @@ void CollectedHeap::post_allocation_setup_array(KlassHandle klass, post_allocation_setup_common(klass, obj); oop new_obj = (oop)obj; assert(new_obj->is_array(), "must be an array"); - // notify jvmti, jfr and dtrace (must be after length is set for dtrace) + // notify jvmti and dtrace (must be after length is set for dtrace) post_allocation_notify(klass, new_obj, new_obj->size()); } @@ -144,7 +140,7 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t si "Unexpected exception, will result in uninitialized storage"); THREAD->incr_allocated_bytes(size * HeapWordSize); - CollectedHeap::trace_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD); + AllocTracer::send_allocation_outside_tlab_event(klass, result, size * HeapWordSize, THREAD); return result; } diff --git a/src/share/vm/jfr/dcmd/jfrDcmds.cpp b/src/share/vm/jfr/dcmd/jfrDcmds.cpp index 026899a12..293cb2b3e 100644 --- a/src/share/vm/jfr/dcmd/jfrDcmds.cpp +++ b/src/share/vm/jfr/dcmd/jfrDcmds.cpp @@ -502,7 +502,6 @@ void JfrStopFlightRecordingDCmd::execute(DCmdSource source, TRAPS) { JfrConfigureFlightRecorderDCmd::JfrConfigureFlightRecorderDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap), - _on_vm_start(false), _repository_path("repositorypath", "Path to repository,.e.g \\\"My Repository\\\"", "STRING", false, NULL), _dump_path("dumppath", "Path to dump,.e.g \\\"My Dump path\\\"", "STRING", false, NULL), _stack_depth("stackdepth", "Stack Depth", "JLONG", false, "64"), @@ -511,9 +510,7 @@ JfrConfigureFlightRecorderDCmd::JfrConfigureFlightRecorderDCmd(outputStream* out _thread_buffer_size("thread_buffer_size", "Size of a thread buffer", "JLONG", false, "8192"), _memory_size("memorysize", "Overall memory size, ", "JLONG", false, "16777216"), _max_chunk_size("maxchunksize", "Size of an individual disk chunk", "JLONG", false, "12582912"), - _sample_threads("samplethreads", "Activate Thread sampling", "BOOLEAN", false, "true"), - _sample_object_allocations("sampleobjectallocations","object allocations sampling enable / disable", "BOOLEAN", false, "false"), - _object_allocations_sampling_interval("objectallocationssamplinginterval", "object allocations sampling interval", "JLONG", false, "1024") { + _sample_threads("samplethreads", "Activate Thread sampling", "BOOLEAN", false, "true") { _dcmdparser.add_dcmd_option(&_repository_path); _dcmdparser.add_dcmd_option(&_dump_path); _dcmdparser.add_dcmd_option(&_stack_depth); @@ -523,8 +520,6 @@ 
JfrConfigureFlightRecorderDCmd::JfrConfigureFlightRecorderDCmd(outputStream* out _dcmdparser.add_dcmd_option(&_memory_size); _dcmdparser.add_dcmd_option(&_max_chunk_size); _dcmdparser.add_dcmd_option(&_sample_threads); - _dcmdparser.add_dcmd_option(&_sample_object_allocations); - _dcmdparser.add_dcmd_option(&_object_allocations_sampling_interval); }; int JfrConfigureFlightRecorderDCmd::num_arguments() { @@ -555,8 +550,6 @@ void JfrConfigureFlightRecorderDCmd::execute(DCmdSource source, TRAPS) { Handle h_dcmd_instance(THREAD, dcmd); assert(h_dcmd_instance.not_null(), "invariant"); - jobject on_vm_start = JfrJavaSupport::new_java_lang_Boolean(_on_vm_start, CHECK); - jstring repository_path = NULL; if (_repository_path.is_set() && _repository_path.value() != NULL) { repository_path = JfrJavaSupport::new_string(_repository_path.value(), CHECK); @@ -602,27 +595,16 @@ void JfrConfigureFlightRecorderDCmd::execute(DCmdSource source, TRAPS) { sample_threads = JfrJavaSupport::new_java_lang_Boolean(_sample_threads.value(), CHECK); } - jobject sample_object_allocations = NULL; - if (_sample_object_allocations.is_set()) { - sample_object_allocations = JfrJavaSupport::new_java_lang_Boolean(_sample_object_allocations.value(), CHECK); - } - - jobject object_allocations_sampling_interval = NULL; - if (_object_allocations_sampling_interval.is_set()) { - object_allocations_sampling_interval = JfrJavaSupport::new_java_lang_Long(_object_allocations_sampling_interval.value(), CHECK); - } - static const char klass[] = "jdk/jfr/internal/dcmd/DCmdConfigure"; static const char method[] = "execute"; - static const char signature[] = "(Ljava/lang/Boolean;Ljava/lang/String;Ljava/lang/String;Ljava/lang/Integer;" + static const char signature[] = "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/Integer;" "Ljava/lang/Long;Ljava/lang/Long;Ljava/lang/Long;Ljava/lang/Long;" - "Ljava/lang/Long;Ljava/lang/Boolean;Ljava/lang/Boolean;Ljava/lang/Long;)Ljava/lang/String;"; + "Ljava/lang/Long;Ljava/lang/Boolean;)Ljava/lang/String;"; JfrJavaArguments execute_args(&result, klass, method, signature, CHECK); execute_args.set_receiver(h_dcmd_instance); // params - execute_args.push_jobject(on_vm_start); execute_args.push_jobject(repository_path); execute_args.push_jobject(dump_path); execute_args.push_jobject(stack_depth); @@ -632,8 +614,6 @@ void JfrConfigureFlightRecorderDCmd::execute(DCmdSource source, TRAPS) { execute_args.push_jobject(memory_size); execute_args.push_jobject(max_chunk_size); execute_args.push_jobject(sample_threads); - execute_args.push_jobject(sample_object_allocations); - execute_args.push_jobject(object_allocations_sampling_interval); JfrJavaSupport::call_virtual(&execute_args, THREAD); handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD); diff --git a/src/share/vm/jfr/dcmd/jfrDcmds.hpp b/src/share/vm/jfr/dcmd/jfrDcmds.hpp index 1ca8c9f20..035179cbd 100644 --- a/src/share/vm/jfr/dcmd/jfrDcmds.hpp +++ b/src/share/vm/jfr/dcmd/jfrDcmds.hpp @@ -136,11 +136,6 @@ class JfrRuntimeOptions; class JfrConfigureFlightRecorderDCmd : public DCmdWithParser { friend class JfrOptionSet; - private: - bool _on_vm_start; - void set_on_vm_start(bool on_vm_start) { - _on_vm_start = on_vm_start; - } protected: DCmdArgument _repository_path; DCmdArgument _dump_path; @@ -151,8 +146,6 @@ class JfrConfigureFlightRecorderDCmd : public DCmdWithParser { DCmdArgument _memory_size; DCmdArgument _max_chunk_size; DCmdArgument _sample_threads; - DCmdArgument _sample_object_allocations; - DCmdArgument 
_object_allocations_sampling_interval; public: JfrConfigureFlightRecorderDCmd(outputStream* output, bool heap); @@ -171,9 +164,6 @@ class JfrConfigureFlightRecorderDCmd : public DCmdWithParser { } static int num_arguments(); virtual void execute(DCmdSource source, TRAPS); - bool on_vm_start() const { - return _on_vm_start; - } }; class JfrUnlockCommercialFeaturesDCmd : public DCmd { diff --git a/src/share/vm/jfr/jni/jfrJniMethod.cpp b/src/share/vm/jfr/jni/jfrJniMethod.cpp index 0d1a33079..fd30a0dbb 100644 --- a/src/share/vm/jfr/jni/jfrJniMethod.cpp +++ b/src/share/vm/jfr/jni/jfrJniMethod.cpp @@ -43,7 +43,6 @@ #include "jfr/instrumentation/jfrEventClassTransformer.hpp" #include "jfr/instrumentation/jfrJvmtiAgent.hpp" #include "jfr/leakprofiler/leakProfiler.hpp" -#include "jfr/objectprofiler/objectProfiler.hpp" #include "jfr/utilities/jfrJavaLog.hpp" #include "jfr/utilities/jfrTimeConverter.hpp" #include "jfr/utilities/jfrTraceTime.hpp" @@ -114,15 +113,6 @@ NO_TRANSITION(void, jfr_set_enabled(JNIEnv* env, jobject jvm, jlong event_type_i LeakProfiler::stop(); } } - if (EventOptoInstanceObjectAllocation::eventId == event_type_id || - EventOptoArrayObjectAllocation::eventId == event_type_id) { - ThreadInVMfromNative transition(JavaThread::thread_from_jni_environment(env)); - if (JNI_TRUE == enabled) { - ObjectProfiler::start(event_type_id); - } else { - ObjectProfiler::stop(event_type_id); - } - } NO_TRANSITION_END NO_TRANSITION(void, jfr_set_file_notification(JNIEnv* env, jobject jvm, jlong threshold)) @@ -157,14 +147,6 @@ NO_TRANSITION(void, jfr_set_memory_size(JNIEnv* env, jobject jvm, jlong size)) JfrOptionSet::set_memory_size(size); NO_TRANSITION_END -NO_TRANSITION(void, jfr_set_sample_object_allocations(JNIEnv* env, jobject jvm, jboolean sampleAllocations)) - JfrOptionSet::set_sample_object_allocations(sampleAllocations); -NO_TRANSITION_END - -NO_TRANSITION(void, jfr_set_object_allocations_sampling_interval(JNIEnv* env, jobject jvm, jlong interval)) - JfrOptionSet::set_object_allocations_sampling_interval(interval); -NO_TRANSITION_END - NO_TRANSITION(jboolean, jfr_set_threshold(JNIEnv* env, jobject jvm, jlong event_type_id, jlong thresholdTicks)) return JfrEventSetting::set_threshold(event_type_id, thresholdTicks) ? 
JNI_TRUE : JNI_FALSE; NO_TRANSITION_END @@ -249,7 +231,7 @@ JVM_ENTRY_NO_ENV(jlong, jfr_class_id(JNIEnv* env, jclass jvm, jclass jc)) JVM_END JVM_ENTRY_NO_ENV(jlong, jfr_stacktrace_id(JNIEnv* env, jobject jvm, jint skip)) - return JfrStackTraceRepository::record(thread, skip, WALK_BY_DEFAULT); + return JfrStackTraceRepository::record(thread, skip); JVM_END JVM_ENTRY_NO_ENV(void, jfr_log(JNIEnv* env, jobject jvm, jint tag_set, jint level, jstring message)) diff --git a/src/share/vm/jfr/jni/jfrJniMethod.hpp b/src/share/vm/jfr/jni/jfrJniMethod.hpp index 2de6cfa01..cc18997e6 100644 --- a/src/share/vm/jfr/jni/jfrJniMethod.hpp +++ b/src/share/vm/jfr/jni/jfrJniMethod.hpp @@ -93,10 +93,6 @@ void JNICALL jfr_set_memory_size(JNIEnv* env, jobject jvm, jlong size); jboolean JNICALL jfr_set_threshold(JNIEnv* env, jobject jvm, jlong event_type_id, jlong thresholdTicks); -void JNICALL jfr_set_sample_object_allocations(JNIEnv* env, jobject jvm, jboolean sampleAllocations); - -void JNICALL jfr_set_object_allocations_sampling_interval(JNIEnv* env, jobject jvm, jlong interval); - void JNICALL jfr_store_metadata_descriptor(JNIEnv* env, jobject jvm, jbyteArray descriptor); jlong JNICALL jfr_id_for_thread(JNIEnv* env, jobject jvm, jobject t); diff --git a/src/share/vm/jfr/jni/jfrJniMethodRegistration.cpp b/src/share/vm/jfr/jni/jfrJniMethodRegistration.cpp index d9ca18801..2770912e1 100644 --- a/src/share/vm/jfr/jni/jfrJniMethodRegistration.cpp +++ b/src/share/vm/jfr/jni/jfrJniMethodRegistration.cpp @@ -62,8 +62,6 @@ JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) { (char*)"setThreadBufferSize", (char*)"(J)V", (void*)jfr_set_thread_buffer_size, (char*)"setMemorySize", (char*)"(J)V", (void*)jfr_set_memory_size, (char*)"setThreshold", (char*)"(JJ)Z", (void*)jfr_set_threshold, - (char*)"setSampleObjectAllocations", (char*)"(Z)V",(void*)jfr_set_sample_object_allocations, - (char*)"setObjectAllocationsSamplingInterval", (char*)"(J)V",(void*)jfr_set_object_allocations_sampling_interval, (char*)"storeMetadataDescriptor", (char*)"([B)V", (void*)jfr_store_metadata_descriptor, (char*)"getAllowedToDoEventRetransforms", (char*)"()Z", (void*)jfr_allow_event_retransforms, (char*)"isAvailable", (char*)"()Z", (void*)jfr_is_available, diff --git a/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp index 8bbf89030..71e875183 100644 --- a/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp +++ b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp @@ -68,7 +68,7 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) { traceid stack_trace_id = 0; unsigned int stack_trace_hash = 0; if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) { - stack_trace_id = JfrStackTraceRepository::record(thread, 0, WALK_BY_DEFAULT, &stack_trace_hash); + stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash); thread->trace_data()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash); } diff --git a/src/share/vm/jfr/objectprofiler/objectProfiler.cpp b/src/share/vm/jfr/objectprofiler/objectProfiler.cpp deleted file mode 100644 index efd438c54..000000000 --- a/src/share/vm/jfr/objectprofiler/objectProfiler.cpp +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2019 Alibaba Group Holding Limited. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Alibaba designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - */ -#include "runtime/vmThread.hpp" -#include "jfr/objectprofiler/objectProfiler.hpp" -#include "jfr/utilities/jfrTryLock.hpp" -#include "tracefiles/traceEventClasses.hpp" - -volatile jint ObjectProfiler::_enabled = JNI_FALSE; -bool ObjectProfiler::_sample_instance_obj_alloc = false; -bool ObjectProfiler::_sample_array_obj_alloc = false; -#ifndef PRODUCT -volatile int ObjectProfiler::_try_lock = 0; -#endif - -void ObjectProfiler::start(jlong event_id) { -#ifndef PRODUCT - JfrTryLock try_lock(&_try_lock); - assert(try_lock.has_lock(), "Not allow contention"); -#endif - if (EventOptoInstanceObjectAllocation::eventId == event_id) { - if (!_sample_instance_obj_alloc) { - _sample_instance_obj_alloc = true; - } - } else if (EventOptoArrayObjectAllocation::eventId == event_id) { - if (!_sample_array_obj_alloc) { - _sample_array_obj_alloc = true; - } - } else { - ShouldNotReachHere(); - } - if (enabled() == JNI_TRUE) { - return; - } - OrderAccess::release_store((volatile jint*)&_enabled, JNI_TRUE); -} - -void ObjectProfiler::stop(jlong event_id) { -#ifndef PRODUCT - JfrTryLock try_lock(&_try_lock); - assert(try_lock.has_lock(), "Not allow contention"); -#endif - if (enabled() == JNI_FALSE) { - assert(!_sample_array_obj_alloc && !_sample_instance_obj_alloc, "Invariant"); - return; - } - if (EventOptoInstanceObjectAllocation::eventId == event_id) { - if (_sample_instance_obj_alloc) { - _sample_instance_obj_alloc = false; - } - } else if (EventOptoArrayObjectAllocation::eventId == event_id) { - if (_sample_array_obj_alloc) { - _sample_array_obj_alloc = false; - } - } else { - ShouldNotReachHere(); - } - bool should_enable = _sample_array_obj_alloc || _sample_instance_obj_alloc; - if (should_enable) { - return; - } - OrderAccess::release_store(&_enabled, JNI_FALSE); -} - -jint ObjectProfiler::enabled() { - return OrderAccess::load_acquire((volatile jint*)&_enabled); -} - -void* ObjectProfiler::enabled_flag_address() { - return (void*)&_enabled; -} diff --git a/src/share/vm/jfr/objectprofiler/objectProfiler.hpp b/src/share/vm/jfr/objectprofiler/objectProfiler.hpp deleted file mode 100644 index a2fbe47b2..000000000 --- a/src/share/vm/jfr/objectprofiler/objectProfiler.hpp +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2019 Alibaba Group Holding Limited. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
Alibaba designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - */ -#ifndef SHARE_VM_JFR_OBJECTROFILER_OBJECTPROFILER_HPP -#define SHARE_VM_JFR_OBJECTROFILER_OBJECTPROFILER_HPP - -#include "prims/jni.h" - -class ObjectProfiler : public AllStatic { - private: - static volatile jint _enabled; - static bool _sample_instance_obj_alloc; - static bool _sample_array_obj_alloc; -#ifndef PRODUCT - static volatile int _try_lock; -#endif - - public: - static void start(jlong event_id); - static void stop(jlong event_id); - static jint enabled(); - static void* enabled_flag_address(); -}; - -#endif // SHARE_VM_JFR_OBJECTROFILER_OBJECTPROFILER_HPP diff --git a/src/share/vm/jfr/recorder/access/jfrFlush.cpp b/src/share/vm/jfr/recorder/access/jfrFlush.cpp index b953b8514..05fb5cc30 100644 --- a/src/share/vm/jfr/recorder/access/jfrFlush.cpp +++ b/src/share/vm/jfr/recorder/access/jfrFlush.cpp @@ -72,12 +72,12 @@ void jfr_conditional_flush(TraceEventId id, size_t size, Thread* t) { } } -bool jfr_save_stacktrace(Thread* t, StackWalkMode mode) { +bool jfr_save_stacktrace(Thread* t) { JfrThreadData* const trace_data = t->trace_data(); if (trace_data->has_cached_stack_trace()) { return false; // no ownership } - trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(t, 0, mode)); + trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(t)); return true; } diff --git a/src/share/vm/jfr/recorder/access/jfrFlush.hpp b/src/share/vm/jfr/recorder/access/jfrFlush.hpp index b6af0fa4d..501fc3500 100644 --- a/src/share/vm/jfr/recorder/access/jfrFlush.hpp +++ b/src/share/vm/jfr/recorder/access/jfrFlush.hpp @@ -26,8 +26,6 @@ #define SHARE_VM_JFR_RECORDER_ACCESS_JFRFLUSH_HPP #include "jfr/recorder/storage/jfrBuffer.hpp" -#include "jfr/recorder/jfrEventSetting.hpp" -#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" #include "memory/allocation.hpp" #include "tracefiles/traceEventIds.hpp" @@ -45,7 +43,7 @@ class JfrFlush : public StackObj { void jfr_conditional_flush(TraceEventId id, size_t size, Thread* t); bool jfr_is_event_enabled(TraceEventId id); bool jfr_has_stacktrace_enabled(TraceEventId id); -bool jfr_save_stacktrace(Thread* t, StackWalkMode mode); +bool jfr_save_stacktrace(Thread* t); void jfr_clear_stacktrace(Thread* t); template @@ -66,7 +64,7 @@ class JfrEventConditionalFlushWithStacktrace : public JfrEventConditionalFlush(t), _t(t), _owner(false) { if (Event::hasStackTrace && jfr_has_stacktrace_enabled(Event::eventId)) { - _owner = jfr_save_stacktrace(t, JfrEventSetting::stack_walk_mode(Event::eventId)); + _owner = jfr_save_stacktrace(t); } } ~JfrEventConditionalFlushWithStacktrace() { diff --git a/src/share/vm/jfr/recorder/access/jfrOptionSet.cpp b/src/share/vm/jfr/recorder/access/jfrOptionSet.cpp index 716996e30..7cedba714 100644 --- a/src/share/vm/jfr/recorder/access/jfrOptionSet.cpp +++ 
b/src/share/vm/jfr/recorder/access/jfrOptionSet.cpp @@ -35,7 +35,6 @@ #include "services/diagnosticArgument.hpp" #include "services/diagnosticFramework.hpp" #include "utilities/ostream.hpp" -#include "utilities/debug.hpp" struct ObsoleteOption { const char* name; @@ -163,22 +162,6 @@ bool JfrOptionSet::allow_event_retransforms() { return allow_retransforms() && (DumpSharedSpaces || can_retransform()); } -bool JfrOptionSet::sample_object_allocations() { - return _sample_object_allocations == JNI_TRUE; -} - -void JfrOptionSet::set_sample_object_allocations(jboolean value) { - _sample_object_allocations = value; -} - -jlong JfrOptionSet::object_allocations_sampling_interval() { - return _object_allocations_sampling_interval; -} - -void JfrOptionSet::set_object_allocations_sampling_interval(jlong value) { - _object_allocations_sampling_interval = value; -} - // default options for the dcmd parser const char* const default_repository = NULL; const char* const default_global_buffer_size = "512k"; @@ -191,9 +174,6 @@ const char* const default_stack_depth = "64"; const char* const default_retransform = "true"; const char* const default_old_object_queue_size = "256"; DEBUG_ONLY(const char* const default_sample_protection = "false";) -const char* const default_sample_object_allocations = "false"; -// the unit of this value is not time but quantity -const char* const default_object_allocations_sampling_interval = "1024"; // statics static DCmdArgument _dcmd_repository( @@ -275,20 +255,6 @@ static DCmdArgument _dcmd_retransform( true, default_retransform); -static DCmdArgument _dcmd_sampleobjectallocations( - "sampleobjectallocations", - "If object allocations should be sampled (by default false)", - "BOOLEAN", - false, - default_sample_object_allocations); - -static DCmdArgument _dcmd_objectallocationssamplinginterval( - "objectallocationssamplinginterval", - "object allocations sampling interval (by default 1024)", - "JLONG", - false, - default_object_allocations_sampling_interval); - static DCmdParser _parser; static void register_parser_options() { @@ -303,8 +269,6 @@ static void register_parser_options() { _parser.add_dcmd_option(&_dcmd_retransform); _parser.add_dcmd_option(&_dcmd_old_object_queue_size); DEBUG_ONLY(_parser.add_dcmd_option(&_dcmd_sample_protection);) - _parser.add_dcmd_option(&_dcmd_sampleobjectallocations); - _parser.add_dcmd_option(&_dcmd_objectallocationssamplinginterval); } static bool parse_flight_recorder_options_internal(TRAPS) { @@ -350,9 +314,6 @@ jboolean JfrOptionSet::_sample_protection = JNI_FALSE; #else jboolean JfrOptionSet::_sample_protection = JNI_TRUE; #endif -volatile jboolean JfrOptionSet::_sample_object_allocations = JNI_FALSE; -// the unit of this value is not time but quantity -volatile jlong JfrOptionSet::_object_allocations_sampling_interval = 1024; bool JfrOptionSet::initialize(Thread* thread) { register_parser_options(); @@ -374,7 +335,6 @@ bool JfrOptionSet::configure(TRAPS) { bufferedStream st; // delegate to DCmd execution JfrConfigureFlightRecorderDCmd configure(&st, false); - configure.set_on_vm_start(true); configure._repository_path.set_is_set(_dcmd_repository.is_set()); char* repo = _dcmd_repository.value(); if (repo != NULL) { @@ -408,12 +368,6 @@ bool JfrOptionSet::configure(TRAPS) { configure._sample_threads.set_is_set(_dcmd_sample_threads.is_set()); configure._sample_threads.set_value(_dcmd_sample_threads.value()); - configure._sample_object_allocations.set_is_set(_dcmd_sampleobjectallocations.is_set()); - 
configure._sample_object_allocations.set_value(_dcmd_sampleobjectallocations.value()); - - configure._object_allocations_sampling_interval.set_is_set(_dcmd_objectallocationssamplinginterval.is_set()); - configure._object_allocations_sampling_interval.set_value(_dcmd_objectallocationssamplinginterval.value()); - configure.execute(DCmd_Source_Internal, THREAD); if (HAS_PENDING_EXCEPTION) { diff --git a/src/share/vm/jfr/recorder/access/jfrOptionSet.hpp b/src/share/vm/jfr/recorder/access/jfrOptionSet.hpp index 5602ed0b9..dc48eba69 100644 --- a/src/share/vm/jfr/recorder/access/jfrOptionSet.hpp +++ b/src/share/vm/jfr/recorder/access/jfrOptionSet.hpp @@ -45,8 +45,6 @@ class JfrOptionSet : public AllStatic { static jboolean _sample_threads; static jboolean _retransform; static jboolean _sample_protection; - static volatile jboolean _sample_object_allocations; - static volatile jlong _object_allocations_sampling_interval; static bool initialize(Thread* thread); static bool configure(TRAPS); @@ -76,10 +74,6 @@ class JfrOptionSet : public AllStatic { static bool allow_event_retransforms(); static bool sample_protection(); DEBUG_ONLY(static void set_sample_protection(jboolean protection);) - static bool sample_object_allocations(); - static void set_sample_object_allocations(jboolean value); - static jlong object_allocations_sampling_interval(); - static void set_object_allocations_sampling_interval(jlong value); static bool parse_start_flight_recording(const JavaVMOption** option, char* tail); static bool parse_flight_recorder_options(const JavaVMOption** option, char* tail); diff --git a/src/share/vm/jfr/recorder/access/jfrStackTraceMark.cpp b/src/share/vm/jfr/recorder/access/jfrStackTraceMark.cpp index db5b2d4fc..cc527a6cc 100644 --- a/src/share/vm/jfr/recorder/access/jfrStackTraceMark.cpp +++ b/src/share/vm/jfr/recorder/access/jfrStackTraceMark.cpp @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "jfr/recorder/jfrEventSetting.inline.hpp" -#include "jfr/recorder/jfrEventSetting.hpp" #include "jfr/recorder/access/jfrStackTraceMark.hpp" #include "jfr/recorder/access/jfrThreadData.hpp" #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" @@ -36,7 +35,7 @@ JfrStackTraceMark::JfrStackTraceMark() : _t(Thread::current()), _previous_id(0), _previous_id = trace_data->cached_stack_trace_id(); _previous_hash = trace_data->cached_stack_trace_hash(); } - trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(Thread::current(), 0, WALK_BY_DEFAULT)); + trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(Thread::current())); } JfrStackTraceMark::JfrStackTraceMark(Thread* t) : _t(t), _previous_id(0), _previous_hash(0) { @@ -45,7 +44,7 @@ JfrStackTraceMark::JfrStackTraceMark(Thread* t) : _t(t), _previous_id(0), _previ _previous_id = trace_data->cached_stack_trace_id(); _previous_hash = trace_data->cached_stack_trace_hash(); } - trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(t, 0, WALK_BY_DEFAULT)); + trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(t)); } JfrStackTraceMark::JfrStackTraceMark(TraceEventId eventId) : _t(NULL), _previous_id(0), _previous_hash(0) { @@ -56,8 +55,7 @@ JfrStackTraceMark::JfrStackTraceMark(TraceEventId eventId) : _t(NULL), _previous _previous_id = trace_data->cached_stack_trace_id(); _previous_hash = trace_data->cached_stack_trace_hash(); } - StackWalkMode mode = JfrEventSetting::stack_walk_mode(eventId); - trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(_t, 0, mode)); + 
trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(_t)); } } @@ -69,8 +67,7 @@ JfrStackTraceMark::JfrStackTraceMark(TraceEventId eventId, Thread* t) : _t(NULL) _previous_id = trace_data->cached_stack_trace_id(); _previous_hash = trace_data->cached_stack_trace_hash(); } - StackWalkMode mode = JfrEventSetting::stack_walk_mode(eventId); - trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(_t, 0, mode)); + trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(_t)); } } diff --git a/src/share/vm/jfr/recorder/access/jfrThreadData.cpp b/src/share/vm/jfr/recorder/access/jfrThreadData.cpp index ccbef13bd..eee6463d3 100644 --- a/src/share/vm/jfr/recorder/access/jfrThreadData.cpp +++ b/src/share/vm/jfr/recorder/access/jfrThreadData.cpp @@ -52,11 +52,7 @@ JfrThreadData::JfrThreadData() : _wallclock_time(os::javaTimeNanos()), _stack_trace_hash(0), _stackdepth(0), - _entering_suspend_flag(0), - _cached_top_frame_bci(max_jint), - _alloc_count(0), - _alloc_count_until_sample(1), - _cached_event_id(MaxTraceEventId) {} + _entering_suspend_flag(0) {} u8 JfrThreadData::add_data_lost(u8 value) { _data_lost += value; diff --git a/src/share/vm/jfr/recorder/access/jfrThreadData.hpp b/src/share/vm/jfr/recorder/access/jfrThreadData.hpp index da17b6f71..51bcdf6f4 100644 --- a/src/share/vm/jfr/recorder/access/jfrThreadData.hpp +++ b/src/share/vm/jfr/recorder/access/jfrThreadData.hpp @@ -51,27 +51,6 @@ class JfrThreadData { unsigned int _stack_trace_hash; mutable u4 _stackdepth; volatile jint _entering_suspend_flag; - // Jfr callstack collection relies on vframeStream. - // But the bci of top frame can not be determined by vframeStream in some scenarios. - // For example, in the opto CallLeafNode runtime call of - // OptoRuntime::jfr_fast_object_alloc_C, the top frame bci - // returned by vframeStream is always invalid. This is largely due to the oopmap that - // is not correctly granted ( refer to PhaseMacroExpand::expand_allocate_common to get more details ). - // The opto fast path object allocation tracing occurs in the opto CallLeafNode, - // which has been broken by invalid top frame bci. - // To fix this, we get the top frame bci in opto compilation phase - // and pass it as parameter to runtime call. Our implementation will replace the invalid top - // frame bci with cached_top_frame_bci. - jint _cached_top_frame_bci; - jlong _alloc_count; - jlong _alloc_count_until_sample; - // This field is used to help to distinguish the object allocation request source. - // For example, for object allocation slow path, we trace it in CollectedHeap::obj_allocate. - // But in CollectedHeap::obj_allocate, it is impossible to determine where the allocation request - // is from, which could be from c1, opto, or even interpreter. - // We save this infomation in _event_id, which later can be retrieved in - // CollecetedHeap::obj_allocate to identify the real allocation request source. 
- TraceEventId _cached_event_id; JfrBuffer* install_native_buffer() const; JfrBuffer* install_java_buffer() const; @@ -170,6 +149,7 @@ class JfrThreadData { return _stack_trace_hash; } + void set_trace_block() { _entering_suspend_flag = 1; } @@ -212,54 +192,6 @@ class JfrThreadData { _wallclock_time = wallclock_time; } - void set_cached_top_frame_bci(jint bci) { - _cached_top_frame_bci = bci; - } - - bool has_cached_top_frame_bci() const { - return _cached_top_frame_bci != max_jint; - } - - jint cached_top_frame_bci() const { - return _cached_top_frame_bci; - } - - void clear_cached_top_frame_bci() { - _cached_top_frame_bci = max_jint; - } - - jlong alloc_count() const { - return _alloc_count; - } - - void incr_alloc_count(jlong delta) { - _alloc_count += delta; - } - - jlong alloc_count_until_sample() const { - return _alloc_count_until_sample; - } - - void incr_alloc_count_until_sample(jlong delta) { - _alloc_count_until_sample += delta; - } - - void set_cached_event_id(TraceEventId event_id) { - _cached_event_id = event_id; - } - - TraceEventId cached_event_id() const { - return _cached_event_id; - } - - bool has_cached_event_id() const { - return _cached_event_id != MaxTraceEventId; - } - - void clear_cached_event_id() { - _cached_event_id = MaxTraceEventId; - } - bool has_thread_checkpoint() const; void set_thread_checkpoint(const JfrCheckpointBlobHandle& handle); const JfrCheckpointBlobHandle& thread_checkpoint() const; @@ -275,8 +207,6 @@ class JfrThreadData { TRACE_DEFINE_THREAD_ID_OFFSET; TRACE_DEFINE_THREAD_ID_SIZE; TRACE_DEFINE_THREAD_DATA_WRITER_OFFSET; - TRACE_DEFINE_THREAD_ALLOC_COUNT_UNTIL_SAMPLE_OFFSET; - TRACE_DEFINE_THREAD_ALLOC_COUNT_OFFSET; }; #endif // SHARE_VM_JFR_JFRTHREADDATA_HPP diff --git a/src/share/vm/jfr/recorder/checkpoint/constant/jfrTagSet.cpp b/src/share/vm/jfr/recorder/checkpoint/constant/jfrTagSet.cpp index 6348b2712..1d84691a5 100644 --- a/src/share/vm/jfr/recorder/checkpoint/constant/jfrTagSet.cpp +++ b/src/share/vm/jfr/recorder/checkpoint/constant/jfrTagSet.cpp @@ -106,32 +106,21 @@ int write__artifact__klass(JfrCheckpointWriter* writer, JfrArtifactSet* artifact assert(k != NULL, "invariant"); KlassPtr klass = (KlassPtr)k; traceid pkg_id = 0; -#ifndef PRODUCT KlassPtr theklass = klass; if (theklass->oop_is_objArray()) { const ObjArrayKlass* obj_arr_klass = ObjArrayKlass::cast(klass); theklass = obj_arr_klass->bottom_klass(); } - if (!theklass->oop_is_instance()) { + if (theklass->oop_is_instance()) { + } else { assert(theklass->oop_is_typeArray(), "invariant"); } -#endif const traceid symbol_id = artifacts->mark(klass); assert(symbol_id > 0, "need to have an address for symbol!"); writer->write(TRACE_ID(klass)); writer->write(cld_id(klass->class_loader_data())); writer->write((traceid)CREATE_SYMBOL_ID(symbol_id)); writer->write((s4)klass->access_flags().get_flags()); - if (klass->oop_is_array()) { - // The object array size can not be determined statically from klass. - // It is determined by the elements length in object layout. - // So we put a place holder here to make the event parser ignore it. 
- writer->write((s4)ARRAY_OBJECT_SIZE_PLACE_HOLDER); - } else { - assert(klass->oop_is_instance(), "invariant"); - jint instanceSize = ((InstanceKlass*) klass)->size_helper() * HeapWordSize; - writer->write((s4)instanceSize); - } return 1; } diff --git a/src/share/vm/jfr/recorder/jfrEventSetting.cpp b/src/share/vm/jfr/recorder/jfrEventSetting.cpp index fd41740d8..e360932d4 100644 --- a/src/share/vm/jfr/recorder/jfrEventSetting.cpp +++ b/src/share/vm/jfr/recorder/jfrEventSetting.cpp @@ -23,9 +23,7 @@ */ #include "precompiled.hpp" -#include "tracefiles/traceEventIds.hpp" #include "jfr/recorder/jfrEventSetting.inline.hpp" -#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" JfrNativeSettings JfrEventSetting::_jvm_event_settings; @@ -55,14 +53,6 @@ void JfrEventSetting::set_enabled(jlong id, bool enabled) { setting(event_id).enabled = enabled; } -StackWalkMode JfrEventSetting::stack_walk_mode(TraceEventId event_id) { - if (event_id == TraceOptoArrayObjectAllocationEvent || - event_id == TraceOptoInstanceObjectAllocationEvent) { - return WALK_BY_CURRENT_FRAME; - } - return WALK_BY_DEFAULT; -} - #ifdef ASSERT bool JfrEventSetting::bounds_check_event(jlong id) { if ((unsigned)id < NUM_RESERVED_EVENTS || (unsigned)id >= MaxTraceEventId) { diff --git a/src/share/vm/jfr/recorder/jfrEventSetting.hpp b/src/share/vm/jfr/recorder/jfrEventSetting.hpp index 2746d6ebd..9d8cfda1f 100644 --- a/src/share/vm/jfr/recorder/jfrEventSetting.hpp +++ b/src/share/vm/jfr/recorder/jfrEventSetting.hpp @@ -27,7 +27,6 @@ #include "jni.h" #include "jfr/utilities/jfrAllocation.hpp" -#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" #include "tracefiles/traceEventControl.hpp" #include "tracefiles/traceEventIds.hpp" @@ -48,7 +47,6 @@ class JfrEventSetting : AllStatic { static jlong threshold(TraceEventId event_id); static bool set_cutoff(jlong event_id, jlong cutoff_ticks); static jlong cutoff(TraceEventId event_id); - static StackWalkMode stack_walk_mode(TraceEventId event_id); DEBUG_ONLY(static bool bounds_check_event(jlong id);) }; diff --git a/src/share/vm/jfr/recorder/service/jfrEvent.hpp b/src/share/vm/jfr/recorder/service/jfrEvent.hpp index 6559fd899..f8e8cb408 100644 --- a/src/share/vm/jfr/recorder/service/jfrEvent.hpp +++ b/src/share/vm/jfr/recorder/service/jfrEvent.hpp @@ -26,7 +26,6 @@ #define SHARE_VM_JFR_RECORDER_SERVICE_JFREVENT_HPP #include "jfr/recorder/jfrEventSetting.inline.hpp" -#include "jfr/recorder/jfrEventSetting.hpp" #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" #include "jfr/utilities/jfrTraceTime.hpp" #include "jfr/writers/jfrNativeEventWriter.hpp" @@ -113,7 +112,7 @@ class JfrTraceEvent : public TraceEvent { if (trace_data->has_cached_stack_trace()) { writer.write(trace_data->cached_stack_trace_id()); } else { - writer.write(JfrStackTraceRepository::record(event_thread, 0, JfrEventSetting::stack_walk_mode(T::eventId))); + writer.write(JfrStackTraceRepository::record(event_thread)); } } else { writer.write(0); diff --git a/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp index e47abbd51..6b5c4f5df 100644 --- a/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp +++ b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp @@ -167,7 +167,7 @@ traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) { return instance().add_trace(stacktrace); } -traceid JfrStackTraceRepository::record(Thread* thread, int skip, StackWalkMode mode) { +traceid 
JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) { assert(thread == Thread::current(), "invariant"); JfrThreadData* const trace_data = thread->trace_data(); assert(trace_data != NULL, "invariant"); @@ -184,10 +184,10 @@ traceid JfrStackTraceRepository::record(Thread* thread, int skip, StackWalkMode } assert(frames != NULL, "invariant"); assert(trace_data->stackframes() == frames, "invariant"); - return instance().record_for((JavaThread*)thread, skip, mode, frames, trace_data->stackdepth()); + return instance().record_for((JavaThread*)thread, skip,frames, trace_data->stackdepth()); } -traceid JfrStackTraceRepository::record(Thread* thread, int skip, StackWalkMode mode, unsigned int* hash) { +traceid JfrStackTraceRepository::record(Thread* thread, int skip, unsigned int* hash) { assert(thread == Thread::current(), "invariant"); JfrThreadData* const trace_data = thread->trace_data(); assert(trace_data != NULL, "invariant"); @@ -206,12 +206,12 @@ traceid JfrStackTraceRepository::record(Thread* thread, int skip, StackWalkMode } assert(frames != NULL, "invariant"); assert(trace_data->stackframes() == frames, "invariant"); - return instance().record_for((JavaThread*)thread, skip, mode, frames, thread->trace_data()->stackdepth(), hash); + return instance().record_for((JavaThread*)thread, skip, frames, thread->trace_data()->stackdepth(), hash); } -traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, StackWalkMode mode, JfrStackFrame *frames, u4 max_frames) { +traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) { JfrStackTrace stacktrace(frames, max_frames); - if (!stacktrace.record_safe(thread, skip, false, mode)) { + if (!stacktrace.record_safe(thread, skip)) { return 0; } traceid tid = add(stacktrace); @@ -222,10 +222,10 @@ traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, StackW return tid; } -traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, StackWalkMode mode, JfrStackFrame *frames, u4 max_frames, unsigned int* hash) { +traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames, unsigned int* hash) { assert(hash != NULL && *hash == 0, "invariant"); JfrStackTrace stacktrace(frames, max_frames); - if (!stacktrace.record_safe(thread, skip, true, mode)) { + if (!stacktrace.record_safe(thread, skip, true)) { return 0; } traceid tid = add(stacktrace); @@ -382,45 +382,16 @@ void JfrStackTrace::resolve_linenos() { _lineno = true; } -bool JfrStackTrace::record_safe(JavaThread* thread, int skip, bool leakp, StackWalkMode mode) { +bool JfrStackTrace::record_safe(JavaThread* thread, int skip, bool leakp /* false */) { assert(SafepointSynchronize::safepoint_safe(thread, thread->thread_state()) || thread == Thread::current(), "Thread stack needs to be walkable"); - - bool success = false; - switch(mode) { - case WALK_BY_DEFAULT: - { - vframeStream vfs(thread); - success = fill_in(vfs, skip, leakp, mode); - break; - } - case WALK_BY_CURRENT_FRAME: - { - vframeStream vfs(thread, os::current_frame()); - success = fill_in(vfs, skip, leakp, mode); - break; - } - default: - ShouldNotReachHere(); - } - return success; -} - -bool JfrStackTrace::fill_in(vframeStream& vfs, int skip, bool leakp, StackWalkMode mode) { + vframeStream vfs(thread); u4 count = 0; _reached_root = true; - // Indicates whether the top frame is visited in this frames iteration. 
- // Top frame bci may be invalid and fill_in() will fix the top frame bci in a conservative way. - bool top_frame_visited = false; for(int i = 0; i < skip; i++) { if (vfs.at_end()) { break; } - // The top frame is in skip list. - // Mark top_frame_visited to avoid unnecessary top frame bci fixing. - if (!top_frame_visited) { - top_frame_visited = true; - } vfs.next(); } @@ -435,25 +406,8 @@ bool JfrStackTrace::fill_in(vframeStream& vfs, int skip, bool leakp, StackWalkMo int bci = 0; if (method->is_native()) { type = JfrStackFrame::FRAME_NATIVE; - // The top frame is in native. - // Mark top_frame_visited to avoid unnecessary top frame bci fixing. - if (!top_frame_visited) { - top_frame_visited = true; - } } else { bci = vfs.bci(); - // Hit the top frame and fix bci here. - if (!top_frame_visited) { - if (mode == WALK_BY_CURRENT_FRAME) { - // Only fix opto fast path allocation. - // All fast path allocations do not have cached event id. - if (!vfs.thread_ref()->trace_data()->has_cached_event_id()) { - assert(vfs.thread_ref()->trace_data()->has_cached_top_frame_bci(), "Invariant"); - bci = vfs.thread_ref()->trace_data()->cached_top_frame_bci(); - } - } - top_frame_visited = true; - } } // Can we determine if it's inlined? _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type); diff --git a/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp index e74211eb3..5f6c3d6b2 100644 --- a/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp +++ b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp @@ -34,7 +34,6 @@ class JfrCheckpointSystem; class JfrCheckpointWriter; class JfrChunkWriter; class Method; -class vframeStream; class JfrStackFrame { private: @@ -63,15 +62,6 @@ class JfrStackFrame { void resolve_lineno(); }; -enum StackWalkMode { - // walk stack by vframeStream vfs(thread). - WALK_BY_DEFAULT = 0, - // walk stack by vframeStream vfs(thread, os::current_frame()). - // It is only used in JIT runtime leaf call. In JIT runtime leaf call, - // last_java_sp is not maintained and WALK_BY_DEFAULT can not walk stack. 
-  WALK_BY_CURRENT_FRAME
-};
-
 class JfrStackTrace : public StackObj {
   friend class JfrStackTraceRepository;
  private:
@@ -83,7 +73,6 @@ class JfrStackTrace : public StackObj {
   bool _reached_root;
   bool _lineno;
 
-  bool fill_in(vframeStream& vfs, int skip, bool leakp, StackWalkMode mode);
  public:
   JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
                                                         _id(0),
@@ -93,7 +82,7 @@ class JfrStackTrace : public StackObj {
                                                         _max_frames(max_frames),
                                                         _lineno(false) {}
   bool record_thread(JavaThread& thread, frame& frame);
-  bool record_safe(JavaThread* thread, int skip, bool leakp, StackWalkMode stack_walk_mode);
+  bool record_safe(JavaThread* thread, int skip, bool leakp = false);
   void resolve_linenos();
   void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
   void set_hash(unsigned int hash) { _hash = hash; }
@@ -141,8 +130,8 @@ class JfrStackTraceRepository : public JfrCHeapObj {
   u4 _entries;
 
   size_t write_impl(JfrChunkWriter& cw, bool clear);
-  traceid record_for(JavaThread* thread, int skip, StackWalkMode mode, JfrStackFrame* frames, u4 max_frames);
-  traceid record_for(JavaThread* thread, int skip, StackWalkMode mode, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
+  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
+  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
   traceid add_trace(const JfrStackTrace& stacktrace);
   const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
 
@@ -155,8 +144,8 @@ class JfrStackTraceRepository : public JfrCHeapObj {
   bool initialize();
   static void destroy();
   static traceid add(const JfrStackTrace& stacktrace);
-  static traceid record(Thread* thread, int skip, StackWalkMode mode);
-  static traceid record(Thread* thread, int skip, StackWalkMode mode, unsigned int* hash);
+  static traceid record(Thread* thread, int skip = 0);
+  static traceid record(Thread* thread, int skip, unsigned int* hash);
   traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
   size_t write(JfrChunkWriter& cw, bool clear);
   size_t clear();
diff --git a/src/share/vm/opto/macro.cpp b/src/share/vm/opto/macro.cpp
index a147e457f..cf2ba77d5 100644
--- a/src/share/vm/opto/macro.cpp
+++ b/src/share/vm/opto/macro.cpp
@@ -27,7 +27,6 @@
 #include "libadt/vectset.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
-#include "opto/divnode.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/compile.hpp"
 #include "opto/connode.hpp"
@@ -42,7 +41,6 @@
 #include "opto/subnode.hpp"
 #include "opto/type.hpp"
 #include "runtime/sharedRuntime.hpp"
-#include "jfr/objectprofiler/objectProfiler.hpp"
 
 
 //
@@ -1089,21 +1087,15 @@ void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_ad
   }
 }
 
-Node* PhaseMacroExpand::load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt, MemNode::MemOrd mo) {
+
+Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
   Node* adr = basic_plus_adr(base, offset);
   const TypePtr* adr_type = adr->bottom_type()->is_ptr();
-  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, mo);
+  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, MemNode::unordered);
   transform_later(value);
   return value;
 }
 
-Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
-  return load(ctl, mem, base, offset, value_type, bt, MemNode::unordered);
-}
-
-Node* PhaseMacroExpand::make_load_acquire(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
-  return load(ctl, mem, base, offset, value_type, bt, MemNode::acquire);
-}
 
 Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
   Node* adr = basic_plus_adr(base, offset);
@@ -1442,10 +1434,6 @@ void PhaseMacroExpand::expand_allocate_common(
       }
     }
 
-    if (EnableJFR && JfrOptionSet::sample_object_allocations()) {
-      jfr_sample_fast_object_allocation(alloc, fast_oop, fast_oop_ctrl, fast_oop_rawmem);
-    }
-
     if (C->env()->dtrace_extended_probes()) {
       // Slow-path call
       int size = TypeFunc::Parms + 2;
@@ -1631,114 +1619,6 @@ void PhaseMacroExpand::expand_allocate_common(
   // This completes all paths into the result merge point
 }
 
-static jint bottom_java_frame_bci(JVMState* state) {
-  assert(state != NULL, "Invariant");
-
-  JVMState* last = NULL;
-  JVMState* current = state;
-  while (current != NULL) {
-    last = current;
-    current = current->caller();
-  }
-  return last->bci();
-}
-
-//
-// Pseudo code:
-//
-// int alloc_sample_enabled = *(int *)ObjectProfiler::enabled_flag_address();
-// if (alloc_sample_enabled) {
-//   long alloc_count = thread->trace_data()->alloc_count();
-//   long alloc_count_new = alloc_count + 1;
-//   thread->trace_data()->set_alloc_count(alloc_count_new);
-//   long alloc_count_until_sample = thread->trace_data()->alloc_count_until_sample();
-//   if (alloc_count_until_sample == alloc_count_new) {
-//     jfr_fast_object_alloc_C(obj, thread);
-//   }
-// }
-void PhaseMacroExpand::jfr_sample_fast_object_allocation(
-    AllocateNode* alloc, Node* fast_oop,
-    Node*& fast_oop_ctrl, Node*& fast_oop_rawmem) {
-  Node* tls = transform_later(new (C) ThreadLocalNode());
-
-  Node* alloc_sample_enabled_addr = transform_later(ConPNode::make(C, (address) ObjectProfiler::enabled_flag_address()));
-  Node* alloc_sample_enabled = make_load_acquire(fast_oop_ctrl, fast_oop_rawmem, alloc_sample_enabled_addr, 0, TypeInt::INT, T_INT);
-  Node* alloc_sample_enabled_cmp = transform_later(new (C) CmpINode(alloc_sample_enabled, intcon(1)));
-  Node* alloc_sample_enabled_bool = transform_later(new (C) BoolNode(alloc_sample_enabled_cmp, BoolTest::eq));
-  IfNode* alloc_sample_enabled_if = (IfNode*)transform_later(new (C) IfNode(fast_oop_ctrl, alloc_sample_enabled_bool, PROB_MIN, COUNT_UNKNOWN));
-  Node* alloc_sample_enabled_ctrl = transform_later(new (C) IfTrueNode(alloc_sample_enabled_if));
-  Node* alloc_sample_enabled_mem = fast_oop_rawmem;
-  Node* alloc_sample_disabled_ctrl = transform_later(new (C) IfFalseNode(alloc_sample_enabled_if));
-  Node* alloc_sample_disabled_mem = fast_oop_rawmem;
-  Node* alloc_sample_enabled_region = transform_later(new (C) RegionNode(3));
-  Node* alloc_sample_enabled_region_phi_mem = transform_later(new (C) PhiNode(alloc_sample_enabled_region, Type::MEMORY, TypeRawPtr::BOTTOM));
-  enum { enabled_idx = 1, disabled_idx = 2 };
-
-  // if _enabled then
-  {
-    const int alloc_count_offset = in_bytes(TRACE_THREAD_ALLOC_COUNT_OFFSET);
-    Node* alloc_count = make_load(alloc_sample_enabled_ctrl, alloc_sample_enabled_mem, tls, alloc_count_offset, TypeLong::LONG, T_LONG);
-    Node* alloc_count_new = transform_later(new (C) AddLNode(alloc_count, longcon(1)));
-    alloc_sample_enabled_mem = make_store(alloc_sample_enabled_ctrl, alloc_sample_enabled_mem, tls, alloc_count_offset, alloc_count_new, T_LONG);
-    const int alloc_count_until_sample_offset = in_bytes(TRACE_THREAD_ALLOC_COUNT_UNTIL_SAMPLE_OFFSET);
-    Node* alloc_count_until_sample = make_load(alloc_sample_enabled_ctrl, alloc_sample_enabled_mem, tls, alloc_count_until_sample_offset, TypeLong::LONG, T_LONG);
-    Node* alloc_count_until_sample_cmp = transform_later(new (C) CmpLNode(alloc_count_until_sample, alloc_count_new));
-    Node* alloc_sample_hit_bool = transform_later(new (C) BoolNode(alloc_count_until_sample_cmp, BoolTest::eq));
-    IfNode* alloc_sample_hit_if = (IfNode*)transform_later(new (C) IfNode(alloc_sample_enabled_ctrl, alloc_sample_hit_bool, PROB_MIN, COUNT_UNKNOWN));
-    Node* alloc_sample_hit_ctrl = transform_later(new (C) IfTrueNode(alloc_sample_hit_if));
-    Node* alloc_sample_hit_mem = alloc_sample_enabled_mem;
-    Node* alloc_sample_miss_ctrl = transform_later(new (C) IfFalseNode(alloc_sample_hit_if));
-    Node* alloc_sample_miss_mem = alloc_sample_enabled_mem;
-    Node* alloc_sample_hit_region = transform_later(new (C) RegionNode(3));
-    Node* alloc_sample_hit_region_phi_mem = transform_later(new (C) PhiNode(alloc_sample_hit_region, Type::MEMORY, TypeRawPtr::BOTTOM));
-
-    // if sample_hit then
-    {
-      CallLeafNode *call = new (C) CallLeafNode(OptoRuntime::jfr_fast_object_alloc_Type(),
-                                                CAST_FROM_FN_PTR(address, OptoRuntime::jfr_fast_object_alloc_C),
-                                                "jfr_fast_object_alloc_C",
-                                                TypeRawPtr::BOTTOM);
-      call->init_req(TypeFunc::Parms+0, fast_oop);
-      call->init_req(TypeFunc::Parms+1, intcon(bottom_java_frame_bci(alloc->jvms())));
-      call->init_req(TypeFunc::Parms+2, tls);
-      call->init_req(TypeFunc::Control, alloc_sample_hit_ctrl);
-      call->init_req(TypeFunc::I_O    , top());
-      call->init_req(TypeFunc::Memory , alloc_sample_hit_mem);
-      call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
-      call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
-      transform_later(call);
-      alloc_sample_hit_ctrl = new (C) ProjNode(call,TypeFunc::Control);
-      transform_later(alloc_sample_hit_ctrl);
-      alloc_sample_hit_mem = new (C) ProjNode(call,TypeFunc::Memory);
-      transform_later(alloc_sample_hit_mem);
-
-      alloc_sample_hit_region->init_req(enabled_idx, alloc_sample_hit_ctrl);
-      alloc_sample_hit_region_phi_mem->init_req(enabled_idx, alloc_sample_hit_mem);
-    }
-
-    {
-      alloc_sample_hit_region->init_req(disabled_idx, alloc_sample_miss_ctrl);
-      alloc_sample_hit_region_phi_mem->init_req(disabled_idx, alloc_sample_miss_mem);
-    }
-
-    {
-      alloc_sample_enabled_ctrl = alloc_sample_hit_region;
-      alloc_sample_enabled_mem = alloc_sample_hit_region_phi_mem;
-    }
-    alloc_sample_enabled_region->init_req(enabled_idx, alloc_sample_enabled_ctrl);
-    alloc_sample_enabled_region_phi_mem->init_req(enabled_idx, alloc_sample_enabled_mem);
-  }
-
-  {
-    alloc_sample_enabled_region->init_req(disabled_idx, alloc_sample_disabled_ctrl);
-    alloc_sample_enabled_region_phi_mem->init_req(disabled_idx, alloc_sample_disabled_mem);
-  }
-
-  {
-    fast_oop_ctrl = alloc_sample_enabled_region;
-    fast_oop_rawmem = alloc_sample_enabled_region_phi_mem;
-  }
-}
 
 // Helper for PhaseMacroExpand::expand_allocate_common.
 // Initializes the newly-allocated storage.
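Note on the macro.cpp hunk above: the removed PhaseMacroExpand::jfr_sample_fast_object_allocation expands the check described in its pseudo-code comment, a per-thread allocation counter compared against a per-thread sampling target, with a runtime leaf call only when the two become equal. A minimal stand-alone C++ sketch of that counting scheme follows; SampledThread, take_sample() and the 512-allocation interval are illustrative assumptions, not HotSpot APIs.

// Stand-alone model (not HotSpot code) of the sampling counter check.
#include <cstdint>
#include <cstdio>

struct SampledThread {
  int64_t alloc_count;              // models trace_data()->alloc_count()
  int64_t alloc_count_until_sample; // models trace_data()->alloc_count_until_sample()
};

static volatile int alloc_sample_enabled = 1; // models *ObjectProfiler::enabled_flag_address()

static void take_sample(void* obj, SampledThread* t) {
  // Models the jfr_fast_object_alloc_C leaf call: emit an event for this allocation.
  std::printf("sampled allocation #%lld (obj=%p)\n", (long long)t->alloc_count, obj);
  t->alloc_count_until_sample += 512; // schedule the next sample; the interval is illustrative
}

static void on_fast_path_allocation(void* obj, SampledThread* t) {
  if (alloc_sample_enabled) {               // cheap enabled check first
    const int64_t count_new = t->alloc_count + 1;
    t->alloc_count = count_new;             // bump the per-thread counter
    if (count_new == t->alloc_count_until_sample) {
      take_sample(obj, t);                  // rarely taken branch on a hit
    }
  }
}

int main() {
  SampledThread t = {0, 512};
  int dummy = 0;
  for (int i = 0; i < 2000; i++) {
    on_fast_path_allocation(&dummy, &t);    // each "allocation" pays only a load, add and compare
  }
  return 0;
}

The shape mirrors the reverted IR: the common path is a flag load plus a counter update, and the event emission sits behind the PROB_MIN branch.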
diff --git a/src/share/vm/opto/macro.hpp b/src/share/vm/opto/macro.hpp
index 9268cf524..0efe988ab 100644
--- a/src/share/vm/opto/macro.hpp
+++ b/src/share/vm/opto/macro.hpp
@@ -61,12 +61,8 @@ private:
     return n;
   }
   void set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr);
-  Node* load( Node* ctl, Node* mem, Node* base, int offset,
-              const Type* value_type, BasicType bt, MemNode::MemOrd mo);
   Node* make_load( Node* ctl, Node* mem, Node* base, int offset,
                    const Type* value_type, BasicType bt);
-  Node* make_load_acquire( Node* ctl, Node* mem, Node* base, int offset,
-                           const Type* value_type, BasicType bt);
   Node* make_store(Node* ctl, Node* mem, Node* base, int offset,
                    Node* value, BasicType bt);
 
@@ -123,10 +119,6 @@ private:
                                 Node* old_eden_top, Node* new_eden_top,
                                 Node* length);
 
-  //JFR tracing
-  void jfr_sample_fast_object_allocation(AllocateNode* alloc, Node* fast_oop,
-                                         Node*& fast_oop_ctrl, Node*& fast_oop_rawmem);
-
 public:
   PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn), _has_locks(false) {
     _igvn.set_delay_transform(true);
diff --git a/src/share/vm/opto/runtime.cpp b/src/share/vm/opto/runtime.cpp
index d6e4292bc..4562dbcd5 100644
--- a/src/share/vm/opto/runtime.cpp
+++ b/src/share/vm/opto/runtime.cpp
@@ -68,7 +68,6 @@
 #include "runtime/vframe_hp.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/preserveException.hpp"
-#include "gc_interface/allocTracer.inline.hpp"
 #if defined AD_MD_HPP
 # include AD_MD_HPP
 #elif defined TARGET_ARCH_MODEL_x86_32
@@ -261,10 +260,8 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thre
   if (!HAS_PENDING_EXCEPTION) {
     // Scavenge and allocate an instance.
     Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
-    TRACE_OPTO_SLOW_ALLOCATION_ENTER(false, THREAD);
     oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
     thread->set_vm_result(result);
-    TRACE_OPTO_SLOW_ALLOCATION_LEAVE(false, THREAD);
 
     // Pass oops back through thread local storage.  Our apparent type to Java
     // is that we return an oop, but we can block on exit from this routine and
@@ -293,7 +290,6 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaT
   // Scavenge and allocate an instance.
   oop result;
-  TRACE_OPTO_SLOW_ALLOCATION_ENTER(true, THREAD);
 
   if (array_type->oop_is_typeArray()) {
     // The oopFactory likes to work with the element type.
     // (We could bypass the oopFactory, since it doesn't add much value.)
@@ -307,7 +303,6 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaT
     Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
     result = oopFactory::new_objArray(elem_type, len, THREAD);
   }
-  TRACE_OPTO_SLOW_ALLOCATION_LEAVE(true, THREAD);
 
   // Pass oops back through thread local storage.  Our apparent type to Java
   // is that we return an oop, but we can block on exit from this routine and
@@ -334,12 +329,10 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len
   // Scavenge and allocate an instance.
   oop result;
-  TRACE_OPTO_SLOW_ALLOCATION_ENTER(true, THREAD);
 
   assert(array_type->oop_is_typeArray(), "should be called only for type array");
   // The oopFactory likes to work with the element type.
   BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
   result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);
-  TRACE_OPTO_SLOW_ALLOCATION_LEAVE(true, THREAD);
 
   // Pass oops back through thread local storage.  Our apparent type to Java
   // is that we return an oop, but we can block on exit from this routine and
@@ -386,9 +379,7 @@ JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int l
   dims[0] = len1;
   dims[1] = len2;
   Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
-  TRACE_OPTO_SLOW_ALLOCATION_ENTER(true, THREAD);
   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
-  TRACE_OPTO_SLOW_ALLOCATION_LEAVE(true, THREAD);
   deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
   thread->set_vm_result(obj);
 JRT_END
@@ -405,9 +396,7 @@ JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int l
   dims[1] = len2;
   dims[2] = len3;
   Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
-  TRACE_OPTO_SLOW_ALLOCATION_ENTER(true, THREAD);
   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
-  TRACE_OPTO_SLOW_ALLOCATION_LEAVE(true, THREAD);
   deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
   thread->set_vm_result(obj);
 JRT_END
@@ -425,9 +414,7 @@ JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int l
   dims[2] = len3;
   dims[3] = len4;
   Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
-  TRACE_OPTO_SLOW_ALLOCATION_ENTER(true, THREAD);
   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
-  TRACE_OPTO_SLOW_ALLOCATION_LEAVE(true, THREAD);
   deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
   thread->set_vm_result(obj);
 JRT_END
@@ -464,9 +451,7 @@ JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* d
   Copy::conjoint_jints_atomic(j_dims, c_dims, len);
 
   Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
-  TRACE_OPTO_SLOW_ALLOCATION_ENTER(true, THREAD);
   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
-  TRACE_OPTO_SLOW_ALLOCATION_LEAVE(true, THREAD);
   deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
   thread->set_vm_result(obj);
 JRT_END
@@ -1692,32 +1677,3 @@ JRT_LEAF(void, OptoRuntime::zap_dead_native_locals_C(JavaThread* thread))
 JRT_END
 
 # endif
-
-//-----------------------------------------------------------------------------
-// JFR support.
-const TypeFunc *OptoRuntime::jfr_fast_object_alloc_Type() {
-  const Type **fields = TypeTuple::fields(3);
-  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // newly allocated object
-  fields[TypeFunc::Parms+1] = TypeInt::INT;       // bci
-  fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // tls
-
-  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
-
-  // create result type (range)
-  fields = TypeTuple::fields(1);
-  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // returned oop
-
-  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
-
-  return TypeFunc::make(domain, range);
-}
-
-void OptoRuntime::jfr_fast_object_alloc_C(oopDesc* obj, jint top_frame_bci, JavaThread* thread) {
-  KlassHandle kh(thread, obj->klass());
-  assert(obj != NULL, "invariant");
-  assert(obj->klass() != NULL, "invariant");
-  thread->trace_data()->set_cached_top_frame_bci(top_frame_bci);
-  AllocTracer::send_opto_fast_allocation_event(kh, obj, obj->size() * HeapWordSize, thread);
-  thread->trace_data()->clear_cached_top_frame_bci();
-  thread->set_vm_result(obj);
-}
diff --git a/src/share/vm/opto/runtime.hpp b/src/share/vm/opto/runtime.hpp
index 48aea577e..58c6bd501 100644
--- a/src/share/vm/opto/runtime.hpp
+++ b/src/share/vm/opto/runtime.hpp
@@ -179,8 +179,6 @@ public:
   static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread);
   static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock);
 
-  // JFR support
-  static void jfr_fast_object_alloc_C(oopDesc* obj, jint bci, JavaThread* thread);
 private:
 
   // Implicit exception support
@@ -341,9 +339,6 @@ private:
   static const TypeFunc* zap_dead_locals_Type();
 # endif
 
-  // JFR support
-  static const TypeFunc* jfr_fast_object_alloc_Type();
-
 private:
  static NamedCounter * volatile _named_counters;
 
diff --git a/src/share/vm/precompiled/precompiled.hpp b/src/share/vm/precompiled/precompiled.hpp
index 986025843..d7409fca2 100644
--- a/src/share/vm/precompiled/precompiled.hpp
+++ b/src/share/vm/precompiled/precompiled.hpp
@@ -97,8 +97,6 @@
 # include "gc_interface/collectedHeap.hpp"
 # include "gc_interface/collectedHeap.inline.hpp"
 # include "gc_interface/gcCause.hpp"
-# include "gc_interface/allocTracer.hpp"
-# include "gc_interface/allocTracer.inline.hpp"
 # include "interpreter/abstractInterpreter.hpp"
 # include "interpreter/bytecode.hpp"
 # include "interpreter/bytecodeHistogram.hpp"
diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp
index 2a16a9718..b8a55ce45 100644
--- a/src/share/vm/runtime/thread.cpp
+++ b/src/share/vm/runtime/thread.cpp
@@ -45,7 +45,6 @@
 #include "oops/symbol.hpp"
 #include "prims/jvm_misc.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "jfr/utilities/jfrLog.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/privilegedStack.hpp"
 #include "runtime/arguments.hpp"
diff --git a/src/share/vm/runtime/vframe.hpp b/src/share/vm/runtime/vframe.hpp
index 9ef651d01..2adaf841f 100644
--- a/src/share/vm/runtime/vframe.hpp
+++ b/src/share/vm/runtime/vframe.hpp
@@ -367,8 +367,6 @@ class vframeStream : public vframeStreamCommon {
     }
   }
 
-  Thread *& thread_ref() { return (Thread *&)_thread; }
-
   // top_frame may not be at safepoint, start with sender
   vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false);
 };
diff --git a/src/share/vm/trace/traceBackend.hpp b/src/share/vm/trace/traceBackend.hpp
index 1544c2b8e..55f7e7770 100644
--- a/src/share/vm/trace/traceBackend.hpp
+++ b/src/share/vm/trace/traceBackend.hpp
@@ -28,13 +28,11 @@
 #include "jfr/jfr.hpp"
 #include "jfr/instrumentation/jfrEventClassTransformer.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
-#include "jfr/objectprofiler/objectProfiler.hpp"
 #include "jfr/leakprofiler/utilities/objectSampleAssistance.hpp"
 #include "jfr/recorder/access/jfrbackend.hpp"
 #include "jfr/recorder/access/jfrFlush.hpp"
 #include "jfr/recorder/access/jfrStackTraceMark.hpp"
 #include "jfr/recorder/access/jfrThreadData.hpp"
-#include "jfr/recorder/access/jfrOptionSet.hpp"
 #include "jfr/recorder/checkpoint/constant/traceid/jfrTraceId.hpp"
 typedef JfrBackend Tracing;
 #else // !INCLUDE_TRACE
diff --git a/src/share/vm/trace/traceMacros.hpp b/src/share/vm/trace/traceMacros.hpp
index 56a9b491b..65981496e 100644
--- a/src/share/vm/trace/traceMacros.hpp
+++ b/src/share/vm/trace/traceMacros.hpp
@@ -53,8 +53,6 @@ typedef u8 traceid;
 #define METHOD_AND_CLASS_IN_USE_EPOCH_1_BITS (METHOD_AND_CLASS_IN_USE_BITS << EPOCH_1_SHIFT)
 #define METHOD_AND_CLASS_IN_USE_EPOCH_2_BITS (METHOD_AND_CLASS_IN_USE_BITS << EPOCH_2_SHIFT)
 
-#define ARRAY_OBJECT_SIZE_PLACE_HOLDER 0x1111baba
-
 #define ANY_USED_BITS (USED_EPOCH_2_BIT         | \
                        USED_EPOCH_1_BIT         | \
                        METHOD_USED_EPOCH_2_BIT  | \
@@ -161,24 +159,6 @@ class TraceFlag {
 extern "C" void JNICALL trace_register_natives(JNIEnv*, jclass);
 #define TRACE_REGISTER_NATIVES ((void*)((address_word)(&trace_register_natives)))
 
-#define TRACE_OPTO_SLOW_ALLOCATION_ENTER(is_array, thread) \
-  AllocTracer::opto_slow_allocation_enter(is_array, thread)
-
-#define TRACE_OPTO_SLOW_ALLOCATION_LEAVE(is_array, thread) \
-  AllocTracer::opto_slow_allocation_leave(is_array, thread)
-
-#define TRACE_SLOW_ALLOCATION(klass, obj, alloc_size, thread) \
-  AllocTracer::send_slow_allocation_event(klass, obj, alloc_size, thread)
-
-#define TRACE_DEFINE_THREAD_ALLOC_COUNT_OFFSET \
-  static ByteSize alloc_count_offset() { return in_ByteSize(offset_of(TRACE_DATA, _alloc_count)); }
-#define TRACE_THREAD_ALLOC_COUNT_OFFSET \
-  (TRACE_DATA::alloc_count_offset() + TRACE_THREAD_TRACE_DATA_OFFSET)
-#define TRACE_DEFINE_THREAD_ALLOC_COUNT_UNTIL_SAMPLE_OFFSET \
-  static ByteSize alloc_count_until_sample_offset() { return in_ByteSize(offset_of(TRACE_DATA, _alloc_count_until_sample)); }
-#define TRACE_THREAD_ALLOC_COUNT_UNTIL_SAMPLE_OFFSET \
-  (TRACE_DATA::alloc_count_until_sample_offset() + TRACE_THREAD_TRACE_DATA_OFFSET)
-
 #else // !INCLUDE_TRACE
 
 #define EVENT_THREAD_EXIT(thread)
@@ -218,13 +198,5 @@ extern "C" void JNICALL trace_register_natives(JNIEnv*, jclass);
 #define TRACE_TEMPLATES(template)
 #define TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)
 
-#define TRACE_OPTO_SLOW_ALLOCATION_ENTER(is_array, thread)
-#define TRACE_OPTO_SLOW_ALLOCATION_LEAVE(is_array, thread)
-#define TRACE_SLOW_ALLOCATION(klass, obj, alloc_size, thread)
-#define TRACE_DEFINE_THREAD_ALLOC_COUNT_UNTIL_SAMPLE_OFFSET
-#define TRACE_THREAD_ALLOC_COUNT_UNTIL_SAMPLE_OFFSET
-#define TRACE_DEFINE_THREAD_ALLOC_COUNT_OFFSET
-#define TRACE_THREAD_ALLOC_COUNT_OFFSET
-
 #endif // INCLUDE_TRACE
 #endif // SHARE_VM_TRACE_TRACEMACROS_HPP
diff --git a/src/share/vm/trace/traceevents.xml b/src/share/vm/trace/traceevents.xml
index edaf164a5..1b9347cd6 100644
--- a/src/share/vm/trace/traceevents.xml
+++ b/src/share/vm/trace/traceevents.xml
@@ -667,18 +667,7 @@ Declares a structure type that can be used in other events.
-
-
-
-
-
-
-
-
-
-
-
-
+
diff --git a/src/share/vm/trace/tracetypes.xml b/src/share/vm/trace/tracetypes.xml
index 5251b35d6..3db3d043c 100644
--- a/src/share/vm/trace/tracetypes.xml
+++ b/src/share/vm/trace/tracetypes.xml
@@ -66,7 +66,6 @@ Now we can use the content + data type in declaring event fields.
-            thisClass = TestOptoObjectAllocationsSampling.class;
-            OPTO_METHOD = thisClass.getMethod(OPTO_METHOD_NAME, Integer.TYPE, Byte.TYPE);
-        } catch (Exception e) {
-            Asserts.fail(e.getMessage());
-            throw new ExceptionInInitializerError(e);
-        }
-    }
-
-    private static class InstanceObject {
-        private byte content;
-
-        InstanceObject(byte content) {
-            this.content = content;
-        }
-
-        public String toString() {
-            return "InstanceObject ( " + content + " )";
-        }
-    }
-
-    public static Object array;
-    public static Object instance;
-
-    public static void fireOptoAllocations(int arrayLength, byte b) {
-        array = new InstanceObject[arrayLength];
-        instance = new InstanceObject(b);
-    }
-
-    private static void ensureOptoMethod() throws Exception {
-        int initialCompLevel = WB.getMethodCompilationLevel(OPTO_METHOD);
-        if (initialCompLevel != COMP_LEVEL_FULL_OPTIMIZATION) {
-            WB.enqueueMethodForCompilation(OPTO_METHOD, COMP_LEVEL_FULL_OPTIMIZATION);
-        }
-        Utils.waitForCondition(() -> COMP_LEVEL_FULL_OPTIMIZATION == WB.getMethodCompilationLevel(OPTO_METHOD), 30_000L);
-        System.out.format("%s is already compiled at full optimization compile level\n", OPTO_METHOD_NAME);
-    }
-
-    private static void assertOptoMethod() throws Exception {
-        int compLevel = WB.getMethodCompilationLevel(OPTO_METHOD);
-        Asserts.assertTrue(compLevel == COMP_LEVEL_FULL_OPTIMIZATION);
-    }
-
-    private static void runForLong(int arg1, byte arg2) {
-        for (int i = 0; i < 1000_000; i++) {
-            try {
-                ensureOptoMethod();
-                fireOptoAllocations(arg1, arg2);
-                TimeUnit.SECONDS.sleep(1);
-            } catch (Exception e) {
-            }
-        }
-    }
-
-    public static void main(String[] args) throws Exception {
-        int arg1 = 3*1024;
-        byte arg2 = (byte)0x66;
-
-        // Run warm code to prevent deoptimizaiton.
-        // It will deoptimize if it runs without this warmup step.
-        fireOptoAllocations(arg1, arg2);
-        ensureOptoMethod();
-        assertOptoMethod();
-
-        // runForLong(arg1, arg2);
-
-        try (Recording recording = new Recording()) {
-            recording.enable(EventNames.OptoArrayObjectAllocation);
-            recording.enable(EventNames.OptoInstanceObjectAllocation);
-
-            recording.start();
-
-            // fireOptoAllocationsMethod would be deoptimized after jfr recording is started.
-            // The root cause is not clear.
-            // Invoke ensureOptoMethod blindly to enforce it in level 4 again.
-            ensureOptoMethod();
-
-            for (int i = 0; i < OPTO_METHOD_INVOKE_COUNT; i++) {
-                assertOptoMethod();
-                fireOptoAllocations(arg1, arg2);
-                WB.youngGC();
-            }
-
-            recording.stop();
-
-            List events = Events.fromRecording(recording);
-            int countOfInstanceObject = 0;
-            int countOfArrayObject = 0;
-            final String instanceObjectClassName = InstanceObject.class.getName();
-            final String arrayObjectClassName = InstanceObject[].class.getName();
-
-            for (RecordedEvent event : events) {
-                RecordedStackTrace stackTrace = event.getStackTrace();
-                Asserts.assertTrue(stackTrace != null);
-                List frames = stackTrace.getFrames();
-                Asserts.assertTrue(frames != null && frames.size() > 0);
-                RecordedFrame topFrame = frames.get(0);
-                Asserts.assertTrue(event.hasField("objectClass"));
-                RecordedClass clazz = event.getValue("objectClass");
-                String className = clazz.getName();
-                Asserts.assertTrue(event.getStackTrace().getFrames().size() > 0);
-                int objectSize = clazz.getObjectSize();
-                System.out.format("Allocation Object Class Name: %s, Object Size: %x, topFrame: %s\n", className, objectSize, topFrame.getMethod());
-                if (className.equals(instanceObjectClassName)) {
-                    Asserts.assertTrue(!clazz.isArray());
-                    Asserts.assertTrue(objectSize > 0);
-                    Asserts.assertTrue(topFrame.getLineNumber() > 0);
-                    Asserts.assertTrue(topFrame.getBytecodeIndex() > 0);
-                    countOfInstanceObject++;
-                } else if (className.equals(arrayObjectClassName)) {
-                    Asserts.assertTrue(clazz.isArray());
-                    Asserts.assertTrue(objectSize == RECORDED_ARRAY_CLASS_OBJECT_SIZE_MAGIC_CODE);
-                    countOfArrayObject++;
-                    Asserts.assertTrue(topFrame.getLineNumber() > 0);
-                    Asserts.assertTrue(topFrame.getBytecodeIndex() > 0);
-                }
-            }
-            System.out.format("Total Event Count: %d, EventOptoInstanceObjectAllocaiton Count: %d, EventOptoArrayObjectAllocation Count: %d\n", events.size(), countOfInstanceObject, countOfArrayObject);
-
-            Asserts.assertTrue(countOfArrayObject == countOfInstanceObject);
-            Asserts.assertTrue(countOfArrayObject == OPTO_METHOD_INVOKE_COUNT);
-            Asserts.assertTrue(events.size() >= (countOfInstanceObject + countOfArrayObject));
-        }
-    }
-}
diff --git a/test/testlibrary/com/oracle/java/testlibrary/jfr/EventNames.java b/test/testlibrary/com/oracle/java/testlibrary/jfr/EventNames.java
index cc8858b0f..3b5e2b003 100644
--- a/test/testlibrary/com/oracle/java/testlibrary/jfr/EventNames.java
+++ b/test/testlibrary/com/oracle/java/testlibrary/jfr/EventNames.java
@@ -173,8 +173,6 @@ public class EventNames {
   public final static String CPUTimeStampCounter = PREFIX + "CPUTimeStampCounter";// "os.processor.cpu_tsc";
   public final static String ActiveRecording = PREFIX + "ActiveRecording";//"com.oracle.jdk.ActiveRecording"
   public final static String ActiveSetting = PREFIX + "ActiveSetting";//"com.oracle.jdk.ActiveSetting"
-  public final static String OptoInstanceObjectAllocation = PREFIX + "OptoInstanceObjectAllocation"; //"com.oracle.jdk.OptoInstanceObjectAllocation"
-  public final static String OptoArrayObjectAllocation = PREFIX + "OptoArrayObjectAllocation"; //"com.oracle.jdk.OptoArrayObjectAllocation"
 
   public static boolean isGcEvent(EventType et) {
     return et.getCategoryNames().contains(GC_CATEGORY);
-- 
GitLab
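A note on the cached top-frame-bci plumbing this revert removes across jfrStackTraceRepository.cpp, opto/runtime.cpp and vframe.hpp: the removed comments indicate that a stack walk started from a C2 runtime leaf call cannot trust the top frame's bci, so jfr_fast_object_alloc_C caches the allocating bci in the thread's trace data and JfrStackTrace::fill_in substitutes it when walking in WALK_BY_CURRENT_FRAME mode. A minimal stand-alone C++ sketch of that handshake follows; ThreadTraceData and top_frame_bci() are illustrative names, not the HotSpot types.

// Stand-alone model (not HotSpot code) of the set/clear-around-the-walk pattern.
#include <cstdio>

struct ThreadTraceData {
  int cached_top_frame_bci = -1;                       // -1 means "nothing cached"
  bool has_cached_top_frame_bci() const { return cached_top_frame_bci >= 0; }
  void set_cached_top_frame_bci(int bci) { cached_top_frame_bci = bci; }
  void clear_cached_top_frame_bci() { cached_top_frame_bci = -1; }
};

// Models fill_in(): prefer the cached value for the top frame when one is present,
// otherwise fall back to whatever the frame itself reports.
static int top_frame_bci(const ThreadTraceData& td, int bci_from_frame) {
  return td.has_cached_top_frame_bci() ? td.cached_top_frame_bci : bci_from_frame;
}

int main() {
  ThreadTraceData td;
  td.set_cached_top_frame_bci(42);                     // done before emitting the sample event
  std::printf("bci used: %d\n", top_frame_bci(td, 0)); // stack walk during event emission
  td.clear_cached_top_frame_bci();                     // done after the event is written
  return 0;
}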