diff --git a/make/linux/makefiles/trace.make b/make/linux/makefiles/trace.make index f173e0ad3aba0dcd89d38c16aec30c8940920384..71a72a9129fe89dd2966e4a0a2fbcc3a977b5786 100644 --- a/make/linux/makefiles/trace.make +++ b/make/linux/makefiles/trace.make @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -50,8 +50,10 @@ VPATH += $(Src_Dirs_V:%=%:) TraceGeneratedNames = \ traceEventClasses.hpp \ - traceEventIds.hpp \ - traceTypes.hpp + traceEventIds.hpp \ + traceTypes.hpp \ + traceEventControl.hpp \ + tracePeriodic.hpp ifeq ($(HAS_ALT_SRC), true) TraceGeneratedNames += \ @@ -69,6 +71,8 @@ TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/tracerelationdecls.xml \ + $(TraceSrcDir)/traceevents.xml \ $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod ifeq ($(HAS_ALT_SRC), true) XML_DEPS += $(TraceAltSrcDir)/traceevents.xml @@ -96,6 +100,12 @@ ifeq ($(HAS_ALT_SRC), false) $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) $(GENERATE_CODE) +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventControl.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/tracePeriodic.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracePeriodic.xsl $(XML_DEPS) + $(GENERATE_CODE) + else $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) diff --git a/src/cpu/x86/vm/rdtsc_x86.cpp b/src/cpu/x86/vm/rdtsc_x86.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a880207c725f724b9d41e13878c1b978104b59a5 --- /dev/null +++ b/src/cpu/x86/vm/rdtsc_x86.cpp @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "rdtsc_x86.hpp" +#include "runtime/thread.inline.hpp" +#include "vm_version_ext_x86.hpp" + +// The following header contains the implementations of rdtsc() +#ifdef TARGET_OS_ARCH_linux_x86 +#include "os_linux_x86.inline.hpp" +#endif + +static jlong baseline_counter = 0; +static bool rdtsc_elapsed_counter_enabled = false; +static jlong tsc_frequency = 0; + +static jlong set_baseline_counter() { + assert(0 == baseline_counter, "invariant"); + baseline_counter = os::rdtsc(); + return baseline_counter; +} + +// Base loop to estimate ticks frequency for tsc counter from user mode. +// Volatiles and sleep() are used to prevent compiler from applying optimizations. +static void do_time_measurements(volatile jlong& time_base, + volatile jlong& time_fast, + volatile jlong& time_base_elapsed, + volatile jlong& time_fast_elapsed) { + static const unsigned int FT_SLEEP_MILLISECS = 1; + const unsigned int loopcount = 3; + + volatile jlong start = 0; + volatile jlong fstart = 0; + volatile jlong end = 0; + volatile jlong fend = 0; + + // Figure out the difference between rdtsc and os provided timer. + // base algorithm adopted from JRockit. + for (unsigned int times = 0; times < loopcount; times++) { + start = os::elapsed_counter(); + OrderAccess::fence(); + fstart = os::rdtsc(); + + // use sleep to prevent compiler from optimizing + os::sleep(Thread::current(), FT_SLEEP_MILLISECS, true); + + end = os::elapsed_counter(); + OrderAccess::fence(); + fend = os::rdtsc(); + + time_base += end - start; + time_fast += fend - fstart; + + // basis for calculating the os tick start + // to fast time tick start offset + time_base_elapsed += end; + time_fast_elapsed += (fend - baseline_counter); + } + + time_base /= loopcount; + time_fast /= loopcount; + time_base_elapsed /= loopcount; + time_fast_elapsed /= loopcount; +} + +static jlong initialize_frequency() { + assert(0 == tsc_frequency, "invariant"); + assert(0 == baseline_counter, "invariant"); + jlong initial_counter = set_baseline_counter(); + if (initial_counter == 0) { + return 0; + } + // os time frequency + static double os_freq = (double)os::elapsed_frequency(); + assert(os_freq > 0, "os_elapsed frequency corruption!"); + + double tsc_freq = .0; + double os_to_tsc_conv_factor = 1.0; + + // if platform supports invariant tsc, + // apply higher resolution and granularity for conversion calculations + if (VM_Version_Ext::supports_tscinv_ext()) { + // for invariant tsc platforms, take the maximum qualified cpu frequency + tsc_freq = (double)VM_Version_Ext::maximum_qualified_cpu_frequency(); + os_to_tsc_conv_factor = tsc_freq / os_freq; + } else { + // for non-trusted platforms, use measurements to estimate + // a conversion factor and the tsc frequency + + volatile jlong time_base = 0; + volatile jlong time_fast = 0; + volatile jlong time_base_elapsed = 0; + volatile jlong time_fast_elapsed = 0; + + // do measurements to get base data + // on os timer and fast ticks tsc time relation. 
+ do_time_measurements(time_base, time_fast, time_base_elapsed, time_fast_elapsed); + + // if invalid measurements, cannot proceed + if (time_fast == 0 || time_base == 0) { + return 0; + } + + os_to_tsc_conv_factor = (double)time_fast / (double)time_base; + if (os_to_tsc_conv_factor > 1) { + // estimate on tsc counter frequency + tsc_freq = os_to_tsc_conv_factor * os_freq; + } + } + + if ((tsc_freq < 0) || (tsc_freq > 0 && tsc_freq <= os_freq) || (os_to_tsc_conv_factor <= 1)) { + // safer to run with normal os time + tsc_freq = .0; + } + + // frequency of the tsc_counter + return (jlong)tsc_freq; +} + +static bool initialize_elapsed_counter() { + tsc_frequency = initialize_frequency(); + return tsc_frequency != 0 && baseline_counter != 0; +} + +static bool ergonomics() { + const bool invtsc_support = Rdtsc::is_supported(); + if (FLAG_IS_DEFAULT(UseFastUnorderedTimeStamps) && invtsc_support) { + FLAG_SET_ERGO(bool, UseFastUnorderedTimeStamps, true); + } + + bool ft_enabled = UseFastUnorderedTimeStamps && invtsc_support; + + if (!ft_enabled) { + if (UseFastUnorderedTimeStamps && VM_Version::supports_tsc()) { + warning("\nThe hardware does not support invariant tsc (INVTSC) register and/or cannot guarantee tsc synchronization between sockets at startup.\n"\ + "Values returned via rdtsc() are not guaranteed to be accurate, esp. when comparing values from cross sockets reads. Enabling UseFastUnorderedTimeStamps on non-invariant tsc hardware should be considered experimental.\n"); + ft_enabled = true; + } + } + + if (!ft_enabled) { + // Warn if unable to support command-line flag + if (UseFastUnorderedTimeStamps && !VM_Version::supports_tsc()) { + warning("Ignoring UseFastUnorderedTimeStamps, hardware does not support normal tsc"); + } + } + + return ft_enabled; +} + +bool Rdtsc::is_supported() { + return VM_Version_Ext::supports_tscinv_ext(); +} + +bool Rdtsc::is_elapsed_counter_enabled() { + return rdtsc_elapsed_counter_enabled; +} + +jlong Rdtsc::frequency() { + return tsc_frequency; +} + +jlong Rdtsc::elapsed_counter() { + return os::rdtsc() - baseline_counter; +} + +jlong Rdtsc::raw() { + return os::rdtsc(); +} + +bool Rdtsc::initialize() { + static bool initialized = false; + if (!initialized) { + assert(!rdtsc_elapsed_counter_enabled, "invariant"); + VM_Version_Ext::initialize(); + assert(0 == tsc_frequency, "invariant"); + assert(0 == baseline_counter, "invariant"); + bool result = initialize_elapsed_counter(); // init hw + if (result) { + result = ergonomics(); // check logical state + } + rdtsc_elapsed_counter_enabled = result; + initialized = true; + } + return rdtsc_elapsed_counter_enabled; +} diff --git a/src/cpu/x86/vm/rdtsc_x86.hpp b/src/cpu/x86/vm/rdtsc_x86.hpp new file mode 100644 index 0000000000000000000000000000000000000000..4a632abd2789aa4ca69c41f093168095ab63e01b --- /dev/null +++ b/src/cpu/x86/vm/rdtsc_x86.hpp @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_X86_VM_RDTSC_X86_HPP +#define CPU_X86_VM_RDTSC_X86_HPP + +#include "memory/allocation.hpp" +#include "utilities/macros.hpp" + +// Interface to the x86 rdtsc() time counter, if available. +// Not guaranteed to be synchronized across hardware threads and +// therefore software threads, and can be updated asynchronously +// by software. elapsed_counter() can jump backwards +// as well as jump forward when threads query different cores/sockets. +// Very much not recommended for general use. +// INVTSC is a minimal requirement for auto-enablement. + +class Rdtsc : AllStatic { + public: + static jlong elapsed_counter(); // provides quick time stamps + static jlong frequency(); // tsc register + static bool is_supported(); // InvariantTSC + static jlong raw(); // direct rdtsc() access + static bool is_elapsed_counter_enabled(); // turn off with -XX:-UseFastUnorderedTimeStamps + static jlong elapsed_counter_baseline(); + static bool initialize(); +}; + +#endif // CPU_X86_VM_RDTSC_X86_HPP diff --git a/src/cpu/x86/vm/vm_version_ext_x86.cpp b/src/cpu/x86/vm/vm_version_ext_x86.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cb1871f29cb8c224dbd907fb5286cd14127b6a95 --- /dev/null +++ b/src/cpu/x86/vm/vm_version_ext_x86.cpp @@ -0,0 +1,967 @@ +/* + * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jvm.h" +#include "utilities/macros.hpp" +#include "asm/macroAssembler.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/java.hpp" +#include "runtime/stubCodeGenerator.hpp" +#include "vm_version_ext_x86.hpp" + +typedef enum { + CPU_FAMILY_8086_8088 = 0, + CPU_FAMILY_INTEL_286 = 2, + CPU_FAMILY_INTEL_386 = 3, + CPU_FAMILY_INTEL_486 = 4, + CPU_FAMILY_PENTIUM = 5, + CPU_FAMILY_PENTIUMPRO = 6, // Same family several models + CPU_FAMILY_PENTIUM_4 = 0xF +} FamilyFlag; + + typedef enum { + RDTSCP_FLAG = 0x08000000, // bit 27 + INTEL64_FLAG = 0x20000000 // bit 29 + } _featureExtendedEdxFlag; + +#define CPUID_STANDARD_FN 0x0 +#define CPUID_STANDARD_FN_1 0x1 +#define CPUID_STANDARD_FN_4 0x4 +#define CPUID_STANDARD_FN_B 0xb + +#define CPUID_EXTENDED_FN 0x80000000 +#define CPUID_EXTENDED_FN_1 0x80000001 +#define CPUID_EXTENDED_FN_2 0x80000002 +#define CPUID_EXTENDED_FN_3 0x80000003 +#define CPUID_EXTENDED_FN_4 0x80000004 +#define CPUID_EXTENDED_FN_7 0x80000007 +#define CPUID_EXTENDED_FN_8 0x80000008 + +typedef enum { + FPU_FLAG = 0x00000001, + VME_FLAG = 0x00000002, + DE_FLAG = 0x00000004, + PSE_FLAG = 0x00000008, + TSC_FLAG = 0x00000010, + MSR_FLAG = 0x00000020, + PAE_FLAG = 0x00000040, + MCE_FLAG = 0x00000080, + CX8_FLAG = 0x00000100, + APIC_FLAG = 0x00000200, + SEP_FLAG = 0x00000800, + MTRR_FLAG = 0x00001000, + PGE_FLAG = 0x00002000, + MCA_FLAG = 0x00004000, + CMOV_FLAG = 0x00008000, + PAT_FLAG = 0x00010000, + PSE36_FLAG = 0x00020000, + PSNUM_FLAG = 0x00040000, + CLFLUSH_FLAG = 0x00080000, + DTS_FLAG = 0x00200000, + ACPI_FLAG = 0x00400000, + MMX_FLAG = 0x00800000, + FXSR_FLAG = 0x01000000, + SSE_FLAG = 0x02000000, + SSE2_FLAG = 0x04000000, + SS_FLAG = 0x08000000, + HTT_FLAG = 0x10000000, + TM_FLAG = 0x20000000 +} FeatureEdxFlag; + +static BufferBlob* cpuid_brand_string_stub_blob; +static const int cpuid_brand_string_stub_size = 550; + +extern "C" { + typedef void (*getCPUIDBrandString_stub_t)(void*); +} + +static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = NULL; + +class VM_Version_Ext_StubGenerator: public StubCodeGenerator { + public: + + VM_Version_Ext_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {} + + address generate_getCPUIDBrandString(void) { + // Flags to test CPU type. + const uint32_t HS_EFL_AC = 0x40000; + const uint32_t HS_EFL_ID = 0x200000; + // Values for when we don't have a CPUID instruction. 
+ const int CPU_FAMILY_SHIFT = 8; + const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT); + const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT); + + Label detect_486, cpu486, detect_586, done, ext_cpuid; + + StubCodeMark mark(this, "VM_Version_Ext", "getCPUIDNameInfo_stub"); +# define __ _masm-> + + address start = __ pc(); + + // + // void getCPUIDBrandString(VM_Version::CpuidInfo* cpuid_info); + // + // LP64: rcx and rdx are first and second argument registers on windows + + __ push(rbp); +#ifdef _LP64 + __ mov(rbp, c_rarg0); // cpuid_info address +#else + __ movptr(rbp, Address(rsp, 8)); // cpuid_info address +#endif + __ push(rbx); + __ push(rsi); + __ pushf(); // preserve rbx, and flags + __ pop(rax); + __ push(rax); + __ mov(rcx, rax); + // + // if we are unable to change the AC flag, we have a 386 + // + __ xorl(rax, HS_EFL_AC); + __ push(rax); + __ popf(); + __ pushf(); + __ pop(rax); + __ cmpptr(rax, rcx); + __ jccb(Assembler::notEqual, detect_486); + + __ movl(rax, CPU_FAMILY_386); + __ jmp(done); + + // + // If we are unable to change the ID flag, we have a 486 which does + // not support the "cpuid" instruction. + // + __ bind(detect_486); + __ mov(rax, rcx); + __ xorl(rax, HS_EFL_ID); + __ push(rax); + __ popf(); + __ pushf(); + __ pop(rax); + __ cmpptr(rcx, rax); + __ jccb(Assembler::notEqual, detect_586); + + __ bind(cpu486); + __ movl(rax, CPU_FAMILY_486); + __ jmp(done); + + // + // At this point, we have a chip which supports the "cpuid" instruction + // + __ bind(detect_586); + __ xorl(rax, rax); + __ cpuid(); + __ orl(rax, rax); + __ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input + // value of at least 1, we give up and + // assume a 486 + + // + // Extended cpuid(0x80000000) for processor brand string detection + // + __ bind(ext_cpuid); + __ movl(rax, CPUID_EXTENDED_FN); + __ cpuid(); + __ cmpl(rax, CPUID_EXTENDED_FN_4); + __ jcc(Assembler::below, done); + + // + // Extended cpuid(0x80000002) // first 16 bytes in brand string + // + __ movl(rax, CPUID_EXTENDED_FN_2); + __ cpuid(); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_0_offset()))); + __ movl(Address(rsi, 0), rax); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_1_offset()))); + __ movl(Address(rsi, 0), rbx); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_2_offset()))); + __ movl(Address(rsi, 0), rcx); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_3_offset()))); + __ movl(Address(rsi,0), rdx); + + // + // Extended cpuid(0x80000003) // next 16 bytes in brand string + // + __ movl(rax, CPUID_EXTENDED_FN_3); + __ cpuid(); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_4_offset()))); + __ movl(Address(rsi, 0), rax); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_5_offset()))); + __ movl(Address(rsi, 0), rbx); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_6_offset()))); + __ movl(Address(rsi, 0), rcx); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_7_offset()))); + __ movl(Address(rsi,0), rdx); + + // + // Extended cpuid(0x80000004) // last 16 bytes in brand string + // + __ movl(rax, CPUID_EXTENDED_FN_4); + __ cpuid(); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_8_offset()))); + __ movl(Address(rsi, 0), rax); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_9_offset()))); + __ movl(Address(rsi, 0), rbx); + __ lea(rsi, Address(rbp, in_bytes(VM_Version_Ext::proc_name_10_offset()))); + __ movl(Address(rsi, 0), rcx); + __ lea(rsi, 
Address(rbp, in_bytes(VM_Version_Ext::proc_name_11_offset()))); + __ movl(Address(rsi,0), rdx); + + // + // return + // + __ bind(done); + __ popf(); + __ pop(rsi); + __ pop(rbx); + __ pop(rbp); + __ ret(0); + +# undef __ + + return start; + }; +}; + + +// VM_Version_Ext statics +const size_t VM_Version_Ext::VENDOR_LENGTH = 13; +const size_t VM_Version_Ext::CPU_EBS_MAX_LENGTH = (3 * 4 * 4 + 1); +const size_t VM_Version_Ext::CPU_TYPE_DESC_BUF_SIZE = 256; +const size_t VM_Version_Ext::CPU_DETAILED_DESC_BUF_SIZE = 4096; +char* VM_Version_Ext::_cpu_brand_string = NULL; +jlong VM_Version_Ext::_max_qualified_cpu_frequency = 0; + +int VM_Version_Ext::_no_of_threads = 0; +int VM_Version_Ext::_no_of_cores = 0; +int VM_Version_Ext::_no_of_packages = 0; + +void VM_Version_Ext::initialize(void) { + ResourceMark rm; + + cpuid_brand_string_stub_blob = BufferBlob::create("getCPUIDBrandString_stub", cpuid_brand_string_stub_size); + if (cpuid_brand_string_stub_blob == NULL) { + vm_exit_during_initialization("Unable to allocate getCPUIDBrandString_stub"); + } + CodeBuffer c(cpuid_brand_string_stub_blob); + VM_Version_Ext_StubGenerator g(&c); + getCPUIDBrandString_stub = CAST_TO_FN_PTR(getCPUIDBrandString_stub_t, + g.generate_getCPUIDBrandString()); +} + +const char* VM_Version_Ext::cpu_model_description(void) { + uint32_t cpu_family = extended_cpu_family(); + uint32_t cpu_model = extended_cpu_model(); + const char* model = NULL; + + if (cpu_family == CPU_FAMILY_PENTIUMPRO) { + for (uint32_t i = 0; i <= cpu_model; i++) { + model = _model_id_pentium_pro[i]; + if (model == NULL) { + break; + } + } + } + return model; +} + +const char* VM_Version_Ext::cpu_brand_string(void) { + if (_cpu_brand_string == NULL) { + _cpu_brand_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_EBS_MAX_LENGTH, mtInternal); + if (NULL == _cpu_brand_string) { + return NULL; + } + int ret_val = cpu_extended_brand_string(_cpu_brand_string, CPU_EBS_MAX_LENGTH); + if (ret_val != OS_OK) { + FREE_C_HEAP_ARRAY(char, _cpu_brand_string, mtInternal); + _cpu_brand_string = NULL; + } + } + return _cpu_brand_string; +} + +const char* VM_Version_Ext::cpu_brand(void) { + const char* brand = NULL; + + if ((_cpuid_info.std_cpuid1_ebx.value & 0xFF) > 0) { + int brand_num = _cpuid_info.std_cpuid1_ebx.value & 0xFF; + brand = _brand_id[0]; + for (int i = 0; brand != NULL && i <= brand_num; i += 1) { + brand = _brand_id[i]; + } + } + return brand; +} + +bool VM_Version_Ext::cpu_is_em64t(void) { + return ((_cpuid_info.ext_cpuid1_edx.value & INTEL64_FLAG) == INTEL64_FLAG); +} + +bool VM_Version_Ext::is_netburst(void) { + return (is_intel() && (extended_cpu_family() == CPU_FAMILY_PENTIUM_4)); +} + +bool VM_Version_Ext::supports_tscinv_ext(void) { + if (!supports_tscinv_bit()) { + return false; + } + + if (is_intel()) { + return true; + } + + if (is_amd()) { + return !is_amd_Barcelona(); + } + + return false; +} + +void VM_Version_Ext::resolve_cpu_information_details(void) { + + // in future we want to base this information on proper cpu + // and cache topology enumeration such as: + // Intel 64 Architecture Processor Topology Enumeration + // which supports system cpu and cache topology enumeration + // either using 2xAPICIDs or initial APICIDs + + // currently only rough cpu information estimates + // which will not necessarily reflect the exact configuration of the system + + // this is the number of logical hardware threads + // visible to the operating system + _no_of_threads = os::processor_count(); + + // find out number of threads per cpu package + int 
threads_per_package = threads_per_core() * cores_per_cpu(); + + // use amount of threads visible to the process in order to guess number of sockets + _no_of_packages = _no_of_threads / threads_per_package; + + // process might only see a subset of the total number of threads + // from a single processor package. Virtualization/resource management for example. + // If so then just write a hard 1 as num of pkgs. + if (0 == _no_of_packages) { + _no_of_packages = 1; + } + + // estimate the number of cores + _no_of_cores = cores_per_cpu() * _no_of_packages; +} + +int VM_Version_Ext::number_of_threads(void) { + if (_no_of_threads == 0) { + resolve_cpu_information_details(); + } + return _no_of_threads; +} + +int VM_Version_Ext::number_of_cores(void) { + if (_no_of_cores == 0) { + resolve_cpu_information_details(); + } + return _no_of_cores; +} + +int VM_Version_Ext::number_of_sockets(void) { + if (_no_of_packages == 0) { + resolve_cpu_information_details(); + } + return _no_of_packages; +} + +const char* VM_Version_Ext::cpu_family_description(void) { + int cpu_family_id = extended_cpu_family(); + if (is_amd()) { + return _family_id_amd[cpu_family_id]; + } + if (is_intel()) { + if (cpu_family_id == CPU_FAMILY_PENTIUMPRO) { + return cpu_model_description(); + } + return _family_id_intel[cpu_family_id]; + } + return "Unknown x86"; +} + +int VM_Version_Ext::cpu_type_description(char* const buf, size_t buf_len) { + assert(buf != NULL, "buffer is NULL!"); + assert(buf_len >= CPU_TYPE_DESC_BUF_SIZE, "buffer len should at least be == CPU_TYPE_DESC_BUF_SIZE!"); + + const char* cpu_type = NULL; + const char* x64 = NULL; + + if (is_intel()) { + cpu_type = "Intel"; + x64 = cpu_is_em64t() ? " Intel64" : ""; + } else if (is_amd()) { + cpu_type = "AMD"; + x64 = cpu_is_em64t() ? " AMD64" : ""; + } else { + cpu_type = "Unknown x86"; + x64 = cpu_is_em64t() ? " x86_64" : ""; + } + + jio_snprintf(buf, buf_len, "%s %s%s SSE SSE2%s%s%s%s%s%s%s%s", + cpu_type, + cpu_family_description(), + supports_ht() ? " (HT)" : "", + supports_sse3() ? " SSE3" : "", + supports_ssse3() ? " SSSE3" : "", + supports_sse4_1() ? " SSE4.1" : "", + supports_sse4_2() ? " SSE4.2" : "", + supports_sse4a() ? " SSE4A" : "", + is_netburst() ? " Netburst" : "", + is_intel_family_core() ? 
" Core" : "", + x64); + + return OS_OK; +} + +int VM_Version_Ext::cpu_extended_brand_string(char* const buf, size_t buf_len) { + assert(buf != NULL, "buffer is NULL!"); + assert(buf_len >= CPU_EBS_MAX_LENGTH, "buffer len should at least be == CPU_EBS_MAX_LENGTH!"); + assert(getCPUIDBrandString_stub != NULL, "not initialized"); + + // invoke newly generated asm code to fetch CPU Brand String + getCPUIDBrandString_stub(&_cpuid_info); + + // fetch results into buffer + *((uint32_t*) &buf[0]) = _cpuid_info.proc_name_0; + *((uint32_t*) &buf[4]) = _cpuid_info.proc_name_1; + *((uint32_t*) &buf[8]) = _cpuid_info.proc_name_2; + *((uint32_t*) &buf[12]) = _cpuid_info.proc_name_3; + *((uint32_t*) &buf[16]) = _cpuid_info.proc_name_4; + *((uint32_t*) &buf[20]) = _cpuid_info.proc_name_5; + *((uint32_t*) &buf[24]) = _cpuid_info.proc_name_6; + *((uint32_t*) &buf[28]) = _cpuid_info.proc_name_7; + *((uint32_t*) &buf[32]) = _cpuid_info.proc_name_8; + *((uint32_t*) &buf[36]) = _cpuid_info.proc_name_9; + *((uint32_t*) &buf[40]) = _cpuid_info.proc_name_10; + *((uint32_t*) &buf[44]) = _cpuid_info.proc_name_11; + + return OS_OK; +} + +size_t VM_Version_Ext::cpu_write_support_string(char* const buf, size_t buf_len) { + assert(buf != NULL, "buffer is NULL!"); + assert(buf_len > 0, "buffer len not enough!"); + + unsigned int flag = 0; + unsigned int fi = 0; + size_t written = 0; + const char* prefix = ""; + +#define WRITE_TO_BUF(string) \ + { \ + int res = jio_snprintf(&buf[written], buf_len - written, "%s%s", prefix, string); \ + if (res < 0 || (size_t) res >= buf_len - 1) { \ + buf[buf_len-1] = '\0'; \ + return buf_len - 1; \ + } \ + written += res; \ + if (prefix[0] == '\0') { \ + prefix = ", "; \ + } \ + } + + for (flag = 1, fi = 0; flag <= 0x20000000 ; flag <<= 1, fi++) { + if (flag == HTT_FLAG && (((_cpuid_info.std_cpuid1_ebx.value >> 16) & 0xff) <= 1)) { + continue; /* no hyperthreading */ + } else if (flag == SEP_FLAG && (cpu_family() == CPU_FAMILY_PENTIUMPRO && ((_cpuid_info.std_cpuid1_eax.value & 0xff) < 0x33))) { + continue; /* no fast system call */ + } + if ((_cpuid_info.std_cpuid1_edx.value & flag) && strlen(_feature_edx_id[fi]) > 0) { + WRITE_TO_BUF(_feature_edx_id[fi]); + } + } + + for (flag = 1, fi = 0; flag <= 0x20000000; flag <<= 1, fi++) { + if ((_cpuid_info.std_cpuid1_ecx.value & flag) && strlen(_feature_ecx_id[fi]) > 0) { + WRITE_TO_BUF(_feature_ecx_id[fi]); + } + } + + for (flag = 1, fi = 0; flag <= 0x20000000 ; flag <<= 1, fi++) { + if ((_cpuid_info.ext_cpuid1_ecx.value & flag) && strlen(_feature_extended_ecx_id[fi]) > 0) { + WRITE_TO_BUF(_feature_extended_ecx_id[fi]); + } + } + + for (flag = 1, fi = 0; flag <= 0x20000000; flag <<= 1, fi++) { + if ((_cpuid_info.ext_cpuid1_edx.value & flag) && strlen(_feature_extended_edx_id[fi]) > 0) { + WRITE_TO_BUF(_feature_extended_edx_id[fi]); + } + } + + if (supports_tscinv_bit()) { + WRITE_TO_BUF("Invariant TSC"); + } + + return written; +} + +/** + * Write a detailed description of the cpu to a given buffer, including + * feature set. 
+ */ +int VM_Version_Ext::cpu_detailed_description(char* const buf, size_t buf_len) { + assert(buf != NULL, "buffer is NULL!"); + assert(buf_len >= CPU_DETAILED_DESC_BUF_SIZE, "buffer len should at least be == CPU_DETAILED_DESC_BUF_SIZE!"); + + static const char* unknown = ""; + char vendor_id[VENDOR_LENGTH]; + const char* family = NULL; + const char* model = NULL; + const char* brand = NULL; + int outputLen = 0; + + family = cpu_family_description(); + if (family == NULL) { + family = unknown; + } + + model = cpu_model_description(); + if (model == NULL) { + model = unknown; + } + + brand = cpu_brand_string(); + + if (brand == NULL) { + brand = cpu_brand(); + if (brand == NULL) { + brand = unknown; + } + } + + *((uint32_t*) &vendor_id[0]) = _cpuid_info.std_vendor_name_0; + *((uint32_t*) &vendor_id[4]) = _cpuid_info.std_vendor_name_2; + *((uint32_t*) &vendor_id[8]) = _cpuid_info.std_vendor_name_1; + vendor_id[VENDOR_LENGTH-1] = '\0'; + + outputLen = jio_snprintf(buf, buf_len, "Brand: %s, Vendor: %s\n" + "Family: %s (0x%x), Model: %s (0x%x), Stepping: 0x%x\n" + "Ext. family: 0x%x, Ext. model: 0x%x, Type: 0x%x, Signature: 0x%8.8x\n" + "Features: ebx: 0x%8.8x, ecx: 0x%8.8x, edx: 0x%8.8x\n" + "Ext. features: eax: 0x%8.8x, ebx: 0x%8.8x, ecx: 0x%8.8x, edx: 0x%8.8x\n" + "Supports: ", + brand, + vendor_id, + family, + extended_cpu_family(), + model, + extended_cpu_model(), + cpu_stepping(), + _cpuid_info.std_cpuid1_eax.bits.ext_family, + _cpuid_info.std_cpuid1_eax.bits.ext_model, + _cpuid_info.std_cpuid1_eax.bits.proc_type, + _cpuid_info.std_cpuid1_eax.value, + _cpuid_info.std_cpuid1_ebx.value, + _cpuid_info.std_cpuid1_ecx.value, + _cpuid_info.std_cpuid1_edx.value, + _cpuid_info.ext_cpuid1_eax, + _cpuid_info.ext_cpuid1_ebx, + _cpuid_info.ext_cpuid1_ecx, + _cpuid_info.ext_cpuid1_edx); + + if (outputLen < 0 || (size_t) outputLen >= buf_len - 1) { + buf[buf_len-1] = '\0'; + return OS_ERR; + } + + cpu_write_support_string(&buf[outputLen], buf_len - outputLen); + + return OS_OK; +} + +const char* VM_Version_Ext::cpu_name(void) { + char cpu_type_desc[CPU_TYPE_DESC_BUF_SIZE]; + size_t cpu_desc_len = sizeof(cpu_type_desc); + + cpu_type_description(cpu_type_desc, cpu_desc_len); + char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, cpu_desc_len, mtTracing); + if (NULL == tmp) { + return NULL; + } + strncpy(tmp, cpu_type_desc, cpu_desc_len); + return tmp; +} + +const char* VM_Version_Ext::cpu_description(void) { + char cpu_detailed_desc_buffer[CPU_DETAILED_DESC_BUF_SIZE]; + size_t cpu_detailed_desc_len = sizeof(cpu_detailed_desc_buffer); + + cpu_detailed_description(cpu_detailed_desc_buffer, cpu_detailed_desc_len); + + char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, cpu_detailed_desc_len, mtTracing); + + if (NULL == tmp) { + return NULL; + } + + strncpy(tmp, cpu_detailed_desc_buffer, cpu_detailed_desc_len); + return tmp; +} + +/** + * See Intel Application note 485 (chapter 10) for details + * on frequency extraction from cpu brand string. 
+ * http://www.intel.com/content/dam/www/public/us/en/documents/application-notes/processor-identification-cpuid-instruction-note.pdf + * + */ +jlong VM_Version_Ext::max_qualified_cpu_freq_from_brand_string(void) { + // get brand string + const char* const brand_string = cpu_brand_string(); + if (brand_string == NULL) { + return 0; + } + + const u8 MEGA = 1000000; + u8 multiplier = 0; + jlong frequency = 0; + + // the frequency information in the cpu brand string + // is given in either of two formats "x.xxyHz" or "xxxxyHz", + // where y=M,G,T and x is digits + const char* Hz_location = strchr(brand_string, 'H'); + + if (Hz_location != NULL) { + if (*(Hz_location + 1) == 'z') { + // switch on y in "yHz" + switch(*(Hz_location - 1)) { + case 'M' : + // Set multiplier to frequency is in Hz + multiplier = MEGA; + break; + case 'G' : + multiplier = MEGA * 1000; + break; + case 'T' : + multiplier = MEGA * 1000 * 1000; + break; + } + } + } + + if (multiplier > 0) { + // compute frequency (in Hz) from brand string + if (*(Hz_location - 4) == '.') { // if format is "x.xx" + frequency = (jlong)(*(Hz_location - 5) - '0') * (multiplier); + frequency += (jlong)(*(Hz_location - 3) - '0') * (multiplier / 10); + frequency += (jlong)(*(Hz_location - 2) - '0') * (multiplier / 100); + } else { // format is "xxxx" + frequency = (jlong)(*(Hz_location - 5) - '0') * 1000; + frequency += (jlong)(*(Hz_location - 4) - '0') * 100; + frequency += (jlong)(*(Hz_location - 3) - '0') * 10; + frequency += (jlong)(*(Hz_location - 2) - '0'); + frequency *= multiplier; + } + } + return frequency; +} + + +jlong VM_Version_Ext::maximum_qualified_cpu_frequency(void) { + if (_max_qualified_cpu_frequency == 0) { + _max_qualified_cpu_frequency = max_qualified_cpu_freq_from_brand_string(); + } + return _max_qualified_cpu_frequency; +} + +const char* const VM_Version_Ext::_family_id_intel[] = { + "8086/8088", + "", + "286", + "386", + "486", + "Pentium", + "Pentium Pro", //or Pentium-M/Woodcrest depeding on model + "", + "", + "", + "", + "", + "", + "", + "", + "Pentium 4" +}; + +const char* const VM_Version_Ext::_family_id_amd[] = { + "", + "", + "", + "", + "5x86", + "K5/K6", + "Athlon/AthlonXP", + "", + "", + "", + "", + "", + "", + "", + "", + "Opteron/Athlon64", + "Opteron QC/Phenom" // Barcelona et.al. 
+}; +// Partially from Intel 64 and IA-32 Architecture Software Developer's Manual, +// September 2013, Vol 3C Table 35-1 +const char* const VM_Version_Ext::_model_id_pentium_pro[] = { + "", + "Pentium Pro", + "", + "Pentium II model 3", + "", + "Pentium II model 5/Xeon/Celeron", + "Celeron", + "Pentium III/Pentium III Xeon", + "Pentium III/Pentium III Xeon", + "Pentium M model 9", // Yonah + "Pentium III, model A", + "Pentium III, model B", + "", + "Pentium M model D", // Dothan + "", + "Core 2", // 0xf Woodcrest/Conroe/Merom/Kentsfield/Clovertown + "", + "", + "", + "", + "", + "", + "Celeron", // 0x16 Celeron 65nm + "Core 2", // 0x17 Penryn / Harpertown + "", + "", + "Core i7", // 0x1A CPU_MODEL_NEHALEM_EP + "Atom", // 0x1B Z5xx series Silverthorn + "", + "Core 2", // 0x1D Dunnington (6-core) + "Nehalem", // 0x1E CPU_MODEL_NEHALEM + "", + "", + "", + "", + "", + "", + "Westmere", // 0x25 CPU_MODEL_WESTMERE + "", + "", + "", // 0x28 + "", + "Sandy Bridge", // 0x2a "2nd Generation Intel Core i7, i5, i3" + "", + "Westmere-EP", // 0x2c CPU_MODEL_WESTMERE_EP + "Sandy Bridge-EP", // 0x2d CPU_MODEL_SANDYBRIDGE_EP + "Nehalem-EX", // 0x2e CPU_MODEL_NEHALEM_EX + "Westmere-EX", // 0x2f CPU_MODEL_WESTMERE_EX + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "Ivy Bridge", // 0x3a + "", + "Haswell", // 0x3c "4th Generation Intel Core Processor" + "", // 0x3d "Next Generation Intel Core Processor" + "Ivy Bridge-EP", // 0x3e "Next Generation Intel Xeon Processor E7 Family" + "", // 0x3f "Future Generation Intel Xeon Processor" + "", + "", + "", + "", + "", + "Haswell", // 0x45 "4th Generation Intel Core Processor" + "Haswell", // 0x46 "4th Generation Intel Core Processor" + NULL +}; + +/* Brand ID is for back compability + * Newer CPUs uses the extended brand string */ +const char* const VM_Version_Ext::_brand_id[] = { + "", + "Celeron processor", + "Pentium III processor", + "Intel Pentium III Xeon processor", + "", + "", + "", + "", + "Intel Pentium 4 processor", + NULL +}; + + +const char* const VM_Version_Ext::_feature_edx_id[] = { + "On-Chip FPU", + "Virtual Mode Extensions", + "Debugging Extensions", + "Page Size Extensions", + "Time Stamp Counter", + "Model Specific Registers", + "Physical Address Extension", + "Machine Check Exceptions", + "CMPXCHG8B Instruction", + "On-Chip APIC", + "", + "Fast System Call", + "Memory Type Range Registers", + "Page Global Enable", + "Machine Check Architecture", + "Conditional Mov Instruction", + "Page Attribute Table", + "36-bit Page Size Extension", + "Processor Serial Number", + "CLFLUSH Instruction", + "", + "Debug Trace Store feature", + "ACPI registers in MSR space", + "Intel Architecture MMX Technology", + "Fast Float Point Save and Restore", + "Streaming SIMD extensions", + "Streaming SIMD extensions 2", + "Self-Snoop", + "Hyper Threading", + "Thermal Monitor", + "", + "Pending Break Enable" +}; + +const char* const VM_Version_Ext::_feature_extended_edx_id[] = { + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "SYSCALL/SYSRET", + "", + "", + "", + "", + "", + "", + "", + "", + "Execute Disable Bit", + "", + "", + "", + "", + "", + "", + "RDTSCP", + "", + "Intel 64 Architecture", + "", + "" +}; + +const char* const VM_Version_Ext::_feature_ecx_id[] = { + "Streaming SIMD Extensions 3", + "PCLMULQDQ", + "64-bit DS Area", + "MONITOR/MWAIT instructions", + "CPL Qualified Debug Store", + "Virtual Machine Extensions", + "Safer Mode Extensions", + "Enhanced Intel SpeedStep technology", + "Thermal Monitor 2", + "Supplemental 
Streaming SIMD Extensions 3", + "L1 Context ID", + "", + "Fused Multiply-Add", + "CMPXCHG16B", + "xTPR Update Control", + "Perfmon and Debug Capability", + "", + "Process-context identifiers", + "Direct Cache Access", + "Streaming SIMD extensions 4.1", + "Streaming SIMD extensions 4.2", + "x2APIC", + "MOVBE", + "Popcount instruction", + "TSC-Deadline", + "AESNI", + "XSAVE", + "OSXSAVE", + "AVX", + "F16C", + "RDRAND", + "" +}; + +const char* const VM_Version_Ext::_feature_extended_ecx_id[] = { + "LAHF/SAHF instruction support", + "Core multi-processor leagacy mode", + "", + "", + "", + "Advanced Bit Manipulations: LZCNT", + "SSE4A: MOVNTSS, MOVNTSD, EXTRQ, INSERTQ", + "Misaligned SSE mode", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" +}; diff --git a/src/cpu/x86/vm/vm_version_ext_x86.hpp b/src/cpu/x86/vm/vm_version_ext_x86.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3d984a8b148cfc1fa15400a7836fe9cdf7eccacb --- /dev/null +++ b/src/cpu/x86/vm/vm_version_ext_x86.hpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef CPU_X86_VM_VM_VERSION_EXT_X86_HPP +#define CPU_X86_VM_VM_VERSION_EXT_X86_HPP + +#include "utilities/macros.hpp" +#include "vm_version_x86.hpp" + +class VM_Version_Ext : public VM_Version { + private: + static const size_t VENDOR_LENGTH; + static const size_t CPU_EBS_MAX_LENGTH; + static const size_t CPU_TYPE_DESC_BUF_SIZE; + static const size_t CPU_DETAILED_DESC_BUF_SIZE; + + static const char* const _family_id_intel[]; + static const char* const _family_id_amd[]; + static const char* const _brand_id[]; + static const char* const _model_id_pentium_pro[]; + + static const char* const _feature_edx_id[]; + static const char* const _feature_extended_edx_id[]; + static const char* const _feature_ecx_id[]; + static const char* const _feature_extended_ecx_id[]; + + static int _no_of_threads; + static int _no_of_cores; + static int _no_of_packages; + static char* _cpu_brand_string; + static jlong _max_qualified_cpu_frequency; + + static const char* cpu_family_description(void); + static const char* cpu_model_description(void); + static const char* cpu_brand(void); + static const char* cpu_brand_string(void); + + static int cpu_type_description(char* const buf, size_t buf_len); + static int cpu_detailed_description(char* const buf, size_t buf_len); + static int cpu_extended_brand_string(char* const buf, size_t buf_len); + + static bool cpu_is_em64t(void); + static bool is_netburst(void); + + static size_t cpu_write_support_string(char* const buf, size_t buf_len); + static void resolve_cpu_information_details(void); + static jlong max_qualified_cpu_freq_from_brand_string(void); + + public: + // Offsets for cpuid asm stub brand string + static ByteSize proc_name_0_offset() { return byte_offset_of(CpuidInfo, proc_name_0); } + static ByteSize proc_name_1_offset() { return byte_offset_of(CpuidInfo, proc_name_1); } + static ByteSize proc_name_2_offset() { return byte_offset_of(CpuidInfo, proc_name_2); } + static ByteSize proc_name_3_offset() { return byte_offset_of(CpuidInfo, proc_name_3); } + static ByteSize proc_name_4_offset() { return byte_offset_of(CpuidInfo, proc_name_4); } + static ByteSize proc_name_5_offset() { return byte_offset_of(CpuidInfo, proc_name_5); } + static ByteSize proc_name_6_offset() { return byte_offset_of(CpuidInfo, proc_name_6); } + static ByteSize proc_name_7_offset() { return byte_offset_of(CpuidInfo, proc_name_7); } + static ByteSize proc_name_8_offset() { return byte_offset_of(CpuidInfo, proc_name_8); } + static ByteSize proc_name_9_offset() { return byte_offset_of(CpuidInfo, proc_name_9); } + static ByteSize proc_name_10_offset() { return byte_offset_of(CpuidInfo, proc_name_10); } + static ByteSize proc_name_11_offset() { return byte_offset_of(CpuidInfo, proc_name_11); } + + static int number_of_threads(void); + static int number_of_cores(void); + static int number_of_sockets(void); + + static jlong maximum_qualified_cpu_frequency(void); + + static bool supports_tscinv_ext(void); + + static const char* cpu_name(void); + static const char* cpu_description(void); + + static void initialize(); +}; + +#endif // CPU_X86_VM_VM_VERSION_EXT_X86_HPP diff --git a/src/os/linux/vm/os_linux.cpp b/src/os/linux/vm/os_linux.cpp index a499e499e8ea3ce0ddbbc939c460dee3a883dc8e..0daa106914a505af84d200f3dcf8ff2efd9bfeb7 100644 --- a/src/os/linux/vm/os_linux.cpp +++ b/src/os/linux/vm/os_linux.cpp @@ -2141,9 +2141,44 @@ void os::print_dll_info(outputStream *st) { jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid); - if (!_print_ascii_file(fname, st)) { - st->print("Can 
not get library information for pid = %d\n", pid); - } + if (!_print_ascii_file(fname, st)) { + st->print("Can not get library information for pid = %d\n", pid); + } +} + +int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { + FILE *procmapsFile = NULL; + + // Open the procfs maps file for the current process + if ((procmapsFile = fopen("/proc/self/maps", "r")) != NULL) { + // Allocate PATH_MAX for file name plus a reasonable size for other fields. + char line[PATH_MAX + 100]; + + // Read line by line from 'file' + while (fgets(line, sizeof(line), procmapsFile) != NULL) { + u8 base, top, offset, inode; + char permissions[5]; + char device[6]; + char name[PATH_MAX + 1]; + + // Parse fields from line + sscanf(line, UINT64_FORMAT_X "-" UINT64_FORMAT_X " %4s " UINT64_FORMAT_X " %5s " INT64_FORMAT " %s", + &base, &top, permissions, &offset, device, &inode, name); + + // Filter by device id '00:00' so that we only get file system mapped files. + if (strcmp(device, "00:00") != 0) { + + // Call callback with the fields of interest + if(callback(name, (address)base, (address)top, param)) { + // Oops abort, callback aborted + fclose(procmapsFile); + return 1; + } + } + } + fclose(procmapsFile); + } + return 0; } void os::print_os_info_brief(outputStream* st) { @@ -4030,6 +4065,10 @@ size_t os::read(int fd, void *buf, unsigned int nBytes) { return ::read(fd, buf, nBytes); } +size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { + return ::pread(fd, buf, nBytes, offset); +} + // TODO-FIXME: reconcile Solaris' os::sleep with the linux variation. // Solaris uses poll(), linux uses park(). // Poll() is likely a better choice, assuming that Thread.interrupt() diff --git a/src/os/linux/vm/os_perf_linux.cpp b/src/os/linux/vm/os_perf_linux.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a03d81644414f5dc3122025b8aa5bd008b0e7af9 --- /dev/null +++ b/src/os/linux/vm/os_perf_linux.cpp @@ -0,0 +1,1061 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jvm.h" +#include "memory/allocation.inline.hpp" +#include "os_linux.inline.hpp" +#include "runtime/os.hpp" +#include "runtime/os_perf.hpp" +#include "utilities/compilerWarnings.hpp" + +#ifdef X86 +#include "vm_version_ext_x86.hpp" +#endif +#ifdef ARM +#include "vm_version_ext_arm.hpp" +#endif +#ifndef ARM +#ifdef AARCH64 +#include "vm_version_ext_aarch64.hpp" +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + /proc/[number]/stat + Status information about the process. This is used by ps(1). It is defined in /usr/src/linux/fs/proc/array.c. + + The fields, in order, with their proper scanf(3) format specifiers, are: + + 1. pid %d The process id. + + 2. comm %s + The filename of the executable, in parentheses. This is visible whether or not the executable is swapped out. + + 3. state %c + One character from the string "RSDZTW" where R is running, S is sleeping in an interruptible wait, D is waiting in uninterruptible disk + sleep, Z is zombie, T is traced or stopped (on a signal), and W is paging. + + 4. ppid %d + The PID of the parent. + + 5. pgrp %d + The process group ID of the process. + + 6. session %d + The session ID of the process. + + 7. tty_nr %d + The tty the process uses. + + 8. tpgid %d + The process group ID of the process which currently owns the tty that the process is connected to. + + 9. flags %lu + The flags of the process. The math bit is decimal 4, and the traced bit is decimal 10. + + 10. minflt %lu + The number of minor faults the process has made which have not required loading a memory page from disk. + + 11. cminflt %lu + The number of minor faults that the process's waited-for children have made. + + 12. majflt %lu + The number of major faults the process has made which have required loading a memory page from disk. + + 13. cmajflt %lu + The number of major faults that the process's waited-for children have made. + + 14. utime %lu + The number of jiffies that this process has been scheduled in user mode. + + 15. stime %lu + The number of jiffies that this process has been scheduled in kernel mode. + + 16. cutime %ld + The number of jiffies that this process's waited-for children have been scheduled in user mode. (See also times(2).) + + 17. cstime %ld + The number of jiffies that this process' waited-for children have been scheduled in kernel mode. + + 18. priority %ld + The standard nice value, plus fifteen. The value is never negative in the kernel. + + 19. nice %ld + The nice value ranges from 19 (nicest) to -19 (not nice to others). + + 20. 0 %ld This value is hard coded to 0 as a placeholder for a removed field. + + 21. itrealvalue %ld + The time in jiffies before the next SIGALRM is sent to the process due to an interval timer. + + 22. starttime %lu + The time in jiffies the process started after system boot. + + 23. vsize %lu + Virtual memory size in bytes. + + 24. rss %ld + Resident Set Size: number of pages the process has in real memory, minus 3 for administrative purposes. This is just the pages which count + towards text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out. + + 25. rlim %lu + Current limit in bytes on the rss of the process (usually 4294967295 on i386). + + 26. startcode %lu + The address above which program text can run. + + 27. endcode %lu + The address below which program text can run. + + 28. 
startstack %lu + The address of the start of the stack. + + 29. kstkesp %lu + The current value of esp (stack pointer), as found in the kernel stack page for the process. + + 30. kstkeip %lu + The current EIP (instruction pointer). + + 31. signal %lu + The bitmap of pending signals (usually 0). + + 32. blocked %lu + The bitmap of blocked signals (usually 0, 2 for shells). + + 33. sigignore %lu + The bitmap of ignored signals. + + 34. sigcatch %lu + The bitmap of catched signals. + + 35. wchan %lu + This is the "channel" in which the process is waiting. It is the address of a system call, and can be looked up in a namelist if you need + a textual name. (If you have an up-to-date /etc/psdatabase, then try ps -l to see the WCHAN field in action.) + + 36. nswap %lu + Number of pages swapped - not maintained. + + 37. cnswap %lu + Cumulative nswap for child processes. + + 38. exit_signal %d + Signal to be sent to parent when we die. + + 39. processor %d + CPU number last executed on. + + + + ///// SSCANF FORMAT STRING. Copy and use. + +field: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 +format: %d %s %c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %d %d + + +*/ + +/** + * For platforms that have them, when declaring + * a printf-style function, + * formatSpec is the parameter number (starting at 1) + * that is the format argument ("%d pid %s") + * params is the parameter number where the actual args to + * the format starts. If the args are in a va_list, this + * should be 0. + */ +#ifndef PRINTF_ARGS +# define PRINTF_ARGS(formatSpec, params) ATTRIBUTE_PRINTF(formatSpec, params) +#endif + +#ifndef SCANF_ARGS +# define SCANF_ARGS(formatSpec, params) ATTRIBUTE_SCANF(formatSpec, params) +#endif + +#ifndef _PRINTFMT_ +# define _PRINTFMT_ +#endif + +#ifndef _SCANFMT_ +# define _SCANFMT_ +#endif + + +struct CPUPerfTicks { + uint64_t used; + uint64_t usedKernel; + uint64_t total; +}; + +typedef enum { + CPU_LOAD_VM_ONLY, + CPU_LOAD_GLOBAL, +} CpuLoadTarget; + +enum { + UNDETECTED, + UNDETECTABLE, + LINUX26_NPTL, + BAREMETAL +}; + +struct CPUPerfCounters { + int nProcs; + CPUPerfTicks jvmTicks; + CPUPerfTicks* cpus; +}; + +static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target); + +/** reads /proc//stat data, with some checks and some skips. + * Ensure that 'fmt' does _NOT_ contain the first two "%d %s" + */ +static int SCANF_ARGS(2, 0) vread_statdata(const char* procfile, _SCANFMT_ const char* fmt, va_list args) { + FILE*f; + int n; + char buf[2048]; + + if ((f = fopen(procfile, "r")) == NULL) { + return -1; + } + + if ((n = fread(buf, 1, sizeof(buf), f)) != -1) { + char *tmp; + + buf[n-1] = '\0'; + /** skip through pid and exec name. */ + if ((tmp = strrchr(buf, ')')) != NULL) { + // skip the ')' and the following space + // but check that buffer is long enough + tmp += 2; + if (tmp < buf + n) { + n = vsscanf(tmp, fmt, args); + } + } + } + + fclose(f); + + return n; +} + +static int SCANF_ARGS(2, 3) read_statdata(const char* procfile, _SCANFMT_ const char* fmt, ...) 
{ + int n; + va_list args; + + va_start(args, fmt); + n = vread_statdata(procfile, fmt, args); + va_end(args); + return n; +} + +static FILE* open_statfile(void) { + FILE *f; + + if ((f = fopen("/proc/stat", "r")) == NULL) { + static int haveWarned = 0; + if (!haveWarned) { + haveWarned = 1; + } + } + return f; +} + +static void +next_line(FILE *f) { + int c; + do { + c = fgetc(f); + } while (c != '\n' && c != EOF); +} + +/** + * Return the total number of ticks since the system was booted. + * If the usedTicks parameter is not NULL, it will be filled with + * the number of ticks spent on actual processes (user, system or + * nice processes) since system boot. Note that this is the total number + * of "executed" ticks on _all_ CPU:s, that is on a n-way system it is + * n times the number of ticks that has passed in clock time. + * + * Returns a negative value if the reading of the ticks failed. + */ +static OSReturn get_total_ticks(int which_logical_cpu, CPUPerfTicks* pticks) { + FILE* fh; + uint64_t userTicks, niceTicks, systemTicks, idleTicks; + uint64_t iowTicks = 0, irqTicks = 0, sirqTicks= 0; + int logical_cpu = -1; + const int expected_assign_count = (-1 == which_logical_cpu) ? 4 : 5; + int n; + + if ((fh = open_statfile()) == NULL) { + return OS_ERR; + } + if (-1 == which_logical_cpu) { + n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " + UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT, + &userTicks, &niceTicks, &systemTicks, &idleTicks, + &iowTicks, &irqTicks, &sirqTicks); + } else { + // Move to next line + next_line(fh); + + // find the line for requested cpu faster to just iterate linefeeds? + for (int i = 0; i < which_logical_cpu; i++) { + next_line(fh); + } + + n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " + UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT, + &logical_cpu, &userTicks, &niceTicks, + &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks); + } + + fclose(fh); + if (n < expected_assign_count || logical_cpu != which_logical_cpu) { +#ifdef DEBUG_LINUX_PROC_STAT + vm_fprintf(stderr, "[stat] read failed"); +#endif + return OS_ERR; + } + +#ifdef DEBUG_LINUX_PROC_STAT + vm_fprintf(stderr, "[stat] read " + UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " + UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " \n", + userTicks, niceTicks, systemTicks, idleTicks, + iowTicks, irqTicks, sirqTicks); +#endif + + pticks->used = userTicks + niceTicks; + pticks->usedKernel = systemTicks + irqTicks + sirqTicks; + pticks->total = userTicks + niceTicks + systemTicks + idleTicks + + iowTicks + irqTicks + sirqTicks; + + return OS_OK; +} + + +static int get_systemtype(void) { + static int procEntriesType = UNDETECTED; + DIR *taskDir; + + if (procEntriesType != UNDETECTED) { + return procEntriesType; + } + + // Check whether we have a task subdirectory + if ((taskDir = opendir("/proc/self/task")) == NULL) { + procEntriesType = UNDETECTABLE; + } else { + // The task subdirectory exists; we're on a Linux >= 2.6 system + closedir(taskDir); + procEntriesType = LINUX26_NPTL; + } + + return procEntriesType; +} + +/** read user and system ticks from a named procfile, assumed to be in 'stat' format then. 
*/ +static int read_ticks(const char* procfile, uint64_t* userTicks, uint64_t* systemTicks) { + return read_statdata(procfile, "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u " UINT64_FORMAT " " UINT64_FORMAT, + userTicks, systemTicks); +} + +/** + * Return the number of ticks spent in any of the processes belonging + * to the JVM on any CPU. + */ +static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) { + uint64_t userTicks; + uint64_t systemTicks; + + if (get_systemtype() != LINUX26_NPTL) { + return OS_ERR; + } + + if (read_ticks("/proc/self/stat", &userTicks, &systemTicks) != 2) { + return OS_ERR; + } + + // get the total + if (get_total_ticks(-1, pticks) != OS_OK) { + return OS_ERR; + } + + pticks->used = userTicks; + pticks->usedKernel = systemTicks; + + return OS_OK; +} + +/** + * Return the load of the CPU as a double. 1.0 means the CPU process uses all + * available time for user or system processes, 0.0 means the CPU uses all time + * being idle. + * + * Returns a negative value if there is a problem in determining the CPU load. + */ +static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target) { + uint64_t udiff, kdiff, tdiff; + CPUPerfTicks* pticks; + CPUPerfTicks tmp; + double user_load; + + *pkernelLoad = 0.0; + + if (target == CPU_LOAD_VM_ONLY) { + pticks = &counters->jvmTicks; + } else if (-1 == which_logical_cpu) { + pticks = &counters->cpus[counters->nProcs]; + } else { + pticks = &counters->cpus[which_logical_cpu]; + } + + tmp = *pticks; + + if (target == CPU_LOAD_VM_ONLY) { + if (get_jvm_ticks(pticks) != OS_OK) { + return -1.0; + } + } else if (get_total_ticks(which_logical_cpu, pticks) != OS_OK) { + return -1.0; + } + + // seems like we sometimes end up with less kernel ticks when + // reading /proc/self/stat a second time, timing issue between cpus? + if (pticks->usedKernel < tmp.usedKernel) { + kdiff = 0; + } else { + kdiff = pticks->usedKernel - tmp.usedKernel; + } + tdiff = pticks->total - tmp.total; + udiff = pticks->used - tmp.used; + + if (tdiff == 0) { + return 0.0; + } else if (tdiff < (udiff + kdiff)) { + tdiff = udiff + kdiff; + } + *pkernelLoad = (kdiff / (double)tdiff); + // BUG9044876, normalize return values to sane values + *pkernelLoad = MAX2(*pkernelLoad, 0.0); + *pkernelLoad = MIN2(*pkernelLoad, 1.0); + + user_load = (udiff / (double)tdiff); + user_load = MAX2(user_load, 0.0); + user_load = MIN2(user_load, 1.0); + + return user_load; +} + +static int SCANF_ARGS(1, 2) parse_stat(_SCANFMT_ const char* fmt, ...) 
{ + FILE *f; + va_list args; + + va_start(args, fmt); + + if ((f = open_statfile()) == NULL) { + va_end(args); + return OS_ERR; + } + for (;;) { + char line[80]; + if (fgets(line, sizeof(line), f) != NULL) { + if (vsscanf(line, fmt, args) == 1) { + fclose(f); + va_end(args); + return OS_OK; + } + } else { + fclose(f); + va_end(args); + return OS_ERR; + } + } +} + +static int get_noof_context_switches(uint64_t* switches) { + return parse_stat("ctxt " UINT64_FORMAT "\n", switches); +} + +/** returns boot time in _seconds_ since epoch */ +static int get_boot_time(uint64_t* time) { + return parse_stat("btime " UINT64_FORMAT "\n", time); +} + +static int perf_context_switch_rate(double* rate) { + static pthread_mutex_t contextSwitchLock = PTHREAD_MUTEX_INITIALIZER; + static uint64_t lastTime; + static uint64_t lastSwitches; + static double lastRate; + + uint64_t lt = 0; + int res = 0; + + if (lastTime == 0) { + uint64_t tmp; + if (get_boot_time(&tmp) < 0) { + return OS_ERR; + } + lt = tmp * 1000; + } + + res = OS_OK; + + pthread_mutex_lock(&contextSwitchLock); + { + + uint64_t sw; + s8 t, d; + + if (lastTime == 0) { + lastTime = lt; + } + + t = os::javaTimeMillis(); + d = t - lastTime; + + if (d == 0) { + *rate = lastRate; + } else if (!get_noof_context_switches(&sw)) { + *rate = ( (double)(sw - lastSwitches) / d ) * 1000; + lastRate = *rate; + lastSwitches = sw; + lastTime = t; + } else { + *rate = 0; + res = OS_ERR; + } + if (*rate <= 0) { + *rate = 0; + lastRate = 0; + } + } + pthread_mutex_unlock(&contextSwitchLock); + + return res; +} + +class CPUPerformanceInterface::CPUPerformance : public CHeapObj { + friend class CPUPerformanceInterface; + private: + CPUPerfCounters _counters; + + int cpu_load(int which_logical_cpu, double* cpu_load); + int context_switch_rate(double* rate); + int cpu_load_total_process(double* cpu_load); + int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad); + + public: + CPUPerformance(); + bool initialize(); + ~CPUPerformance(); +}; + +CPUPerformanceInterface::CPUPerformance::CPUPerformance() { + _counters.nProcs = os::active_processor_count(); + _counters.cpus = NULL; +} + +bool CPUPerformanceInterface::CPUPerformance::initialize() { + size_t tick_array_size = (_counters.nProcs +1) * sizeof(CPUPerfTicks); + _counters.cpus = (CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal); + if (NULL == _counters.cpus) { + return false; + } + memset(_counters.cpus, 0, tick_array_size); + + // For the CPU load total + get_total_ticks(-1, &_counters.cpus[_counters.nProcs]); + + // For each CPU + for (int i = 0; i < _counters.nProcs; i++) { + get_total_ticks(i, &_counters.cpus[i]); + } + // For JVM load + get_jvm_ticks(&_counters.jvmTicks); + + // initialize context switch system + // the double is only for init + double init_ctx_switch_rate; + perf_context_switch_rate(&init_ctx_switch_rate); + + return true; +} + +CPUPerformanceInterface::CPUPerformance::~CPUPerformance() { + if (_counters.cpus != NULL) { + FREE_C_HEAP_ARRAY(char, _counters.cpus, mtInternal); + } +} + +int CPUPerformanceInterface::CPUPerformance::cpu_load(int which_logical_cpu, double* cpu_load) { + double u, s; + u = get_cpu_load(which_logical_cpu, &_counters, &s, CPU_LOAD_GLOBAL); + if (u < 0) { + *cpu_load = 0.0; + return OS_ERR; + } + // Cap total systemload to 1.0 + *cpu_load = MIN2((u + s), 1.0); + return OS_OK; +} + +int CPUPerformanceInterface::CPUPerformance::cpu_load_total_process(double* cpu_load) { + double u, s; + u = get_cpu_load(-1, 
&_counters, &s, CPU_LOAD_VM_ONLY); + if (u < 0) { + *cpu_load = 0.0; + return OS_ERR; + } + *cpu_load = u + s; + return OS_OK; +} + +int CPUPerformanceInterface::CPUPerformance::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) { + double u, s, t; + + assert(pjvmUserLoad != NULL, "pjvmUserLoad not inited"); + assert(pjvmKernelLoad != NULL, "pjvmKernelLoad not inited"); + assert(psystemTotalLoad != NULL, "psystemTotalLoad not inited"); + + u = get_cpu_load(-1, &_counters, &s, CPU_LOAD_VM_ONLY); + if (u < 0) { + *pjvmUserLoad = 0.0; + *pjvmKernelLoad = 0.0; + *psystemTotalLoad = 0.0; + return OS_ERR; + } + + cpu_load(-1, &t); + // clamp at user+system and 1.0 + if (u + s > t) { + t = MIN2(u + s, 1.0); + } + + *pjvmUserLoad = u; + *pjvmKernelLoad = s; + *psystemTotalLoad = t; + + return OS_OK; +} + +int CPUPerformanceInterface::CPUPerformance::context_switch_rate(double* rate) { + return perf_context_switch_rate(rate); +} + +CPUPerformanceInterface::CPUPerformanceInterface() { + _impl = NULL; +} + +bool CPUPerformanceInterface::initialize() { + _impl = new CPUPerformanceInterface::CPUPerformance(); + return NULL == _impl ? false : _impl->initialize(); +} + +CPUPerformanceInterface::~CPUPerformanceInterface() { + if (_impl != NULL) { + delete _impl; + } +} + +int CPUPerformanceInterface::cpu_load(int which_logical_cpu, double* cpu_load) const { + return _impl->cpu_load(which_logical_cpu, cpu_load); +} + +int CPUPerformanceInterface::cpu_load_total_process(double* cpu_load) const { + return _impl->cpu_load_total_process(cpu_load); +} + +int CPUPerformanceInterface::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) const { + return _impl->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotalLoad); +} + +int CPUPerformanceInterface::context_switch_rate(double* rate) const { + return _impl->context_switch_rate(rate); +} + +class SystemProcessInterface::SystemProcesses : public CHeapObj { + friend class SystemProcessInterface; + private: + class ProcessIterator : public CHeapObj { + friend class SystemProcessInterface::SystemProcesses; + private: + DIR* _dir; + struct dirent* _entry; + bool _valid; + char _exeName[PATH_MAX]; + char _exePath[PATH_MAX]; + + ProcessIterator(); + ~ProcessIterator(); + bool initialize(); + + bool is_valid() const { return _valid; } + bool is_valid_entry(struct dirent* entry) const; + bool is_dir(const char* name) const; + int fsize(const char* name, uint64_t& size) const; + + char* allocate_string(const char* str) const; + void get_exe_name(); + char* get_exe_path(); + char* get_cmdline(); + + int current(SystemProcess* process_info); + int next_process(); + }; + + ProcessIterator* _iterator; + SystemProcesses(); + bool initialize(); + ~SystemProcesses(); + + //information about system processes + int system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const; +}; + +bool SystemProcessInterface::SystemProcesses::ProcessIterator::is_dir(const char* name) const { + struct stat mystat; + int ret_val = 0; + + ret_val = stat(name, &mystat); + if (ret_val < 0) { + return false; + } + ret_val = S_ISDIR(mystat.st_mode); + return ret_val > 0; +} + +int SystemProcessInterface::SystemProcesses::ProcessIterator::fsize(const char* name, uint64_t& size) const { + assert(name != NULL, "name pointer is NULL!"); + size = 0; + struct stat fbuf; + + if (stat(name, &fbuf) < 0) { + return OS_ERR; + } + size = fbuf.st_size; + return OS_OK; +} + +// if it has a numeric name, is a 
directory and has a 'stat' file in it +bool SystemProcessInterface::SystemProcesses::ProcessIterator::is_valid_entry(struct dirent* entry) const { + char buffer[PATH_MAX]; + uint64_t size = 0; + + if (atoi(entry->d_name) != 0) { + jio_snprintf(buffer, PATH_MAX, "/proc/%s", entry->d_name); + buffer[PATH_MAX - 1] = '\0'; + + if (is_dir(buffer)) { + jio_snprintf(buffer, PATH_MAX, "/proc/%s/stat", entry->d_name); + buffer[PATH_MAX - 1] = '\0'; + if (fsize(buffer, size) != OS_ERR) { + return true; + } + } + } + return false; +} + +// get exe-name from /proc//stat +void SystemProcessInterface::SystemProcesses::ProcessIterator::get_exe_name() { + FILE* fp; + char buffer[PATH_MAX]; + + jio_snprintf(buffer, PATH_MAX, "/proc/%s/stat", _entry->d_name); + buffer[PATH_MAX - 1] = '\0'; + if ((fp = fopen(buffer, "r")) != NULL) { + if (fgets(buffer, PATH_MAX, fp) != NULL) { + char* start, *end; + // exe-name is between the first pair of ( and ) + start = strchr(buffer, '('); + if (start != NULL && start[1] != '\0') { + start++; + end = strrchr(start, ')'); + if (end != NULL) { + size_t len; + len = MIN2(end - start, sizeof(_exeName) - 1); + memcpy(_exeName, start, len); + _exeName[len] = '\0'; + } + } + } + fclose(fp); + } +} + +// get command line from /proc//cmdline +char* SystemProcessInterface::SystemProcesses::ProcessIterator::get_cmdline() { + FILE* fp; + char buffer[PATH_MAX]; + char* cmdline = NULL; + + jio_snprintf(buffer, PATH_MAX, "/proc/%s/cmdline", _entry->d_name); + buffer[PATH_MAX - 1] = '\0'; + if ((fp = fopen(buffer, "r")) != NULL) { + size_t size = 0; + char dummy; + + // find out how long the file is (stat always returns 0) + while (fread(&dummy, 1, 1, fp) == 1) { + size++; + } + if (size > 0) { + cmdline = NEW_C_HEAP_ARRAY(char, size + 1, mtInternal); + if (cmdline != NULL) { + cmdline[0] = '\0'; + if (fseek(fp, 0, SEEK_SET) == 0) { + if (fread(cmdline, 1, size, fp) == size) { + // the file has the arguments separated by '\0', + // so we translate '\0' to ' ' + for (size_t i = 0; i < size; i++) { + if (cmdline[i] == '\0') { + cmdline[i] = ' '; + } + } + cmdline[size] = '\0'; + } + } + } + } + fclose(fp); + } + return cmdline; +} + +// get full path to exe from /proc//exe symlink +char* SystemProcessInterface::SystemProcesses::ProcessIterator::get_exe_path() { + char buffer[PATH_MAX]; + + jio_snprintf(buffer, PATH_MAX, "/proc/%s/exe", _entry->d_name); + buffer[PATH_MAX - 1] = '\0'; + return realpath(buffer, _exePath); +} + +char* SystemProcessInterface::SystemProcesses::ProcessIterator::allocate_string(const char* str) const { + if (str != NULL) { + size_t len = strlen(str); + char* tmp = NEW_C_HEAP_ARRAY(char, len+1, mtInternal); + strncpy(tmp, str, len); + tmp[len] = '\0'; + return tmp; + } + return NULL; +} + +int SystemProcessInterface::SystemProcesses::ProcessIterator::current(SystemProcess* process_info) { + if (!is_valid()) { + return OS_ERR; + } + + process_info->set_pid(atoi(_entry->d_name)); + + get_exe_name(); + process_info->set_name(allocate_string(_exeName)); + + if (get_exe_path() != NULL) { + process_info->set_path(allocate_string(_exePath)); + } + + char* cmdline = NULL; + cmdline = get_cmdline(); + if (cmdline != NULL) { + process_info->set_command_line(allocate_string(cmdline)); + FREE_C_HEAP_ARRAY(char, cmdline, mtInternal); + } + + return OS_OK; +} + +int SystemProcessInterface::SystemProcesses::ProcessIterator::next_process() { + struct dirent* entry; + + if (!is_valid()) { + return OS_ERR; + } + + do { + entry = os::readdir(_dir, _entry); + if (entry == NULL) { + 
// error + _valid = false; + return OS_ERR; + } + if (_entry == NULL) { + // reached end + _valid = false; + return OS_ERR; + } + } while(!is_valid_entry(_entry)); + + _valid = true; + return OS_OK; +} + +SystemProcessInterface::SystemProcesses::ProcessIterator::ProcessIterator() { + _dir = NULL; + _entry = NULL; + _valid = false; +} + +bool SystemProcessInterface::SystemProcesses::ProcessIterator::initialize() { + _dir = opendir("/proc"); + _entry = (struct dirent*)NEW_C_HEAP_ARRAY(char, sizeof(struct dirent) + NAME_MAX + 1, mtInternal); + if (NULL == _entry) { + return false; + } + _valid = true; + next_process(); + + return true; +} + +SystemProcessInterface::SystemProcesses::ProcessIterator::~ProcessIterator() { + if (_entry != NULL) { + FREE_C_HEAP_ARRAY(char, _entry, mtInternal); + } + if (_dir != NULL) { + closedir(_dir); + } +} + +SystemProcessInterface::SystemProcesses::SystemProcesses() { + _iterator = NULL; +} + +bool SystemProcessInterface::SystemProcesses::initialize() { + _iterator = new SystemProcessInterface::SystemProcesses::ProcessIterator(); + return NULL == _iterator ? false : _iterator->initialize(); +} + +SystemProcessInterface::SystemProcesses::~SystemProcesses() { + if (_iterator != NULL) { + delete _iterator; + } +} + +int SystemProcessInterface::SystemProcesses::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) const { + assert(system_processes != NULL, "system_processes pointer is NULL!"); + assert(no_of_sys_processes != NULL, "system_processes counter pointers is NULL!"); + assert(_iterator != NULL, "iterator is NULL!"); + + // initialize pointers + *no_of_sys_processes = 0; + *system_processes = NULL; + + while (_iterator->is_valid()) { + SystemProcess* tmp = new SystemProcess(); + _iterator->current(tmp); + + //if already existing head + if (*system_processes != NULL) { + //move "first to second" + tmp->set_next(*system_processes); + } + // new head + *system_processes = tmp; + // increment + (*no_of_sys_processes)++; + // step forward + _iterator->next_process(); + } + return OS_OK; +} + +int SystemProcessInterface::system_processes(SystemProcess** system_procs, int* no_of_sys_processes) const { + return _impl->system_processes(system_procs, no_of_sys_processes); +} + +SystemProcessInterface::SystemProcessInterface() { + _impl = NULL; +} + +bool SystemProcessInterface::initialize() { + _impl = new SystemProcessInterface::SystemProcesses(); + return NULL == _impl ? 
false : _impl->initialize(); +} + +SystemProcessInterface::~SystemProcessInterface() { + if (_impl != NULL) { + delete _impl; + } +} + +CPUInformationInterface::CPUInformationInterface() { + _cpu_info = NULL; +} + +bool CPUInformationInterface::initialize() { + _cpu_info = new CPUInformation(); + if (NULL == _cpu_info) { + return false; + } + _cpu_info->set_number_of_hardware_threads(VM_Version_Ext::number_of_threads()); + _cpu_info->set_number_of_cores(VM_Version_Ext::number_of_cores()); + _cpu_info->set_number_of_sockets(VM_Version_Ext::number_of_sockets()); + _cpu_info->set_cpu_name(VM_Version_Ext::cpu_name()); + _cpu_info->set_cpu_description(VM_Version_Ext::cpu_description()); + + return true; +} + +CPUInformationInterface::~CPUInformationInterface() { + if (_cpu_info != NULL) { + if (_cpu_info->cpu_name() != NULL) { + const char* cpu_name = _cpu_info->cpu_name(); + FREE_C_HEAP_ARRAY(char, cpu_name, mtInternal); + _cpu_info->set_cpu_name(NULL); + } + if (_cpu_info->cpu_description() != NULL) { + const char* cpu_desc = _cpu_info->cpu_description(); + FREE_C_HEAP_ARRAY(char, cpu_desc, mtInternal); + _cpu_info->set_cpu_description(NULL); + } + delete _cpu_info; + } +} + +int CPUInformationInterface::cpu_information(CPUInformation& cpu_info) { + if (_cpu_info == NULL) { + return OS_ERR; + } + + cpu_info = *_cpu_info; // shallow copy assignment + return OS_OK; +} diff --git a/src/os/posix/vm/os_posix.cpp b/src/os/posix/vm/os_posix.cpp index 534b19258682a9912f5fd97ed17480c37041deb3..27e3d7e14ca5ab667d313274eaccd31baa4e7b84 100644 --- a/src/os/posix/vm/os_posix.cpp +++ b/src/os/posix/vm/os_posix.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -843,6 +843,68 @@ void os::Posix::print_siginfo_brief(outputStream* os, const siginfo_t* si) { } } +Thread* os::ThreadCrashProtection::_protected_thread = NULL; +os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; +volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; + +os::ThreadCrashProtection::ThreadCrashProtection() { +} + +/* + * See the caveats for this class in os_posix.hpp + * Protects the callback call so that SIGSEGV / SIGBUS jumps back into this + * method and returns false. If none of the signals are raised, returns true. + * The callback is supposed to provide the method that should be protected. 
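+ * Illustrative usage sketch (not the exact caller), e.g. from the JFR sampler:
+ *   os::ThreadCrashProtection crash_protection;
+ *   if (!crash_protection.call(cb)) {
+ *     // cb hit SIGSEGV/SIGBUS; anything it was building may be incomplete
+ *   }
+ * where cb is an os::CrashProtectionCallback subclass overriding call().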
+ */ +bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) { + sigset_t saved_sig_mask; + + Thread::muxAcquire(&_crash_mux, "CrashProtection"); + + _protected_thread = ThreadLocalStorage::thread(); + assert(_protected_thread != NULL, "Cannot crash protect a NULL thread"); + + // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask + // since on at least some systems (OS X) siglongjmp will restore the mask + // for the process, not the thread + pthread_sigmask(0, NULL, &saved_sig_mask); + if (sigsetjmp(_jmpbuf, 0) == 0) { + // make sure we can see in the signal handler that we have crash protection + // installed + _crash_protection = this; + cb.call(); + // and clear the crash protection + _crash_protection = NULL; + _protected_thread = NULL; + Thread::muxRelease(&_crash_mux); + return true; + } + // this happens when we siglongjmp() back + pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL); + _crash_protection = NULL; + _protected_thread = NULL; + Thread::muxRelease(&_crash_mux); + return false; +} + +void os::ThreadCrashProtection::restore() { + assert(_crash_protection != NULL, "must have crash protection"); + siglongjmp(_jmpbuf, 1); +} + +void os::ThreadCrashProtection::check_crash_protection(int sig, + Thread* thread) { + + if (thread != NULL && + thread == _protected_thread && + _crash_protection != NULL) { + + if (sig == SIGSEGV || sig == SIGBUS) { + _crash_protection->restore(); + } + } +} + os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); } diff --git a/src/os/posix/vm/os_posix.hpp b/src/os/posix/vm/os_posix.hpp index 5313f41466056e9c93f77e3865b970075ae88660..c1047914c8e498ec7dc4d7535348793243deb357 100644 --- a/src/os/posix/vm/os_posix.hpp +++ b/src/os/posix/vm/os_posix.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,33 @@ public: }; +/* + * Crash protection utility used by JFR. Wrap the callback + * with a sigsetjmp and in case of a SIGSEGV/SIGBUS we siglongjmp + * back. + * To be able to use this - don't take locks, don't rely on destructors, + * don't make OS library calls, don't allocate memory, don't print, + * don't call code that could leave the heap / memory in an inconsistent state, + * or anything else where we are not in control if we suddenly jump out. + */ +class ThreadCrashProtection : public StackObj { +public: + static bool is_crash_protected(Thread* thr) { + return _crash_protection != NULL && _protected_thread == thr; + } + + ThreadCrashProtection(); + bool call(os::CrashProtectionCallback& cb); + + static void check_crash_protection(int signal, Thread* thread); +private: + static Thread* _protected_thread; + static ThreadCrashProtection* _crash_protection; + static volatile intptr_t _crash_mux; + void restore(); + sigjmp_buf _jmpbuf; +}; + /* * Crash protection for the watcher thread. 
Wrap the callback * with a sigsetjmp and in case of a SIGSEGV/SIGBUS we siglongjmp diff --git a/src/os/posix/vm/semaphore_posix.cpp b/src/os/posix/vm/semaphore_posix.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8f4ea3c5bb220325cfa3a58841e5eb91e33f5546 --- /dev/null +++ b/src/os/posix/vm/semaphore_posix.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled/precompiled.hpp" +#ifndef __APPLE__ +#include "runtime/os.hpp" +// POSIX unamed semaphores are not supported on OS X. +#include "semaphore_posix.hpp" +#include + +#define check_with_errno(check_type, cond, msg) \ + do { \ + int err = errno; \ + check_type(cond, msg); \ +} while (false) + +#define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg) +#define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg) + +PosixSemaphore::PosixSemaphore(uint value) { + int ret = sem_init(&_semaphore, 0, value); + + guarantee_with_errno(ret == 0, "Failed to initialize semaphore"); +} + +PosixSemaphore::~PosixSemaphore() { + sem_destroy(&_semaphore); +} + +void PosixSemaphore::signal(uint count) { + for (uint i = 0; i < count; i++) { + int ret = sem_post(&_semaphore); + + assert_with_errno(ret == 0, "sem_post failed"); + } +} + +void PosixSemaphore::wait() { + int ret; + + do { + ret = sem_wait(&_semaphore); + } while (ret != 0 && errno == EINTR); + + assert_with_errno(ret == 0, "sem_wait failed"); +} + +bool PosixSemaphore::trywait() { + int ret; + + do { + ret = sem_trywait(&_semaphore); + } while (ret != 0 && errno == EINTR); + + assert_with_errno(ret == 0 || errno == EAGAIN, "trywait failed"); + + return ret == 0; +} + +bool PosixSemaphore::timedwait(struct timespec ts) { + while (true) { + int result = sem_timedwait(&_semaphore, &ts); + if (result == 0) { + return true; + } else if (errno == EINTR) { + continue; + } else if (errno == ETIMEDOUT) { + return false; + } else { + assert_with_errno(false, "timedwait failed"); + return false; + } + } +} +#endif // __APPLE__ + diff --git a/src/os/posix/vm/semaphore_posix.hpp b/src/os/posix/vm/semaphore_posix.hpp new file mode 100644 index 0000000000000000000000000000000000000000..0ca87f598d222415d21f93ab699807cb736936a0 --- /dev/null +++ b/src/os/posix/vm/semaphore_posix.hpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_POSIX_VM_SEMAPHORE_POSIX_HPP +#define OS_POSIX_VM_SEMAPHORE_POSIX_HPP + +#include "memory/allocation.hpp" + +#include + +class PosixSemaphore : public CHeapObj { + sem_t _semaphore; + + // Prevent copying and assignment. + PosixSemaphore(const PosixSemaphore&); + PosixSemaphore& operator=(const PosixSemaphore&); + + public: + PosixSemaphore(uint value = 0); + ~PosixSemaphore(); + + void signal(uint count = 1); + + void wait(); + + bool trywait(); + bool timedwait(struct timespec ts); +}; + +typedef PosixSemaphore SemaphoreImpl; + +#endif // OS_POSIX_VM_SEMAPHORE_POSIX_HPP diff --git a/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp b/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp index 2d0d51973444ff5c65c7ae4ae3371517d8de2f40..433f37f032e510bf36171886a73f1040642b6fa7 100644 --- a/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp +++ b/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -102,6 +102,10 @@ inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* #ifdef AMD64 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } +inline void Atomic::store (julong store_value, julong* dest) { + assert(EnableJFR, "sanity check"); + *dest = store_value; +} inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { intptr_t addend = add_value; diff --git a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp index 10240a0b1c2cecdecc451be685db64d38eb66ebb..6556c897c4b52db5045b071632bdfc23514c3d6b 100644 --- a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp +++ b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -73,8 +73,21 @@ inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); } inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); } +inline bool OrderAccess::load_acquire(const volatile bool* p) { + assert(EnableJFR, "sanity check"); + return *p; +} +inline julong OrderAccess::load_acquire(const volatile julong* p) { + assert(EnableJFR, "sanity check"); + return Atomic::load((volatile jlong*)p); +} inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; } +inline uintptr_t OrderAccess::load_ptr_acquire(const volatile uintptr_t* p) { + assert(EnableJFR, "sanity check"); + return *p; +} + inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; } inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; } diff --git a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp index 1a7375afc7b7486085f7392859a77eb735690aba..014525737ba15cc791878f119bac4fbc11ecc801 100644 --- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -224,6 +224,8 @@ JVM_handle_linux_signal(int sig, // (no destructors can be run) os::WatcherThreadCrashProtection::check_crash_protection(sig, t); + os::ThreadCrashProtection::check_crash_protection(sig, t); + SignalHandlerMark shm(t); // Note: it's not uncommon that JNI code uses signal/sigset to install diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp index ce5918e5d280d047e063f7ddf7b59846016746e0..aab4936e0d4b7f0d81802f85c7f1656448df714c 100644 --- a/src/share/vm/c1/c1_GraphBuilder.cpp +++ b/src/share/vm/c1/c1_GraphBuilder.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -3461,10 +3461,11 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) { break; #ifdef TRACE_HAVE_INTRINSICS - case vmIntrinsics::_classID: - case vmIntrinsics::_threadID: - preserves_state = true; - cantrap = true; + case vmIntrinsics::_getClassId: + cantrap = false; + break; + + case vmIntrinsics::_getEventWriter: break; case vmIntrinsics::_counterTime: @@ -4411,6 +4412,18 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes log->inline_fail("reason unknown"); } } +#if INCLUDE_TRACE + EventCompilerInlining event; + if (event.should_commit()) { + event.set_compileId(compilation()->env()->task()->compile_id()); + event.set_message(msg); + event.set_succeeded(success); + event.set_bci(bci()); + event.set_caller(method()->get_Method()); + event.set_callee(callee->to_trace_struct()); + event.commit(); + } +#endif // INCLUDE_TRACE if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) { return; diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp index 21cd08da9e9c1969af0ea18070bc8813d1c1ae58..d5ede3be136ebf1c94c5d2ab707246b5ead525ec 100644 --- a/src/share/vm/c1/c1_LIRGenerator.cpp +++ b/src/share/vm/c1/c1_LIRGenerator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,9 @@ #if INCLUDE_ALL_GCS #include "gc_implementation/g1/heapRegion.hpp" #endif // INCLUDE_ALL_GCS +#ifdef TRACE_HAVE_INTRINSICS +#include "trace/traceMacros.hpp" +#endif #ifdef ASSERT #define __ gen()->lir(__FILE__, __LINE__)-> @@ -3069,39 +3072,47 @@ void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intri } #ifdef TRACE_HAVE_INTRINSICS -void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) { - LIR_Opr thread = getThreadPointer(); - LIR_Opr osthread = new_pointer_register(); - __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread); - size_t thread_id_size = OSThread::thread_id_size(); - if (thread_id_size == (size_t) BytesPerLong) { - LIR_Opr id = new_register(T_LONG); - __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id); - __ convert(Bytecodes::_l2i, id, rlock_result(x)); - } else if (thread_id_size == (size_t) BytesPerInt) { - __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x)); - } else { - ShouldNotReachHere(); - } +void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) { + CodeEmitInfo* info = state_for(x); + CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check + + assert(info != NULL, "must have info"); + LIRItem arg(x->argument_at(0), this); + + arg.load_item(); + LIR_Opr klass = new_register(T_METADATA); + __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info); + LIR_Opr id = new_register(T_LONG); + ByteSize offset = TRACE_KLASS_TRACE_ID_OFFSET; + LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG); + + __ move(trace_id_addr, id); + __ logical_or(id, LIR_OprFact::longConst(0x01l), id); + __ store(id, trace_id_addr); + +#ifdef TRACE_ID_META_BITS + __ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id); +#endif 
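+  // Note: any reserved metadata bits in the trace id were masked off above;
+  // if the class id is also stored shifted (TRACE_ID_CLASS_SHIFT defined),
+  // shift it back down below so the intrinsic returns the bare class id.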
+#ifdef TRACE_ID_CLASS_SHIFT + __ unsigned_shift_right(id, TRACE_ID_CLASS_SHIFT, id); +#endif + + __ move(id, rlock_result(x)); } -void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) { - CodeEmitInfo* info = state_for(x); - CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check - BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG); - assert(info != NULL, "must have info"); - LIRItem arg(x->argument_at(1), this); - arg.load_item(); - LIR_Opr klass = new_pointer_register(); - __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info); - LIR_Opr id = new_register(T_LONG); - ByteSize offset = TRACE_ID_OFFSET; - LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG); - __ move(trace_id_addr, id); - __ logical_or(id, LIR_OprFact::longConst(0x01l), id); - __ store(id, trace_id_addr); - __ logical_and(id, LIR_OprFact::longConst(~0x3l), id); - __ move(id, rlock_result(x)); +void LIRGenerator::do_getEventWriter(Intrinsic* x) { + LabelObj* L_end = new LabelObj(); + + LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(), + in_bytes(TRACE_THREAD_DATA_WRITER_OFFSET), + T_OBJECT); + LIR_Opr result = rlock_result(x); + __ move_wide(jobj_addr, result); + __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL)); + __ branch(lir_cond_equal, T_OBJECT, L_end->label()); + __ move_wide(new LIR_Address(result, T_OBJECT), result); + + __ branch_destination(L_end->label()); } #endif @@ -3116,8 +3127,16 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) { } #ifdef TRACE_HAVE_INTRINSICS - case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break; - case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break; + case vmIntrinsics::_getClassId: + if (EnableJFR) { + do_ClassIDIntrinsic(x); + } + break; + case vmIntrinsics::_getEventWriter: + if (EnableJFR) { + do_getEventWriter(x); + } + break; case vmIntrinsics::_counterTime: do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x); break; diff --git a/src/share/vm/c1/c1_LIRGenerator.hpp b/src/share/vm/c1/c1_LIRGenerator.hpp index 8312a4bb6f61ff9316c01b112bbe93164c8ddebc..f7362575c86f5ada62b888d02ef7d58d981f73b1 100644 --- a/src/share/vm/c1/c1_LIRGenerator.hpp +++ b/src/share/vm/c1/c1_LIRGenerator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -437,8 +437,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { void do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x); #ifdef TRACE_HAVE_INTRINSICS - void do_ThreadIDIntrinsic(Intrinsic* x); void do_ClassIDIntrinsic(Intrinsic* x); + void do_getEventWriter(Intrinsic* x); #endif ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k, diff --git a/src/share/vm/ci/ciMethod.cpp b/src/share/vm/ci/ciMethod.cpp index 6646e143029752ed3fc05dddaeaa962fd32b53b9..99a9c4208e0a77e4dc18959c9b23705786b9abe8 100644 --- a/src/share/vm/ci/ciMethod.cpp +++ b/src/share/vm/ci/ciMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1467,3 +1467,13 @@ void ciMethod::print_impl(outputStream* st) { st->print(" loaded=false"); } } + +#if INCLUDE_TRACE +TraceStructCalleeMethod ciMethod::to_trace_struct() const { + TraceStructCalleeMethod result; + result.set_type(holder()->name()->as_utf8()); + result.set_name(name()->as_utf8()); + result.set_descriptor(signature()->as_symbol()->as_utf8()); + return result; +} +#endif diff --git a/src/share/vm/ci/ciMethod.hpp b/src/share/vm/ci/ciMethod.hpp index 5560e6da813c2a8d50774c05a32f5502743df48b..95f6a7c1302e7fb994cd9fc4e1d8fa141c7e0a07 100644 --- a/src/share/vm/ci/ciMethod.hpp +++ b/src/share/vm/ci/ciMethod.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ #include "compiler/methodLiveness.hpp" #include "prims/methodHandles.hpp" #include "utilities/bitMap.hpp" +#include "trace/tracing.hpp" class ciMethodBlocks; class MethodLiveness; @@ -93,12 +94,6 @@ class ciMethod : public ciMetadata { ciMethod(methodHandle h_m, ciInstanceKlass* holder); ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor); - Method* get_Method() const { - Method* m = (Method*)_metadata; - assert(m != NULL, "illegal use of unloaded method"); - return m; - } - oop loader() const { return _holder->loader(); } const char* type_string() { return "ciMethod"; } @@ -156,6 +151,11 @@ class ciMethod : public ciMetadata { } } + Method* get_Method() const { + Method* m = (Method*)_metadata; + assert(m != NULL, "illegal use of unloaded method"); + return m; + } // Method code and related information. address code() { if (_code == NULL) load_code(); return _code; } @@ -347,6 +347,10 @@ class ciMethod : public ciMetadata { // Print the name of this method in various incarnations. void print_name(outputStream* st = tty); void print_short_name(outputStream* st = tty); + +#if INCLUDE_TRACE + TraceStructCalleeMethod to_trace_struct() const; +#endif }; #endif // SHARE_VM_CI_CIMETHOD_HPP diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp index 07d07e4f2cb69ddd1d0968cde4b4662dd7c41cf3..e23f8002f8f9660988025298dddbd910c241ec37 100644 --- a/src/share/vm/classfile/classFileParser.cpp +++ b/src/share/vm/classfile/classFileParser.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,7 @@ #include "runtime/timer.hpp" #include "services/classLoadingService.hpp" #include "services/threadService.hpp" +#include "trace/traceMacros.hpp" #include "utilities/array.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/ostream.hpp" @@ -3885,6 +3886,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, // This class and superclass u2 this_class_index = cfs->get_u2_fast(); + _this_class_index = this_class_index; //used by jfr check_property( valid_cp_range(this_class_index, cp_size) && cp->tag_at(this_class_index).is_unresolved_klass(), @@ -4212,6 +4214,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, ClassLoadingService::notify_class_loaded(InstanceKlass::cast(this_klass()), false /* not shared class */); + TRACE_INIT_ID(InstanceKlass::cast(this_klass())); if (TraceClassLoading) { ResourceMark rm; @@ -5273,3 +5276,19 @@ char* ClassFileParser::skip_over_field_signature(char* signature, } return NULL; } + +const ClassFileStream* ClassFileParser::clone_stream() const { + assert(_stream != NULL, "invariant"); + return _stream->clone(); +} + +void ClassFileParser::set_klass_to_deallocate(InstanceKlass* klass) { + +#ifdef ASSERT + if (klass != NULL) { + assert(NULL == _klass, "leaking?"); + } +#endif + + _klass = klass; +} diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp index 73ccdeb724d0b0fad8c8418a0577b997bf62972f..c216af16b022c1c4e3a8809173d9df7f17426235 100644 --- a/src/share/vm/classfile/classFileParser.hpp +++ b/src/share/vm/classfile/classFileParser.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,6 +47,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { bool _relax_verify; u2 _major_version; u2 _minor_version; + u2 _this_class_index; Symbol* _class_name; ClassLoaderData* _loader_data; KlassHandle _host_klass; @@ -90,6 +91,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { void create_combined_annotations(TRAPS); void init_parsed_class_attributes(ClassLoaderData* loader_data) { + _this_class_index = 0; _loader_data = loader_data; _synthetic_flag = false; _sourcefile_index = 0; @@ -486,6 +488,10 @@ PRAGMA_DIAG_POP bool verify, TRAPS); + u2 this_class_index() const { return _this_class_index; } + const ClassFileStream* clone_stream() const; + void set_klass_to_deallocate(InstanceKlass* klass); + // Verifier checks static void check_super_class_access(instanceKlassHandle this_klass, TRAPS); static void check_super_interface_access(instanceKlassHandle this_klass, TRAPS); diff --git a/src/share/vm/classfile/classFileStream.cpp b/src/share/vm/classfile/classFileStream.cpp index 4cd811f88b37f58501888de65608e46e8b8b8031..5fcc5bb9b4cd6c9e5a8c603fa56ad9172d6c2ca8 100644 --- a/src/share/vm/classfile/classFileStream.cpp +++ b/src/share/vm/classfile/classFileStream.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,6 +38,33 @@ ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source) { _need_verify = false; } +const u1* ClassFileStream::clone_buffer() const { + u1* const new_buffer_start = NEW_RESOURCE_ARRAY(u1, length()); + memcpy(new_buffer_start, _buffer_start, length()); + return new_buffer_start; +} + +const char* const ClassFileStream::clone_source() const { + const char* const src = source(); + char* source_copy = NULL; + if (src != NULL) { + size_t source_len = strlen(src); + source_copy = NEW_RESOURCE_ARRAY(char, source_len + 1); + strncpy(source_copy, src, source_len + 1); + } + return source_copy; +} + +// Caller responsible for ResourceMark +// clone stream with a rewound position +const ClassFileStream* ClassFileStream::clone() const { + const u1* const new_buffer_start = clone_buffer(); + return new ClassFileStream(const_cast(new_buffer_start), + length(), + clone_source()/*, + need_verify()*/); +} + u1 ClassFileStream::get_u1(TRAPS) { if (_need_verify) { guarantee_more(1, CHECK_0); diff --git a/src/share/vm/classfile/classFileStream.hpp b/src/share/vm/classfile/classFileStream.hpp index cf69c32eba7f950e22dcfe4e22530a3e1beca878..459ce437568cab962651b96c4ad08c40cf31020a 100644 --- a/src/share/vm/classfile/classFileStream.hpp +++ b/src/share/vm/classfile/classFileStream.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,15 +57,24 @@ class ClassFileStream: public ResourceObj { bool _need_verify; // True if verification is on for the class file void truncated_file_error(TRAPS); + protected: + const u1* clone_buffer() const; + const char* const clone_source() const; public: // Constructor ClassFileStream(u1* buffer, int length, const char* source); + virtual const ClassFileStream* clone() const; + // Buffer access u1* buffer() const { return _buffer_start; } int length() const { return _buffer_end - _buffer_start; } u1* current() const { return _current; } void set_current(u1* pos) { _current = pos; } + // for relative positioning + juint current_offset() const { + return (juint)(_current - _buffer_start); + } const char* source() const { return _source; } void set_verify(bool flag) { _need_verify = flag; } diff --git a/src/share/vm/classfile/classLoader.cpp b/src/share/vm/classfile/classLoader.cpp index a33263b0f42127fcfbe63813336db34327b9d053..fbcff87272abf634c71b1d2525f3194b38e2823a 100644 --- a/src/share/vm/classfile/classLoader.cpp +++ b/src/share/vm/classfile/classLoader.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1127,6 +1127,8 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) { parsed_name, context.should_verify(classpath_index), THREAD); + + TRACE_KLASS_CREATION(result, parser, THREAD); if (HAS_PENDING_EXCEPTION) { ResourceMark rm; if (DumpSharedSpaces) { diff --git a/src/share/vm/classfile/classLoaderData.cpp b/src/share/vm/classfile/classLoaderData.cpp index b4eb8f542d9b4e8de1e89f03b89604bf02a67223..4dc795ec382d78bb7ca57baf452f49cd7f6688a0 100644 --- a/src/share/vm/classfile/classLoaderData.cpp +++ b/src/share/vm/classfile/classLoaderData.cpp @@ -82,6 +82,7 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Depen _next(NULL), _dependencies(dependencies), _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) { // empty + TRACE_INIT_ID(this); } void ClassLoaderData::init_dependencies(TRAPS) { @@ -646,6 +647,20 @@ void ClassLoaderDataGraph::cld_do(CLDClosure* cl) { } } +void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) { + // this method is only used by jfr now, if you need to use this method in another case, + // this check should be removed. + assert(EnableJFR && FlightRecorder, "just check"); + + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); + // Only walk the head until any clds not purged from prior unloading + // (CMS doesn't purge right away). + for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) { + assert(cld->is_unloading(), "invariant"); + cl->do_cld(cld); + } +} + void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) { for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->_next) { CLDClosure* closure = cld->keep_alive() ? strong : weak; @@ -980,9 +995,7 @@ void ClassLoaderDataGraph::class_unload_event(Klass* const k) { EventClassUnload event(UNTIMED); event.set_endtime(_class_unload_time); event.set_unloadedClass(k); - oop defining_class_loader = k->class_loader(); - event.set_definingClassLoader(defining_class_loader != NULL ? - defining_class_loader->klass() : (Klass*)NULL); + event.set_definingClassLoader(k->class_loader_data()); event.commit(); } diff --git a/src/share/vm/classfile/classLoaderData.hpp b/src/share/vm/classfile/classLoaderData.hpp index 8083b70c5ada909618db71b8bb669fe97b03541c..09f5329283e55e0f99a3264979545fd9e7e68a7a 100644 --- a/src/share/vm/classfile/classLoaderData.hpp +++ b/src/share/vm/classfile/classLoaderData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,7 @@ #if INCLUDE_TRACE #include "utilities/ticks.hpp" #endif +#include "jfr/utilities/jfrLog.hpp" // // A class loader represents a linkset. 
Conceptually, a linkset identifies @@ -82,6 +83,7 @@ class ClassLoaderDataGraph : public AllStatic { static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); // cld do static void cld_do(CLDClosure* cl); + static void cld_unloading_do(CLDClosure* cl); static void roots_cld_do(CLDClosure* strong, CLDClosure* weak); static void keep_alive_cld_do(CLDClosure* cl); static void always_strong_cld_do(CLDClosure* cl); @@ -128,6 +130,8 @@ class ClassLoaderDataGraph : public AllStatic { class ClassLoaderData : public CHeapObj { friend class VMStructs; + friend class CLDClaimContext; + friend class CLDClaimStateClosure; private: class Dependencies VALUE_OBJ_CLASS_SPEC { objArrayOop _list_head; @@ -213,6 +217,8 @@ class ClassLoaderData : public CHeapObj { static Metaspace* _ro_metaspace; static Metaspace* _rw_metaspace; + TRACE_DEFINE_TRACE_ID_FIELD; + void set_next(ClassLoaderData* next) { _next = next; } ClassLoaderData* next() const { return _next; } @@ -325,6 +331,8 @@ class ClassLoaderData : public CHeapObj { Metaspace* ro_metaspace(); Metaspace* rw_metaspace(); void initialize_shared_metaspaces(); + + TRACE_DEFINE_TRACE_ID_METHODS; }; // An iterator that distributes Klasses to parallel worker threads. diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp index 2345d7451eb8db07ed08a3b41e9e201f4030b086..ae1a60f250f16a7f749039424f07e2c34d22090e 100644 --- a/src/share/vm/classfile/javaClasses.cpp +++ b/src/share/vm/classfile/javaClasses.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1047,7 +1047,7 @@ void java_lang_Thread::set_thread_status(oop java_thread, // Read thread status value from threadStatus field in java.lang.Thread java class. java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) { - assert(Thread::current()->is_Watcher_thread() || Thread::current()->is_VM_thread() || + assert((EnableJFR && Threads_lock->owned_by_self()) || Thread::current()->is_Watcher_thread() || Thread::current()->is_VM_thread() || JavaThread::current()->thread_state() == _thread_in_vm, "Java Thread is not running in vm"); // The threadStatus is only present starting in 1.5 diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp index da2717d3625d94d05751c040440f34fe101cd2af..90ab8cbc51cf2d020a4271ed1ed1e094575388dd 100644 --- a/src/share/vm/classfile/systemDictionary.cpp +++ b/src/share/vm/classfile/systemDictionary.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1003,15 +1003,16 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name, // // Note: "name" is updated. 
- instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, - loader_data, - protection_domain, - host_klass, - cp_patches, - parsed_name, - true, - THREAD); - + ClassFileParser parser(st); + instanceKlassHandle k = parser.parseClassFile(class_name, + loader_data, + protection_domain, + host_klass, + cp_patches, + parsed_name, + true, + THREAD); + TRACE_KLASS_CREATION(k, parser, THREAD); if (host_klass.not_null() && k.not_null()) { assert(EnableInvokeDynamic, ""); @@ -1085,12 +1086,14 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name, // // Note: "name" is updated. - instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, - loader_data, - protection_domain, - parsed_name, - verify, - THREAD); + ClassFileParser parser(st); + instanceKlassHandle k = parser.parseClassFile(class_name, + loader_data, + protection_domain, + parsed_name, + verify, + THREAD); + TRACE_KLASS_CREATION(k, parser, THREAD); const char* pkg = "java/"; size_t pkglen = strlen(pkg); @@ -1385,6 +1388,19 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha } } +static void class_define_event(InstanceKlass* k, + const ClassLoaderData* def_cld) { +#if INCLUDE_TRACE + EventClassDefine event; + if (event.should_commit()) { + ResourceMark m; + event.set_definedClass(k); + event.set_definingClassLoader(def_cld); + event.commit(); + } +#endif // INCLUDE_TRACE +} + void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { ClassLoaderData* loader_data = k->class_loader_data(); @@ -1455,6 +1471,7 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { } + class_define_event(k(), loader_data); } // Support parallel classloading @@ -2692,16 +2709,11 @@ void SystemDictionary::post_class_load_event(const Ticks& start_time, instanceKlassHandle k, Handle initiating_loader) { #if INCLUDE_TRACE - EventClassLoad event(UNTIMED); + EventClassLoad event; if (event.should_commit()) { - event.set_starttime(start_time); event.set_loadedClass(k()); - oop defining_class_loader = k->class_loader(); - event.set_definingClassLoader(defining_class_loader != NULL ? - defining_class_loader->klass() : (Klass*)NULL); - oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader(); - event.set_initiatingClassLoader(class_loader != NULL ? - class_loader->klass() : (Klass*)NULL); + event.set_definingClassLoader(k->class_loader_data()); + event.set_initiatingClassLoader(ClassLoaderData::class_loader_data_or_null(initiating_loader())); event.commit(); } #endif // INCLUDE_TRACE diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp index 57a9d669b7ebbe6c0e8111fb4ec5fd5221096a67..987e246fa196b876e6de74cbe22a2a211adb9b7c 100644 --- a/src/share/vm/classfile/systemDictionary.hpp +++ b/src/share/vm/classfile/systemDictionary.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,7 +77,6 @@ class LoaderConstraintTable; template class HashtableBucket; class ResolutionErrorTable; class SymbolPropertyTable; -class Ticks; // Certain classes are preloaded, such as java.lang.Object and java.lang.String. 
// They are all "well-known", in the sense that no class loader is allowed diff --git a/src/share/vm/code/codeBlob.cpp b/src/share/vm/code/codeBlob.cpp index 87ae60f81ec44ff834d272332e07b358d1d91292..263c8eeea72d6d747947e9bf18975fc24e0aaab3 100644 --- a/src/share/vm/code/codeBlob.cpp +++ b/src/share/vm/code/codeBlob.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ #include "c1/c1_Runtime1.hpp" #endif -unsigned int align_code_offset(int offset) { +unsigned int CodeBlob::align_code_offset(int offset) { // align the size to CodeEntryAlignment return ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1)) diff --git a/src/share/vm/code/codeBlob.hpp b/src/share/vm/code/codeBlob.hpp index 4f83fac6a2601cc66d75104d2b5bafe01e7c4df1..af9ec07f84a30fe9fdbf2da1e764e3a4fd8d1bdb 100644 --- a/src/share/vm/code/codeBlob.hpp +++ b/src/share/vm/code/codeBlob.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,15 @@ #include "runtime/frame.hpp" #include "runtime/handles.hpp" +// CodeBlob Types, used for jfr +// Used in the CodeCache to assign CodeBlobs to different CodeHeaps +struct CodeBlobType { + enum { + All = 0, // All types (No code cache segmentation) + NumTypes = 1 // Number of CodeBlobTypes + }; +}; + // CodeBlob - superclass for all entries in the CodeCache. // // Suptypes are: @@ -71,6 +80,7 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC { public: // Returns the space needed for CodeBlob static unsigned int allocation_size(CodeBuffer* cb, int header_size); + static unsigned int align_code_offset(int offset); // Creation // a) simple CodeBlob @@ -205,6 +215,7 @@ class BufferBlob: public CodeBlob { friend class AdapterBlob; friend class VtableBlob; friend class MethodHandlesAdapterBlob; + friend class WhiteBox; private: // Creation support diff --git a/src/share/vm/code/codeCache.cpp b/src/share/vm/code/codeCache.cpp index 5f9792d8fbbf91603f450c69218c8eaca143a162..27c9fc3cf203f4118a965296ecedab7236df4e16 100644 --- a/src/share/vm/code/codeCache.cpp +++ b/src/share/vm/code/codeCache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -188,6 +188,14 @@ CodeBlob* CodeCache::allocate(int size, bool is_critical) { if (cb != NULL) break; if (!_heap->expand_by(CodeCacheExpansionSize)) { // Expansion failed + if (EnableJFR) { + if (CodeCache_lock->owned_by_self()) { + MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + report_codemem_full(); + } else { + report_codemem_full(); + } + } return NULL; } if (PrintCodeCacheExtension) { @@ -779,6 +787,7 @@ void CodeCache::report_codemem_full() { _codemem_full_count++; EventCodeCacheFull event; if (event.should_commit()) { + event.set_codeBlobType((u1)CodeBlobType::All); event.set_startAddress((u8)low_bound()); event.set_commitedTopAddress((u8)high()); event.set_reservedTopAddress((u8)high_bound()); diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp index da380e622edb4841f730caeb8ba9f39dc81c7b30..bec803bdfa88d2c6d966a808a4e757c60bc43b61 100644 --- a/src/share/vm/compiler/compileBroker.cpp +++ b/src/share/vm/compiler/compileBroker.cpp @@ -2035,7 +2035,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { assert(task->compile_id() != CICrashAt, "just as planned"); if (event.should_commit()) { event.set_method(target->get_Method()); - event.set_compileID(compile_id); + event.set_compileId(compile_id); event.set_compileLevel(task->comp_level()); event.set_succeded(task->is_success()); event.set_isOsr(is_osr); diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index 500390af1ee0d36fed7a868ab431b20816f905cd..5bd9871b62933ce9eb83023ec4dd8a1a2eff191b 100644 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,6 @@ class G1NewTracer; class G1OldTracer; class EvacuationFailedInfo; class nmethod; -class Ticks; typedef OverflowTaskQueue RefToScanQueue; typedef GenericTaskQueueSet RefToScanQueueSet; @@ -1082,6 +1081,7 @@ public: ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; } G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; } + G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; } virtual size_t capacity() const; virtual size_t used() const; diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp index c4463f50fcc12a0bae7482ab2cdee03f7b8abf0f..15ad1afb5729ae6a064f2b0c29b261bb74f99610 100644 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
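In the CodeCache::allocate() hunk above, report_codemem_full() is called with CodeCache_lock temporarily released (via MutexUnlockerEx) when the current thread already owns it. A rough standalone analogy of that unlock-for-the-call pattern, using std::mutex and a hypothetical MutexUnlockerSketch guard rather than the HotSpot mutex classes:

// Standalone analogy: release a lock we may already hold for the duration of a
// reporting call, then reacquire it automatically.
#include <cstdio>
#include <mutex>

static std::mutex code_cache_lock_sketch;

struct MutexUnlockerSketch {                             // rough analog of MutexUnlockerEx
  std::mutex& m;
  explicit MutexUnlockerSketch(std::mutex& mu) : m(mu) { m.unlock(); }
  ~MutexUnlockerSketch() { m.lock(); }
};

static void report_codemem_full_sketch() {
  std::printf("code cache full reported (lock not held here)\n");
}

// 'owns_lock' plays the role of CodeCache_lock->owned_by_self().
static void on_expansion_failed(bool owns_lock) {
  if (owns_lock) {
    MutexUnlockerSketch mu(code_cache_lock_sketch);      // drop the lock for the call...
    report_codemem_full_sketch();
  } else {                                               // ...otherwise report directly
    report_codemem_full_sketch();
  }
}                                                        // lock reacquired here when it was dropped

int main() {
  on_expansion_failed(false);
  code_cache_lock_sketch.lock();
  on_expansion_failed(true);
  code_cache_lock_sketch.unlock();
  return 0;
}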
* * This code is free software; you can redistribute it and/or modify it @@ -282,6 +282,9 @@ G1CollectorPolicy::G1CollectorPolicy() : double max_gc_time = (double) MaxGCPauseMillis / 1000.0; double time_slice = (double) GCPauseIntervalMillis / 1000.0; _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); + if (EnableJFR) { + _ihop_control = create_ihop_control(); + } uintx confidence_perc = G1ConfidencePercent; // Put an artificial ceiling on this so that it's not set to a silly value. @@ -320,6 +323,13 @@ G1CollectorPolicy::G1CollectorPolicy() : _collectionSetChooser = new CollectionSetChooser(); } +G1CollectorPolicy::~G1CollectorPolicy() { + if (EnableJFR) { + assert(_ihop_control != NULL, "sanity check"); + delete _ihop_control; + } +} + void G1CollectorPolicy::initialize_alignments() { _space_alignment = HeapRegion::GrainBytes; size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable); @@ -507,6 +517,10 @@ void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) { _reserve_regions = (uint) ceil(reserve_regions_d); _young_gen_sizer->heap_size_changed(new_number_of_regions); + + if (EnableJFR) { + _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes); + } } uint G1CollectorPolicy::calculate_young_list_desired_min_length( @@ -1189,6 +1203,15 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), update_rs_time_goal_ms); _collectionSetChooser->verify(); + + if (EnableJFR) { + _ihop_control->send_trace_event(_g1->gc_tracer_stw()); + } +} + +G1IHOPControl* G1CollectorPolicy::create_ihop_control() { + assert(EnableJFR, "sanity check"); + return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent); } #define EXT_SIZE_FORMAT "%.1f%s" diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp index 96e4dc63dd27688e4b5477ee6538aab8f2294221..308a1293632540e0d8a56075b366eff120ec6fc8 100644 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
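The G1CollectorPolicy changes above tie the lifetime of the new _ihop_control to EnableJFR: it is created in the constructor, deleted in the new destructor, and every use (heap resize, pause end) repeats the flag check. A small standalone model of that conditional-ownership pattern, with hypothetical PolicySketch/IHOPControlSketch types standing in for the real classes:

// Standalone model of flag-conditional ownership: the controller exists only when
// the feature flag is on, and every use is guarded by the same flag.
#include <cassert>
#include <cstddef>
#include <cstdio>

static bool EnableJFR_flag = true;                       // stand-in for the EnableJFR flag

struct IHOPControlSketch {
  size_t target_occupancy = 0;
  void update_target_occupancy(size_t bytes) { target_occupancy = bytes; }
  void send_trace_event() const { std::printf("IHOP target: %zu bytes\n", target_occupancy); }
};

class PolicySketch {
  IHOPControlSketch* _ihop_control;
 public:
  PolicySketch() : _ihop_control(NULL) {
    if (EnableJFR_flag) {
      _ihop_control = new IHOPControlSketch();           // created only when the flag is on
    }
  }
  ~PolicySketch() {
    if (EnableJFR_flag) {
      assert(_ihop_control != NULL);
      delete _ihop_control;
    }
  }
  void record_new_heap_size(size_t region_bytes, unsigned regions) {
    if (EnableJFR_flag) {
      _ihop_control->update_target_occupancy(regions * region_bytes);
    }
  }
  void record_collection_pause_end() {
    if (EnableJFR_flag) {
      _ihop_control->send_trace_event();
    }
  }
};

int main() {
  PolicySketch policy;
  policy.record_new_heap_size(1u << 20, 128);            // 128 regions of 1 MiB
  policy.record_collection_pause_end();
  return 0;
}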
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "gc_implementation/g1/collectionSetChooser.hpp" #include "gc_implementation/g1/g1Allocator.hpp" #include "gc_implementation/g1/g1MMUTracker.hpp" +#include "gc_implementation/g1/g1IHOPControl.hpp" #include "memory/collectorPolicy.hpp" // A G1CollectorPolicy makes policy decisions that determine the @@ -161,6 +162,8 @@ public: class G1CollectorPolicy: public CollectorPolicy { private: + static G1IHOPControl* create_ihop_control(); + // either equal to the number of parallel threads, if ParallelGCThreads // has been set, or 1 otherwise int _parallel_gc_threads; @@ -173,6 +176,7 @@ private: }; G1MMUTracker* _mmu_tracker; + G1IHOPControl* _ihop_control; void initialize_alignments(); void initialize_flags(); @@ -636,6 +640,7 @@ private: public: G1CollectorPolicy(); + virtual ~G1CollectorPolicy(); virtual G1CollectorPolicy* as_g1_policy() { return this; } diff --git a/src/share/vm/gc_implementation/g1/g1HeapRegionTraceType.hpp b/src/share/vm/gc_implementation/g1/g1HeapRegionTraceType.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d346b93dd4ba8c52fe7a1b7bf3635d3315d9332c --- /dev/null +++ b/src/share/vm/gc_implementation/g1/g1HeapRegionTraceType.hpp @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_G1_G1HEAPREGIONTRACETYPE_HPP +#define SHARE_VM_GC_G1_G1HEAPREGIONTRACETYPE_HPP + +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" + +class G1HeapRegionTraceType : AllStatic { + public: + enum Type { + Free, + Eden, + Survivor, + StartsHumongous, + ContinuesHumongous, + Old, + Pinned, + OpenArchive, + ClosedArchive, + G1HeapRegionTypeEndSentinel + }; + + static const char* to_string(G1HeapRegionTraceType::Type type) { + switch (type) { + case Free: return "Free"; + case Eden: return "Eden"; + case Survivor: return "Survivor"; + case StartsHumongous: return "Starts Humongous"; + case ContinuesHumongous: return "Continues Humongous"; + case Old: return "Old"; + case Pinned: return "Pinned"; + case OpenArchive: return "OpenArchive"; + case ClosedArchive: return "ClosedArchive"; + default: ShouldNotReachHere(); return NULL; + } + } +}; + +#endif // SHARE_VM_GC_G1_G1HEAPREGIONTRACETYPE_HPP diff --git a/src/share/vm/gc_implementation/g1/g1IHOPControl.cpp b/src/share/vm/gc_implementation/g1/g1IHOPControl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..acb8b6b6845ab8fd4482db3cf0d66c3c206b75a9 --- /dev/null +++ b/src/share/vm/gc_implementation/g1/g1IHOPControl.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/g1IHOPControl.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "jfr/utilities/jfrLog.hpp" + +G1IHOPControl::G1IHOPControl(double initial_ihop_percent) : + _initial_ihop_percent(initial_ihop_percent), + _target_occupancy(0), + _last_allocated_bytes(0), + _last_allocation_time_s(0.0) +{ + assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0, "Initial IHOP value must be between 0 and 100."); +} + +void G1IHOPControl::update_target_occupancy(size_t new_target_occupancy) { + log_debug(gc, ihop)("Target occupancy update: old: " SIZE_FORMAT "B, new: " SIZE_FORMAT "B", + _target_occupancy, new_target_occupancy); + _target_occupancy = new_target_occupancy; +} + +void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) { + assert(allocation_time_s >= 0.0, "Allocation time must be positive."); + + _last_allocation_time_s = allocation_time_s; + _last_allocated_bytes = allocated_bytes; +} + +// should be defined in globalDefinitions.hpp +template +inline double percent_of(T numerator, T denominator) { + return denominator != 0 ? (double)numerator / denominator * 100.0 : 0.0; +} + +void G1IHOPControl::print() { + assert(_target_occupancy > 0, "Target occupancy still not updated yet."); + size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold(); + log_debug(gc, ihop)("Basic information (value update), threshold: " SIZE_FORMAT "B (%1.2f), target occupancy: " SIZE_FORMAT "B, current occupancy: " SIZE_FORMAT "B, " + "recent allocation size: " SIZE_FORMAT "B, recent allocation duration: %1.2fms, recent old gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms", + cur_conc_mark_start_threshold, + percent_of(cur_conc_mark_start_threshold, _target_occupancy), + _target_occupancy, + G1CollectedHeap::heap()->used(), + _last_allocated_bytes, + _last_allocation_time_s * 1000.0, + _last_allocation_time_s > 0.0 ? _last_allocated_bytes / _last_allocation_time_s : 0.0, + last_marking_length_s() * 1000.0); +} + +void G1IHOPControl::send_trace_event(G1NewTracer* tracer) { + assert(_target_occupancy > 0, "Target occupancy still not updated yet."); + tracer->report_basic_ihop_statistics(get_conc_mark_start_threshold(), + _target_occupancy, + G1CollectedHeap::heap()->used(), + _last_allocated_bytes, + _last_allocation_time_s, + last_marking_length_s()); +} + +G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent) : + G1IHOPControl(ihop_percent), + _last_marking_length_s(0.0) { +} diff --git a/src/share/vm/gc_implementation/g1/g1IHOPControl.hpp b/src/share/vm/gc_implementation/g1/g1IHOPControl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..c9f9a108dc881f5e32746aa3452faa131fcbd13c --- /dev/null +++ b/src/share/vm/gc_implementation/g1/g1IHOPControl.hpp @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
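The percent_of helper and the logging line above combine a percentage of the target occupancy with an allocation rate derived from the last mutator period. A standalone check of that arithmetic, reusing the same percent_of template with made-up input values:

// Standalone check of the arithmetic used in the IHOP logging: percentage of the
// target occupancy, allocation rate in bytes per second, and duration in ms.
#include <cstddef>
#include <cstdio>

template <typename T>
inline double percent_of(T numerator, T denominator) {
  return denominator != 0 ? (double)numerator / denominator * 100.0 : 0.0;
}

int main() {
  const size_t threshold         = 45UL * 1024 * 1024;   // e.g. current IHOP threshold
  const size_t target_occupancy  = 100UL * 1024 * 1024;  // e.g. target occupancy
  const size_t allocated_bytes   = 8UL * 1024 * 1024;    // last mutator period
  const double allocation_time_s = 0.25;

  std::printf("threshold: %zuB (%1.2f%% of target)\n",
              threshold, percent_of(threshold, target_occupancy));             // 45.00%
  std::printf("allocation rate: %1.2fB/s\n",
              allocation_time_s > 0.0 ? allocated_bytes / allocation_time_s : 0.0);
  std::printf("allocation duration: %1.2fms\n", allocation_time_s * 1000.0);   // 250.00ms
  return 0;
}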
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_G1_G1IHOPCONTROL_HPP +#define SHARE_VM_GC_G1_G1IHOPCONTROL_HPP + +#include "memory/allocation.hpp" +#include "utilities/numberSeq.hpp" + +class G1NewTracer; + +// Base class for algorithms that calculate the heap occupancy at which +// concurrent marking should start. This heap usage threshold should be relative +// to old gen size. +class G1IHOPControl : public CHeapObj { + protected: + // The initial IHOP value relative to the target occupancy. + double _initial_ihop_percent; + // The target maximum occupancy of the heap. The target occupancy is the number + // of bytes when marking should be finished and reclaim started. + size_t _target_occupancy; + + // Most recent complete mutator allocation period in seconds. + double _last_allocation_time_s; + // Amount of bytes allocated during _last_allocation_time_s. + size_t _last_allocated_bytes; + + // Initialize an instance with the initial IHOP value in percent. The target + // occupancy will be updated at the first heap expansion. + G1IHOPControl(double initial_ihop_percent); + + // Most recent time from the end of the initial mark to the start of the first + // mixed gc. + virtual double last_marking_length_s() const = 0; + public: + virtual ~G1IHOPControl() { } + + // Get the current non-young occupancy at which concurrent marking should start. + virtual size_t get_conc_mark_start_threshold() = 0; + + // Adjust target occupancy. + virtual void update_target_occupancy(size_t new_target_occupancy); + // Update information about time during which allocations in the Java heap occurred, + // how large these allocations were in bytes, and an additional buffer. + // The allocations should contain any amount of space made unusable for further + // allocation, e.g. any waste caused by TLAB allocation, space at the end of + // humongous objects that can not be used for allocation, etc. + // Together with the target occupancy, this additional buffer should contain the + // difference between old gen size and total heap size at the start of reclamation, + // and space required for that reclamation. + virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size); + // Update the time spent in the mutator beginning from the end of initial mark to + // the first mixed gc. + virtual void update_marking_length(double marking_length_s) = 0; + + virtual void print(); + virtual void send_trace_event(G1NewTracer* tracer); +}; + +// The returned concurrent mark starting occupancy threshold is a fixed value +// relative to the maximum heap size. +class G1StaticIHOPControl : public G1IHOPControl { + // Most recent mutator time between the end of initial mark to the start of the + // first mixed gc. 
+ double _last_marking_length_s; + protected: + double last_marking_length_s() const { return _last_marking_length_s; } + public: + G1StaticIHOPControl(double ihop_percent); + + size_t get_conc_mark_start_threshold() { + guarantee(_target_occupancy > 0, "Target occupancy must have been initialized."); + return (size_t) (_initial_ihop_percent * _target_occupancy / 100.0); + } + + virtual void update_marking_length(double marking_length_s) { + assert(marking_length_s > 0.0, "Marking length must be larger than zero."); + _last_marking_length_s = marking_length_s; + } +}; +#endif // SHARE_VM_GC_G1_G1IHOPCONTROL_HPP diff --git a/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp b/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp index bd91c8fd29a639e60f93cf58dd8558f6161de56b..ee5786825410f4a46ca7e1e51620dc243ea6e0f1 100644 --- a/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp +++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,6 +105,11 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) { ++_no_entries; } _array[_head_index] = G1MMUTrackerQueueElem(start, end); + + if (EnableJFR) { + double slice_time = calculate_gc_time(end); + G1MMUTracer::report_mmu(_time_slice, slice_time, _max_gc_time, gc_thread); + } } // basically the _internal call does not remove expired entries diff --git a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp index fc86a50cf8aeed56a603952af6ef39332fa5eb8e..40b9f04a138de0606e48c437f742d9df7f3339ba 100644 --- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp +++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
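G1StaticIHOPControl::get_conc_mark_start_threshold() above reduces to a fixed percentage of the current target occupancy. A standalone sketch of that computation with an assumed 1 GiB target and the default InitiatingHeapOccupancyPercent of 45:

// Standalone sketch of the static IHOP threshold: a fixed percentage of the
// (previously updated) target occupancy.
#include <cstddef>
#include <cstdio>

static size_t conc_mark_start_threshold(double ihop_percent, size_t target_occupancy_bytes) {
  // Mirrors the computation above: threshold = percent * target / 100
  return (size_t)(ihop_percent * target_occupancy_bytes / 100.0);
}

int main() {
  const size_t target = (size_t)1 << 30;                 // assume a 1 GiB target occupancy
  // With the default InitiatingHeapOccupancyPercent of 45, concurrent marking
  // would be requested once old-gen occupancy crosses roughly 460 MiB.
  std::printf("threshold: %zu bytes\n", conc_mark_start_threshold(45.0, target));
  return 0;
}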
* * This code is free software; you can redistribute it and/or modify it @@ -198,6 +198,21 @@ InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop co return dest(state); } +void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state, + oop const old, size_t word_sz, uint age, + HeapWord * const obj_ptr, AllocationContext_t context) const { + assert(EnableJFR, "sanity check"); + ParGCAllocBuffer* alloc_buf = _g1_par_allocator->alloc_buffer(dest_state, context); + if (alloc_buf->contains(obj_ptr)) { + _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age, + dest_state.value() == InCSetState::Old, + alloc_buf->word_sz()); + } else { + _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age, + dest_state.value() == InCSetState::Old); + } +} + oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state, oop const old, markOop const old_mark) { @@ -225,6 +240,10 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state, return _g1h->handle_evacuation_failure_par(this, old); } } + if (EnableJFR && _g1h->_gc_tracer_stw->should_report_promotion_events()) { + // The events are checked individually as part of the actual commit + report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context); + } } assert(obj_ptr != NULL, "when we get here, allocation should have succeeded"); diff --git a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp index d5350310e15e9d6dda733a210bd27122871171b1..091dff7ffb0f3630767e3fc84ff5f38b91fa75d5 100644 --- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp +++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -213,6 +213,10 @@ class G1ParScanThreadState : public StackObj { AllocationContext_t const context); inline InCSetState next_state(InCSetState const state, markOop const m, uint& age); + inline InCSetState next_state(InCSetState const state, markOop const m, uint& age, AllocationContext_t context); + void report_promotion_event(InCSetState const dest_state, + oop const old, size_t word_sz, uint age, + HeapWord * const obj_ptr, AllocationContext_t context) const; public: oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark); diff --git a/src/share/vm/gc_implementation/g1/heapRegion.cpp b/src/share/vm/gc_implementation/g1/heapRegion.cpp index eefa1c949932b88dcba477052e4f34f98ef0f946..b3f1ea0a798888883d67f89dd8ff58b5ca19bc92 100644 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ #include "gc_implementation/g1/heapRegionBounds.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionManager.inline.hpp" +#include "gc_implementation/g1/heapRegionTracer.hpp" #include "gc_implementation/shared/liveRange.hpp" #include "memory/genOopClosures.inline.hpp" #include "memory/iterator.hpp" @@ -217,7 +218,9 @@ void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) { "Should be normal before the humongous object allocation"); assert(top() == bottom(), "should be empty"); assert(bottom() <= new_top && new_top <= new_end, "pre-condition"); - + if (EnableJFR) { + report_region_type_change(G1HeapRegionTraceType::StartsHumongous); + } _type.set_starts_humongous(); _humongous_start_region = this; @@ -231,7 +234,9 @@ void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) { "Should be normal before the humongous object allocation"); assert(top() == bottom(), "should be empty"); assert(first_hr->startsHumongous(), "pre-condition"); - + if (EnableJFR) { + report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous); + } _type.set_continues_humongous(); _humongous_start_region = first_hr; } @@ -303,6 +308,16 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { record_timestamp(); } +void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) { + assert(EnableJFR, "sanity check"); + HeapRegionTracer::send_region_type_change(_hrm_index, + get_trace_type(), + to, + (uintptr_t)bottom(), + used()); +} + + CompactibleSpace* HeapRegion::next_compaction_space() const { return G1CollectedHeap::heap()->next_compaction_region(this); } diff --git a/src/share/vm/gc_implementation/g1/heapRegion.hpp b/src/share/vm/gc_implementation/g1/heapRegion.hpp index 76627e7ba4ceaac81c94830ed09bc5d345e39d28..9e77ecee7711523552a21d647b3edb4b0e6e6ad6 100644 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "gc_implementation/g1/g1_specialized_oop_closures.hpp" #include "gc_implementation/g1/heapRegionType.hpp" #include "gc_implementation/g1/survRateGroup.hpp" +#include "gc_implementation/g1/g1HeapRegionTraceType.hpp" #include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/space.inline.hpp" @@ -211,6 +212,8 @@ class HeapRegion: public G1OffsetTableContigSpace { G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; } + void report_region_type_change(G1HeapRegionTraceType::Type to); + protected: // The index of this region in the heap region sequence. 
uint _hrm_index; @@ -405,6 +408,7 @@ class HeapRegion: public G1OffsetTableContigSpace { const char* get_type_str() const { return _type.get_str(); } const char* get_short_type_str() const { return _type.get_short_str(); } + G1HeapRegionTraceType::Type get_trace_type() { return _type.get_trace_type(); } bool is_free() const { return _type.is_free(); } @@ -667,13 +671,40 @@ class HeapRegion: public G1OffsetTableContigSpace { } } - void set_free() { _type.set_free(); } + void set_free() { + if (EnableJFR) { + report_region_type_change(G1HeapRegionTraceType::Free); + } + _type.set_free(); + } - void set_eden() { _type.set_eden(); } - void set_eden_pre_gc() { _type.set_eden_pre_gc(); } - void set_survivor() { _type.set_survivor(); } + void set_eden() { + if (EnableJFR) { + report_region_type_change(G1HeapRegionTraceType::Eden); + } + _type.set_eden(); + } - void set_old() { _type.set_old(); } + void set_old() { + if (EnableJFR) { + report_region_type_change(G1HeapRegionTraceType::Old); + } + _type.set_old(); + } + + void set_eden_pre_gc() { + if (EnableJFR) { + report_region_type_change(G1HeapRegionTraceType::Eden); + } + _type.set_eden_pre_gc(); + } + + void set_survivor() { + if (EnableJFR) { + report_region_type_change(G1HeapRegionTraceType::Survivor); + } + _type.set_survivor(); + } // Determine if an object has been allocated since the last // mark performed by the collector. This returns true iff the object diff --git a/src/share/vm/gc_implementation/g1/heapRegionTracer.cpp b/src/share/vm/gc_implementation/g1/heapRegionTracer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7227e583f8339c12f20453d5af55388fef01d0cd --- /dev/null +++ b/src/share/vm/gc_implementation/g1/heapRegionTracer.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
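The set_free()/set_eden()/set_survivor()/set_old() wrappers above all report the transition before mutating the tag, so the event can carry the current type as "from". A standalone model of that report-then-mutate pattern, with a hypothetical RegionSketch type and a printf in place of HeapRegionTracer::send_region_type_change():

// Standalone model: capture the current type as "from" and report the transition
// before the tag is changed.
#include <cstdio>

enum RegionType { FreeType, EdenType, SurvivorType, OldType };

static const char* type_name(RegionType t) {
  switch (t) {
    case FreeType:     return "Free";
    case EdenType:     return "Eden";
    case SurvivorType: return "Survivor";
    case OldType:      return "Old";
  }
  return "?";
}

struct RegionSketch {
  unsigned index;
  RegionType type;

  void report_type_change(RegionType to) const {
    // Stands in for HeapRegionTracer::send_region_type_change(index, from, to, bottom, used)
    std::printf("region %u: %s -> %s\n", index, type_name(type), type_name(to));
  }
  void set_eden() { report_type_change(EdenType); type = EdenType; }   // report first, then mutate
  void set_old()  { report_type_change(OldType);  type = OldType;  }
};

int main() {
  RegionSketch r = { 7, FreeType };
  r.set_eden();    // region 7: Free -> Eden
  r.set_old();     // region 7: Eden -> Old
  return 0;
}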
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/heapRegionTracer.hpp" +#include "trace/tracing.hpp" + +void HeapRegionTracer::send_region_type_change(uint index, + G1HeapRegionTraceType::Type from, + G1HeapRegionTraceType::Type to, + uintptr_t start, + size_t used) { + EventG1HeapRegionTypeChange e; + if (e.should_commit()) { + e.set_index(index); + e.set_from(from); + e.set_to(to); + e.set_start(start); + e.set_used(used); + e.commit(); + } +} + diff --git a/src/share/vm/gc_implementation/g1/heapRegionTracer.hpp b/src/share/vm/gc_implementation/g1/heapRegionTracer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..fd03bf5c3c00271e2861596cae699c345408c40e --- /dev/null +++ b/src/share/vm/gc_implementation/g1/heapRegionTracer.hpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_G1_HEAPREGIONTRACER_HPP +#define SHARE_VM_GC_G1_HEAPREGIONTRACER_HPP + +#include "gc_implementation/g1/g1HeapRegionTraceType.hpp" +#include "memory/allocation.hpp" + +class HeapRegionTracer : AllStatic { + public: + static void send_region_type_change(uint index, + G1HeapRegionTraceType::Type from, + G1HeapRegionTraceType::Type to, + uintptr_t start, + size_t used); +}; + +#endif // SHARE_VM_GC_G1_HEAPREGIONTRACER_HPP + diff --git a/src/share/vm/gc_implementation/g1/heapRegionType.cpp b/src/share/vm/gc_implementation/g1/heapRegionType.cpp index 347b58d79960c5f5b45ce6af9aef716c0a1f012b..fe73a3f4f5b8df39d405ce6179c91a704f6e6cf8 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionType.cpp +++ b/src/share/vm/gc_implementation/g1/heapRegionType.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc_implementation/g1/g1HeapRegionTraceType.hpp" #include "gc_implementation/g1/heapRegionType.hpp" bool HeapRegionType::is_valid(Tag tag) { @@ -67,3 +68,18 @@ const char* HeapRegionType::get_short_str() const { // keep some compilers happy return NULL; } + +G1HeapRegionTraceType::Type HeapRegionType::get_trace_type() { + hrt_assert_is_valid(_tag); + switch (_tag) { + case FreeTag: return G1HeapRegionTraceType::Free; + case EdenTag: return G1HeapRegionTraceType::Eden; + case SurvTag: return G1HeapRegionTraceType::Survivor; + case HumStartsTag: return G1HeapRegionTraceType::StartsHumongous; + case HumContTag: return G1HeapRegionTraceType::ContinuesHumongous; + case OldTag: return G1HeapRegionTraceType::Old; + default: + ShouldNotReachHere(); + return G1HeapRegionTraceType::Free; // keep some compilers happy + } +} diff --git a/src/share/vm/gc_implementation/g1/heapRegionType.hpp b/src/share/vm/gc_implementation/g1/heapRegionType.hpp index b00590a6b78101d44caa59edf547f66318c29f8b..8b93c2712d1078c540cb74c48a59de1318826580 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionType.hpp +++ b/src/share/vm/gc_implementation/g1/heapRegionType.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP #include "memory/allocation.hpp" +#include "gc_implementation/g1/g1HeapRegionTraceType.hpp" #define hrt_assert_is_valid(tag) \ assert(is_valid((tag)), err_msg("invalid HR type: %u", (uint) (tag))) @@ -127,6 +128,7 @@ public: const char* get_str() const; const char* get_short_str() const; + G1HeapRegionTraceType::Type get_trace_type(); HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); } }; diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp index 69292400f33c50a11aa1047a968e9a420a12156d..3d1e18d2367283c1f69fd57df792c178423bffb4 100644 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -148,6 +148,10 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC { claimed_stack_depth()->push(p); } + inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size, + uint age, bool tenured, + const PSPromotionLAB* lab); + protected: static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; } public: diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp index b2de74d41754c7653dcc07acfd97331d37ca00b1..b94aea4ef78f53356b01bf3af85207976e74f821 100644 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,6 +64,34 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) { claim_or_forward_internal_depth(p); } +inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj, + size_t obj_size, + uint age, bool tenured, + const PSPromotionLAB* lab) { + assert(EnableJFR, "sanity check"); + // Skip if memory allocation failed + if (new_obj != NULL) { + const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer(); + + if (lab != NULL) { + // Promotion of object through newly allocated PLAB + if (gc_tracer->should_report_promotion_in_new_plab_event()) { + size_t obj_bytes = obj_size * HeapWordSize; + size_t lab_size = lab->capacity(); + gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes, + age, tenured, lab_size); + } + } else { + // Promotion of object directly to heap + if (gc_tracer->should_report_promotion_outside_plab_event()) { + size_t obj_bytes = obj_size * HeapWordSize; + gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes, + age, tenured); + } + } + } +} + // // This method is pretty bulky. It would be nice to split it up // into smaller submethods, but we need to be careful not to hurt @@ -98,6 +126,9 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) { if (new_obj_size > (YoungPLABSize / 2)) { // Allocate this object directly new_obj = (oop)young_space()->cas_allocate(new_obj_size); + if (EnableJFR) { + promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL); + } } else { // Flush and fill _young_lab.flush(); @@ -107,6 +138,9 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) { _young_lab.initialize(MemRegion(lab_base, YoungPLABSize)); // Try the young lab allocation again. new_obj = (oop) _young_lab.allocate(new_obj_size); + if (EnableJFR) { + promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab); + } } else { _young_gen_is_full = true; } @@ -127,11 +161,20 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) { new_obj_is_tenured = true; if (new_obj == NULL) { + uint age = 0; + if (EnableJFR) { + // Find the objects age, MT safe. + age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ? + test_mark->displaced_mark_helper()->age() : test_mark->age(); + } if (!_old_gen_is_full) { // Do we allocate directly, or flush and refill? 
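promotion_trace_event() above distinguishes promotions that went through a freshly allocated PLAB from direct heap allocations, and converts the object size from heap words to bytes before reporting. A standalone sketch of that decision, with hypothetical types and an assumed 8-byte heap word:

// Standalone sketch of the promotion-reporting branch: new-PLAB vs. outside-PLAB,
// skipping the report entirely when the allocation failed.
#include <cstddef>
#include <cstdio>

static const size_t HeapWordBytes = 8;                   // assume a 64-bit heap word

struct PromotionReporterSketch {
  void in_new_plab(size_t obj_bytes, unsigned age, bool tenured, size_t plab_bytes) const {
    std::printf("in new PLAB: %zuB, age %u, tenured %d, plab %zuB\n",
                obj_bytes, age, (int)tenured, plab_bytes);
  }
  void outside_plab(size_t obj_bytes, unsigned age, bool tenured) const {
    std::printf("outside PLAB: %zuB, age %u, tenured %d\n", obj_bytes, age, (int)tenured);
  }
};

static void promotion_trace_event_sketch(const PromotionReporterSketch& reporter,
                                         bool allocation_succeeded, bool used_new_plab,
                                         size_t obj_size_words, unsigned age, bool tenured,
                                         size_t plab_size_words) {
  if (!allocation_succeeded) return;                     // skip if memory allocation failed
  const size_t obj_bytes = obj_size_words * HeapWordBytes;
  if (used_new_plab) {
    reporter.in_new_plab(obj_bytes, age, tenured, plab_size_words * HeapWordBytes);
  } else {
    reporter.outside_plab(obj_bytes, age, tenured);
  }
}

int main() {
  PromotionReporterSketch r;
  promotion_trace_event_sketch(r, true, true,  6, 2, false, 512);   // copied into a fresh PLAB
  promotion_trace_event_sketch(r, true, false, 4096, 0, true, 0);   // large object, direct allocation
  return 0;
}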
if (new_obj_size > (OldPLABSize / 2)) { // Allocate this object directly new_obj = (oop)old_gen()->cas_allocate(new_obj_size); + if (EnableJFR) { + promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL); + } } else { // Flush and fill _old_lab.flush(); @@ -148,6 +191,9 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) { _old_lab.initialize(MemRegion(lab_base, OldPLABSize)); // Try the old lab allocation again. new_obj = (oop) _old_lab.allocate(new_obj_size); + if (EnableJFR) { + promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab); + } } } } diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp index 896b705c2049bfe063007dd2b6c87d91bc2db741..a7efb42092f4f9a99ceeb2dba0c8187e830064e2 100644 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,6 +93,8 @@ class PSScavenge: AllStatic { // Private accessors static CardTableExtension* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; } + static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; } + public: // Accessors static uint tenuring_threshold() { return _tenuring_threshold; } diff --git a/src/share/vm/gc_implementation/shared/ageTable.cpp b/src/share/vm/gc_implementation/shared/ageTable.cpp index d148cd4d3192ad2afe7003f0040ecfb1cc6eece3..bf34f83fb61ecde0b3695dd9cfa74a9a4f6debed 100644 --- a/src/share/vm/gc_implementation/shared/ageTable.cpp +++ b/src/share/vm/gc_implementation/shared/ageTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/gcPolicyCounters.hpp" +#include "gc_implementation/shared/ageTableTracer.hpp" #include "memory/collectorPolicy.hpp" #include "memory/resourceArea.hpp" #include "memory/sharedHeap.hpp" @@ -92,7 +93,7 @@ uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) { } uint result = age < MaxTenuringThreshold ? 
age : MaxTenuringThreshold; - if (PrintTenuringDistribution || UsePerfData) { + if (PrintTenuringDistribution || UsePerfData || (EnableJFR && AgeTableTracer::is_tenuring_distribution_event_enabled())) { if (PrintTenuringDistribution) { gclog_or_tty->cr(); @@ -110,6 +111,11 @@ uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) { age, sizes[age]*oopSize, total*oopSize); } } + + if (EnableJFR) { + AgeTableTracer::send_tenuring_distribution_event(age, sizes[age] * oopSize); + } + if (UsePerfData) { _perf_sizes[age]->set_value(sizes[age]*oopSize); } diff --git a/src/share/vm/gc_implementation/shared/ageTableTracer.cpp b/src/share/vm/gc_implementation/shared/ageTableTracer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b55e4b8a7e149fad71514ab2ece228c625fc1ce5 --- /dev/null +++ b/src/share/vm/gc_implementation/shared/ageTableTracer.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/ageTableTracer.hpp" +#include "gc_implementation/shared/gcId.hpp" +#include "trace/tracing.hpp" + +void AgeTableTracer::send_tenuring_distribution_event(uint age, size_t size) { + EventTenuringDistribution e; + if (e.should_commit()) { + // FIXME gc id ... + e.set_gcId(GCId::peek().id() - 1); + e.set_age(age); + e.set_size(size); + e.commit(); + } +} + +bool AgeTableTracer::is_tenuring_distribution_event_enabled() { + return EventTenuringDistribution::is_enabled(); +} + diff --git a/src/share/vm/gc_implementation/shared/ageTableTracer.hpp b/src/share/vm/gc_implementation/shared/ageTableTracer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5cce68e9a333ac40b105159fb5fa77fd5780e3c6 --- /dev/null +++ b/src/share/vm/gc_implementation/shared/ageTableTracer.hpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHARED_AGETABLETRACER_HPP +#define SHARE_VM_GC_SHARED_AGETABLETRACER_HPP + +#include "memory/allocation.hpp" + +class AgeTableTracer : AllStatic { + public: + static void send_tenuring_distribution_event(uint age, size_t size); + static bool is_tenuring_distribution_event_enabled(); +}; + +#endif // SHARE_VM_GC_SHARED_AGETABLETRACER_HPP + diff --git a/src/share/vm/gc_implementation/shared/gcConfiguration.cpp b/src/share/vm/gc_implementation/shared/gcConfiguration.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d9664143d225509837385897fc207139f6da2091 --- /dev/null +++ b/src/share/vm/gc_implementation/shared/gcConfiguration.cpp @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
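The compute_tenuring_threshold() change above emits one TenuringDistribution event per age while the table is walked for reporting. A simplified standalone model, assuming the usual HotSpot policy of accumulating per-age sizes until the desired survivor size is exceeded (the real code additionally clamps the result to MaxTenuringThreshold, as the surrounding context shows):

// Simplified standalone model: pick the threshold by accumulation, then emit one
// record per non-empty age (what the TenuringDistribution event carries).
#include <cstddef>
#include <cstdio>

static unsigned compute_threshold_sketch(const size_t sizes_bytes[], unsigned table_size,
                                         size_t desired_survivor_bytes) {
  size_t total = 0;
  unsigned age = 1;
  while (age < table_size) {                             // accumulate until the survivor target is exceeded
    total += sizes_bytes[age];
    if (total > desired_survivor_bytes) break;
    age++;
  }
  for (unsigned a = 1; a < table_size; a++) {
    if (sizes_bytes[a] > 0) {
      // Stands in for AgeTableTracer::send_tenuring_distribution_event(a, sizes[a] * oopSize)
      std::printf("age %u: %zu bytes\n", a, sizes_bytes[a]);
    }
  }
  return age;
}

int main() {
  size_t sizes[16] = { 0, 4096, 2048, 1024, 512 };       // remaining ages are empty
  // Ages 1 and 2 fit the 6 KiB target exactly; age 3 pushes past it, so the
  // computed threshold is 3.
  unsigned threshold = compute_threshold_sketch(sizes, 16, 6 * 1024);
  std::printf("tenuring threshold: %u\n", threshold);
  return 0;
}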
+ * + */ +#include "precompiled.hpp" + +#include "gc_interface/collectedHeap.hpp" +#include "gc_implementation/shared/gcConfiguration.hpp" +#include "memory/universe.hpp" +#include "runtime/arguments.hpp" +#include "runtime/globals.hpp" +#include "utilities/debug.hpp" + +GCName GCConfiguration::young_collector() const { + if (UseG1GC) { + return G1New; + } + + if (UseParallelGC) { + return ParallelScavenge; + } + + if (UseConcMarkSweepGC) { + return ParNew; + } + + return DefNew; +} + +GCName GCConfiguration::old_collector() const { + if (UseG1GC) { + return G1Old; + } + + if (UseConcMarkSweepGC) { + return ConcurrentMarkSweep; + } + + if (UseParallelOldGC) { + return ParallelOld; + } + + return SerialOld; +} + +uint GCConfiguration::num_parallel_gc_threads() const { + return ParallelGCThreads; +} + +uint GCConfiguration::num_concurrent_gc_threads() const { + return ConcGCThreads; +} + +bool GCConfiguration::uses_dynamic_gc_threads() const { + return UseDynamicNumberOfGCThreads; +} + +bool GCConfiguration::is_explicit_gc_concurrent() const { + return ExplicitGCInvokesConcurrent; +} + +bool GCConfiguration::is_explicit_gc_disabled() const { + return DisableExplicitGC; +} + +bool GCConfiguration::has_pause_target_default_value() const { + return FLAG_IS_DEFAULT(MaxGCPauseMillis); +} + +uintx GCConfiguration::pause_target() const { + return MaxGCPauseMillis; +} + +uintx GCConfiguration::gc_time_ratio() const { + return GCTimeRatio; +} + +bool GCTLABConfiguration::uses_tlabs() const { + return UseTLAB; +} + +size_t GCTLABConfiguration::min_tlab_size() const { + return MinTLABSize; +} + +uint GCTLABConfiguration::tlab_refill_waste_limit() const { + return TLABRefillWasteFraction; +} + +intx GCSurvivorConfiguration::max_tenuring_threshold() const { + return MaxTenuringThreshold; +} + +intx GCSurvivorConfiguration::initial_tenuring_threshold() const { + return InitialTenuringThreshold; +} + +size_t GCHeapConfiguration::max_size() const { + return MaxHeapSize; +} + +size_t GCHeapConfiguration::min_size() const { + return Arguments::min_heap_size(); +} + +size_t GCHeapConfiguration::initial_size() const { + return InitialHeapSize; +} + +bool GCHeapConfiguration::uses_compressed_oops() const { + return UseCompressedOops; +} + +Universe::NARROW_OOP_MODE GCHeapConfiguration::narrow_oop_mode() const { + return Universe::narrow_oop_mode(); +} + +uint GCHeapConfiguration::object_alignment_in_bytes() const { + return ObjectAlignmentInBytes; +} + +int GCHeapConfiguration::heap_address_size_in_bits() const { + return BitsPerHeapOop; +} + +bool GCYoungGenerationConfiguration::has_max_size_default_value() const { + return FLAG_IS_DEFAULT(MaxNewSize); +} + +uintx GCYoungGenerationConfiguration::max_size() const { + return MaxNewSize; +} + +uintx GCYoungGenerationConfiguration::min_size() const { + return NewSize; +} + +intx GCYoungGenerationConfiguration::new_ratio() const { + return NewRatio; +} diff --git a/src/share/vm/gc_implementation/shared/gcConfiguration.hpp b/src/share/vm/gc_implementation/shared/gcConfiguration.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3ebea5c3047e39d9219dd8a1259ab3ead1196206 --- /dev/null +++ b/src/share/vm/gc_implementation/shared/gcConfiguration.hpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
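GCConfiguration::young_collector()/old_collector() above select the reported collector names purely from the UseXxxGC flags, checked in a fixed priority order. A standalone sketch of that selection logic, with hypothetical enums and a flags struct in place of the VM globals:

// Standalone sketch of the collector-selection order: G1 first, then Parallel,
// then CMS, falling back to the serial collectors.
#include <cstdio>

enum YoungName { G1NewC, ParallelScavengeC, ParNewC, DefNewC };
enum OldName   { G1OldC, ConcurrentMarkSweepC, ParallelOldC, SerialOldC };

struct GCFlags {                                         // stand-ins for the UseXxxGC globals
  bool UseG1GC, UseParallelGC, UseParallelOldGC, UseConcMarkSweepGC;
};

static YoungName young_collector(const GCFlags& f) {
  if (f.UseG1GC)            return G1NewC;
  if (f.UseParallelGC)      return ParallelScavengeC;
  if (f.UseConcMarkSweepGC) return ParNewC;
  return DefNewC;
}

static OldName old_collector(const GCFlags& f) {
  if (f.UseG1GC)            return G1OldC;
  if (f.UseConcMarkSweepGC) return ConcurrentMarkSweepC;
  if (f.UseParallelOldGC)   return ParallelOldC;
  return SerialOldC;
}

int main() {
  GCFlags cms = { false, false, false, true };
  std::printf("CMS: young=%d old=%d\n", young_collector(cms), old_collector(cms));  // ParNew + CMS
  GCFlags parallel = { false, true, true, false };
  std::printf("Parallel: young=%d old=%d\n", young_collector(parallel), old_collector(parallel));
  return 0;
}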
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHARED_GCCONFIGURATION_HPP +#define SHARE_VM_GC_SHARED_GCCONFIGURATION_HPP + +#include "gc_interface/gcName.hpp" +#include "memory/universe.hpp" +#include "utilities/globalDefinitions.hpp" + +class GCConfiguration { + public: + GCName young_collector() const; + GCName old_collector() const; + uint num_parallel_gc_threads() const; + uint num_concurrent_gc_threads() const; + bool uses_dynamic_gc_threads() const; + bool is_explicit_gc_concurrent() const; + bool is_explicit_gc_disabled() const; + uintx gc_time_ratio() const; + + bool has_pause_target_default_value() const; + uintx pause_target() const; +}; + +class GCTLABConfiguration { + public: + bool uses_tlabs() const; + size_t min_tlab_size() const; + uint tlab_refill_waste_limit() const; +}; + +class GCSurvivorConfiguration { + public: + intx initial_tenuring_threshold() const; + intx max_tenuring_threshold() const; +}; + +class GCHeapConfiguration { + public: + size_t max_size() const; + size_t min_size() const; + size_t initial_size() const; + bool uses_compressed_oops() const; + Universe::NARROW_OOP_MODE narrow_oop_mode() const; + uint object_alignment_in_bytes() const; + int heap_address_size_in_bits() const; +}; + +class GCYoungGenerationConfiguration { + public: + bool has_max_size_default_value() const; + uintx max_size() const; + + uintx min_size() const; + intx new_ratio() const; +}; + +#endif // SHARE_VM_GC_SHARED_GCCONFIGURATION_HPP diff --git a/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp index 23cb113c11b949ef00e9635cd0922cc59b8a4253..36dfa897f1e716368292edc402ac03a51ebf83f0 100644 --- a/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp +++ b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -78,11 +78,13 @@ class MetaspaceSizes : public StackObj { class GCHeapSummary; class PSHeapSummary; +class G1HeapSummary; class GCHeapSummaryVisitor { public: virtual void visit(const GCHeapSummary* heap_summary) const = 0; virtual void visit(const PSHeapSummary* heap_summary) const {} + virtual void visit(const G1HeapSummary* heap_summary) const {} }; class GCHeapSummary : public StackObj { @@ -125,6 +127,24 @@ class PSHeapSummary : public GCHeapSummary { } }; +class G1HeapSummary : public GCHeapSummary { + size_t _edenUsed; + size_t _edenCapacity; + size_t _survivorUsed; + uint _numberOfRegions; + public: + G1HeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, size_t edenUsed, size_t edenCapacity, size_t survivorUsed, uint numberOfRegions) : + GCHeapSummary(heap_space, heap_used), _edenUsed(edenUsed), _edenCapacity(edenCapacity), _survivorUsed(survivorUsed), _numberOfRegions(numberOfRegions) { } + const size_t edenUsed() const { return _edenUsed; } + const size_t edenCapacity() const { return _edenCapacity; } + const size_t survivorUsed() const { return _survivorUsed; } + const uint numberOfRegions() const { return _numberOfRegions; } + + virtual void accept(GCHeapSummaryVisitor* visitor) const { + visitor->visit(this); + } +}; + class MetaspaceSummary : public StackObj { size_t _capacity_until_GC; MetaspaceSizes _meta_space; diff --git a/src/share/vm/gc_implementation/shared/gcTrace.cpp b/src/share/vm/gc_implementation/shared/gcTrace.cpp index ddac9531667c17e85aa921b6d41c1f31188f4faf..fc5f5c88d4a4677bea82d4d6decb5d4d5991f8ce 100644 --- a/src/share/vm/gc_implementation/shared/gcTrace.cpp +++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -172,6 +172,30 @@ void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) { _tenuring_threshold = tenuring_threshold; } +bool YoungGCTracer::should_report_promotion_events() const { + return should_report_promotion_in_new_plab_event() || + should_report_promotion_outside_plab_event(); +} + +bool YoungGCTracer::should_report_promotion_in_new_plab_event() const { + return should_send_promotion_in_new_plab_event(); +} + +bool YoungGCTracer::should_report_promotion_outside_plab_event() const { + return should_send_promotion_outside_plab_event(); +} + +void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const { + send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size); +} + +void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const { + send_promotion_outside_plab_event(klass, obj_size, age, tenured); +} + void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { assert_set_gc_id(); @@ -199,6 +223,13 @@ void OldGCTracer::report_concurrent_mode_failure() { } #if INCLUDE_ALL_GCS +void G1MMUTracer::report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec, bool gc_thread) { + send_g1_mmu_event(time_slice_sec * MILLIUNITS, + gc_time_sec * MILLIUNITS, + max_time_sec * MILLIUNITS, + gc_thread); +} + void G1NewTracer::report_yc_type(G1YCType type) { assert_set_gc_id(); @@ -224,4 +255,19 @@ void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) { send_evacuation_failed_event(ef_info); ef_info.reset(); } + +void G1NewTracer::report_basic_ihop_statistics(size_t threshold, + size_t target_ccupancy, + size_t current_occupancy, + size_t last_allocation_size, + double last_allocation_duration, + double last_marking_length) { + send_basic_ihop_statistics(threshold, + target_ccupancy, + current_occupancy, + last_allocation_size, + last_allocation_duration, + last_marking_length); +} + #endif diff --git a/src/share/vm/gc_implementation/shared/gcTrace.hpp b/src/share/vm/gc_implementation/shared/gcTrace.hpp index dd13344155fcd6fb0b6cf74d9df7b4750e742658..87d6bdd67c7b101cd6732ccdfbe575e56a499345 100644 --- a/src/share/vm/gc_implementation/shared/gcTrace.hpp +++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -157,9 +157,39 @@ class YoungGCTracer : public GCTracer { void report_promotion_failed(const PromotionFailedInfo& pf_info); void report_tenuring_threshold(const uint tenuring_threshold); + /* + * Methods for reporting Promotion in new or outside PLAB Events. + * + * The object age is always required as it is not certain that the mark word + * of the oop can be trusted at this stage. + * + * obj_size is the size of the promoted object in bytes. + * + * tenured should be true if the object has been promoted to the old + * space during this GC, if the object is copied to survivor space + * from young space or survivor space (aging) tenured should be false. + * + * plab_size is the size of the newly allocated PLAB in bytes. 
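The should_report_promotion_events() query above is just the OR of the two per-event enables, so callers (as in the G1 and ParallelScavenge hunks earlier) can skip the age lookup and event construction entirely when neither event is enabled. A minimal standalone sketch of that gating, with hypothetical TracerSketch names:

// Standalone sketch: check the cheap OR'd predicate once, and only then pay for
// the more expensive per-object work.
#include <cstdio>

struct TracerSketch {
  bool in_new_plab_enabled;
  bool outside_plab_enabled;

  bool should_report_promotion_events() const {
    return in_new_plab_enabled || outside_plab_enabled;
  }
  void report(bool used_new_plab, unsigned age) const {
    std::printf("%s promotion, age %u\n", used_new_plab ? "PLAB" : "direct", age);
  }
};

static unsigned expensive_age_lookup() { return 3; }     // stands in for reading the mark word

static void after_copy(const TracerSketch& tracer, bool used_new_plab) {
  if (tracer.should_report_promotion_events()) {         // skip all event work when disabled
    unsigned age = expensive_age_lookup();
    tracer.report(used_new_plab, age);
  }
}

int main() {
  TracerSketch enabled  = { true, false };
  TracerSketch disabled = { false, false };
  after_copy(enabled, true);     // reports
  after_copy(disabled, false);   // no event work at all
  return 0;
}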
+ */ + bool should_report_promotion_events() const; + bool should_report_promotion_in_new_plab_event() const; + bool should_report_promotion_outside_plab_event() const; + void report_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const; + void report_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const; + private: void send_young_gc_event() const; void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const; + bool should_send_promotion_in_new_plab_event() const; + bool should_send_promotion_outside_plab_event() const; + void send_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const; + void send_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const; }; class OldGCTracer : public GCTracer { @@ -210,6 +240,13 @@ class ParNewTracer : public YoungGCTracer { }; #if INCLUDE_ALL_GCS +class G1MMUTracer : public AllStatic { + static void send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms, bool gc_thread); + + public: + static void report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec, bool gc_thread); +}; + class G1NewTracer : public YoungGCTracer { G1YoungGCInfo _g1_young_gc_info; @@ -221,10 +258,25 @@ class G1NewTracer : public YoungGCTracer { void report_evacuation_info(EvacuationInfo* info); void report_evacuation_failed(EvacuationFailedInfo& ef_info); + void report_basic_ihop_statistics(size_t threshold, + size_t target_occupancy, + size_t current_occupancy, + size_t last_allocation_size, + double last_allocation_duration, + double last_marking_length); + private: void send_g1_young_gc_event(); void send_evacuation_info_event(EvacuationInfo* info); void send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const; + + void send_basic_ihop_statistics(size_t threshold, + size_t target_occupancy, + size_t current_occupancy, + size_t last_allocation_size, + double last_allocation_duration, + double last_marking_length); + }; #endif diff --git a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp index 5462135a0145c3e655a9e0ca98ebe6a45360ebab..aae98b243dc47d887a4240d77566837f05268677 100644 --- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp +++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ #if INCLUDE_ALL_GCS #include "gc_implementation/g1/evacuationInfo.hpp" #include "gc_implementation/g1/g1YCTypes.hpp" +#include "tracefiles/traceEventClasses.hpp" #endif // All GC dependencies against the trace framework is contained within this file. 
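The gcTraceSend.cpp hunks that follow all use the same shape for the generated event classes (pulled in via the tracefiles/traceEventClasses.hpp include above): construct the event, guard the field setters with should_commit(), and finish with commit(). A minimal sketch of that shape, using the EventYoungGarbageCollection setters that appear in the hunks below; the free-standing function and the SharedGCInfo parameter are illustrative assumptions, not part of the patch:

  // Sketch of the send_*_event() pattern used throughout gcTraceSend.cpp.
  // Assumes SharedGCInfo exposes gc_id(), start_timestamp() and end_timestamp(),
  // as the tracers in this patch do through _shared_gc_info.
  static void example_send_young_gc(const SharedGCInfo& info, uint tenuring_threshold) {
    EventYoungGarbageCollection e(UNTIMED);   // UNTIMED: timestamps are supplied explicitly
    if (e.should_commit()) {                  // cheap enabled-check before doing any work
      e.set_gcId(info.gc_id().id());
      e.set_tenuringThreshold(tenuring_threshold);
      e.set_starttime(info.start_timestamp());
      e.set_endtime(info.end_timestamp());
      e.commit();                             // serialize the event into the JFR buffers
    }
  }

The UNTIMED constructor argument suppresses the event's own timestamping, which is why the senders set the start and end times explicitly from the GC's tick counters.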
@@ -41,7 +42,7 @@ typedef uintptr_t TraceAddress;
 
 void GCTracer::send_garbage_collection_event() const {
-  EventGCGarbageCollection event(UNTIMED);
+  EventGarbageCollection event(UNTIMED);
   if (event.should_commit()) {
     event.set_gcId(_shared_gc_info.gc_id().id());
     event.set_name(_shared_gc_info.name());
@@ -89,7 +90,7 @@ void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspa
 }
 
 void ParallelOldTracer::send_parallel_old_event() const {
-  EventGCParallelOld e(UNTIMED);
+  EventParallelOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
@@ -100,7 +101,7 @@ void ParallelOldTracer::send_parallel_old_event() const {
 }
 
 void YoungGCTracer::send_young_gc_event() const {
-  EventGCYoungGarbageCollection e(UNTIMED);
+  EventYoungGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_tenuringThreshold(_tenuring_threshold);
@@ -110,8 +111,48 @@ void YoungGCTracer::send_young_gc_event() const {
   }
 }
 
+bool YoungGCTracer::should_send_promotion_in_new_plab_event() const {
+  return EventPromoteObjectInNewPLAB::is_enabled();
+}
+
+bool YoungGCTracer::should_send_promotion_outside_plab_event() const {
+  return EventPromoteObjectOutsidePLAB::is_enabled();
+}
+
+void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
+                                                     uint age, bool tenured,
+                                                     size_t plab_size) const {
+
+  EventPromoteObjectInNewPLAB event;
+  if (event.should_commit()) {
+    event.set_gcId(_shared_gc_info.gc_id().id());
+    event.set_objectClass(klass);
+    event.set_objectSize(obj_size);
+    event.set_tenured(tenured);
+    event.set_tenuringAge(age);
+    event.set_plabSize(plab_size);
+    event.commit();
+  }
+}
+
+void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
+                                                      uint age, bool tenured) const {
+
+  EventPromoteObjectOutsidePLAB event;
+  if (event.should_commit()) {
+    event.set_gcId(_shared_gc_info.gc_id().id());
+    event.set_objectClass(klass);
+    event.set_objectSize(obj_size);
+    event.set_tenured(tenured);
+    event.set_tenuringAge(age);
+    event.commit();
+  }
+}
+
+
 void OldGCTracer::send_old_gc_event() const {
-  EventGCOldGarbageCollection e(UNTIMED);
+  EventOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_starttime(_shared_gc_info.start_timestamp());
@@ -133,7 +174,7 @@ void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_in
   EventPromotionFailed e;
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
-    e.set_data(to_trace_struct(pf_info));
+    e.set_promotionFailed(to_trace_struct(pf_info));
     e.set_thread(pf_info.thread()->thread_id());
     e.commit();
   }
@@ -150,7 +191,7 @@ void OldGCTracer::send_concurrent_mode_failure_event() {
 
 #if INCLUDE_ALL_GCS
 void G1NewTracer::send_g1_young_gc_event() {
-  EventGCG1GarbageCollection e(UNTIMED);
+  EventG1GarbageCollection e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_type(_g1_young_gc_info.type());
@@ -160,16 +201,31 @@ void G1NewTracer::send_g1_young_gc_event() {
   }
 }
 
+void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms, bool gc_thread) {
+  EventG1MMU e;
+  if (e.should_commit()) {
+    if (gc_thread) {
+      e.set_gcId(G1CollectedHeap::heap()->gc_tracer_cm()->gc_id().id());
+    } else {
+      e.set_gcId(G1CollectedHeap::heap()->gc_tracer_stw()->gc_id().id());
+    }
+
+    
e.set_timeSlice(time_slice_ms); + e.set_gcTime(gc_time_ms); + e.set_pauseTarget(max_time_ms); + e.commit(); + } +} + void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { - EventEvacuationInfo e; + EventEvacuationInformation e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); e.set_cSetRegions(info->collectionset_regions()); e.set_cSetUsedBefore(info->collectionset_used_before()); e.set_cSetUsedAfter(info->collectionset_used_after()); e.set_allocationRegions(info->allocation_regions()); - e.set_allocRegionsUsedBefore(info->alloc_regions_used_before()); - e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied()); + e.set_allocationRegionsUsedBefore(info->alloc_regions_used_before()); + e.set_allocationRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied()); e.set_bytesCopied(info->bytes_copied()); e.set_regionsFreed(info->regions_freed()); e.commit(); @@ -180,10 +236,31 @@ void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_in EventEvacuationFailed e; if (e.should_commit()) { e.set_gcId(_shared_gc_info.gc_id().id()); - e.set_data(to_trace_struct(ef_info)); + e.set_evacuationFailed(to_trace_struct(ef_info)); e.commit(); } } + +void G1NewTracer::send_basic_ihop_statistics(size_t threshold, + size_t target_occupancy, + size_t current_occupancy, + size_t last_allocation_size, + double last_allocation_duration, + double last_marking_length) { + EventG1BasicIHOP evt; + if (evt.should_commit()) { + evt.set_gcId(_shared_gc_info.gc_id().id()); + evt.set_threshold(threshold); + evt.set_targetOccupancy(target_occupancy); + evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0); + evt.set_currentOccupancy(current_occupancy); + evt.set_recentMutatorAllocationSize(last_allocation_size); + evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS); + evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0); + evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS); + evt.commit(); + } +} #endif static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) { @@ -224,6 +301,20 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor { } } + void visit(const G1HeapSummary* g1_heap_summary) const { + visit((GCHeapSummary*)g1_heap_summary); + EventG1HeapSummary e; + if (e.should_commit()) { + e.set_gcId(_gc_id.id()); + e.set_when((u1)_when); + e.set_edenUsedSize(g1_heap_summary->edenUsed()); + e.set_edenTotalSize(g1_heap_summary->edenCapacity()); + e.set_survivorUsedSize(g1_heap_summary->survivorUsed()); + e.set_numberOfRegions(g1_heap_summary->numberOfRegions()); + e.commit(); + } + } + void visit(const PSHeapSummary* ps_heap_summary) const { visit((GCHeapSummary*)ps_heap_summary); diff --git a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp index fff7eea8ca9c4da75f9cf786ee02b27049ceac28..3aed30bd5d37637875d8e43cc65b83d4cd54a7fb 100644 --- a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp +++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,8 @@ GCTraceTime::~GCTraceTime() {
 
   if (_doit) {
     const Tickspan duration = stop_counter - _start_counter;
-    double duration_in_seconds = TicksToTimeHelper::seconds(duration);
+    double duration_in_seconds = TimeHelper::counter_to_seconds(duration.value());
+
     if (_print_cr) {
       gclog_or_tty->print_cr(", %3.7f secs]", duration_in_seconds);
     } else {
diff --git a/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp
index 8b9d6a141f5091e4b03f798d3cf6870504b7c8b4..176fe615a1e2b7f597177c519425ead2969a2e14 100644
--- a/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -32,28 +32,44 @@
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
 #if INCLUDE_SERVICES
-
-void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) {
-#if INCLUDE_TRACE
-  assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
-         "Only call this method if the event is enabled");
-
-  EventObjectCountAfterGC event(UNTIMED);
-  event.set_gcId(gc_id.id());
-  event.set_class(entry->klass());
-  event.set_count(entry->count());
-  event.set_totalSize(entry->words() * BytesPerWord);
-  event.set_endtime(timestamp);
-  event.commit();
-#endif // INCLUDE_TRACE
-}
-
 bool ObjectCountEventSender::should_send_event() {
 #if INCLUDE_TRACE
-  return Tracing::is_event_enabled(EventObjectCountAfterGC::eventId);
+  return _should_send_requestable_event ||
+         Tracing::is_event_enabled(EventObjectCountAfterGC::eventId);
 #else
   return false;
 #endif // INCLUDE_TRACE
 }
+bool ObjectCountEventSender::_should_send_requestable_event = false;
+
+void ObjectCountEventSender::enable_requestable_event() {
+  _should_send_requestable_event = true;
+}
+
+void ObjectCountEventSender::disable_requestable_event() {
+  _should_send_requestable_event = false;
+}
+
+template <typename T>
+void ObjectCountEventSender::send_event_if_enabled(Klass* klass, jlong count, julong size, GCId gc_id, const Ticks& timestamp) {
+  T event(UNTIMED);
+  if (event.should_commit()) {
+    event.set_gcId(gc_id.id());
+    event.set_objectClass(klass);
+    event.set_count(count);
+    event.set_totalSize(size);
+    event.set_endtime(timestamp);
+    event.commit();
+  }
+}
+
+void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) {
+  Klass* klass = entry->klass();
+  jlong count = entry->count();
+  julong total_size = entry->words() * BytesPerWord;
+
+  send_event_if_enabled<EventObjectCount>(klass, count, total_size, gc_id, timestamp);
+  send_event_if_enabled<EventObjectCountAfterGC>(klass, count, total_size, gc_id, timestamp);
+}
 #endif // INCLUDE_SERVICES
diff --git a/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp b/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp
index b68e86dea8a84adf1028c6bca92a72b1859d9a6a..515e67a32c855862dcc0b04117ed6ed17284ff72 100644
--- a/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,10 +32,25 @@ #if INCLUDE_SERVICES class KlassInfoEntry; -class Ticks; class ObjectCountEventSender : public AllStatic { + static bool _should_send_requestable_event; + + template + static void send_event_if_enabled(Klass* klass, jlong count, julong size, GCId gc_id, const Ticks& timestamp); + + public: + static void enable_requestable_event(); + static void disable_requestable_event(); + + public: + // The following two functions have the exact same signature as + // hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp + // + // This file will replace the open file if a closed build is performed. + // These function signatures can therefore not be changed if the open + // signatures aren't changed as well. static void send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp); static bool should_send_event(); }; diff --git a/src/share/vm/gc_interface/allocTracer.cpp b/src/share/vm/gc_interface/allocTracer.cpp index 777925945cefab623058b95bd3a4f6ccd20eb637..55cc6e0efb3431c6947cafeac1f6e3f6d2d069f2 100644 --- a/src/share/vm/gc_interface/allocTracer.cpp +++ b/src/share/vm/gc_interface/allocTracer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,19 +29,21 @@ #include "runtime/handles.hpp" #include "utilities/globalDefinitions.hpp" -void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size) { - EventAllocObjectOutsideTLAB event; +void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread) { + TRACE_ALLOCATION(obj, alloc_size, thread); + EventObjectAllocationOutsideTLAB event; if (event.should_commit()) { - event.set_class(klass()); + event.set_objectClass(klass()); event.set_allocationSize(alloc_size); event.commit(); } } -void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size) { - EventAllocObjectInNewTLAB event; +void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread) { + TRACE_ALLOCATION(obj, alloc_size, thread); + EventObjectAllocationInNewTLAB event; if (event.should_commit()) { - event.set_class(klass()); + event.set_objectClass(klass()); event.set_allocationSize(alloc_size); event.set_tlabSize(tlab_size); event.commit(); diff --git a/src/share/vm/gc_interface/allocTracer.hpp b/src/share/vm/gc_interface/allocTracer.hpp index ddd9877d26b2d1d81c577d4f82940bf7fd03df99..7b9dcbd75a92d147d2b54a45ac275858f6f5e255 100644 --- a/src/share/vm/gc_interface/allocTracer.hpp +++ b/src/share/vm/gc_interface/allocTracer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,8 +30,8 @@ class AllocTracer : AllStatic { public: - static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size); - static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size); + static void send_allocation_outside_tlab_event(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread); + static void send_allocation_in_new_tlab_event(KlassHandle klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread); static void send_allocation_requiring_gc_event(size_t size, const GCId& gcId); }; diff --git a/src/share/vm/gc_interface/collectedHeap.cpp b/src/share/vm/gc_interface/collectedHeap.cpp index ddad402c25980374d8c0a9903901914a6122aa9c..7b64c0273eed42c61eff0c61dfbf1144e21cce5b 100644 --- a/src/share/vm/gc_interface/collectedHeap.cpp +++ b/src/share/vm/gc_interface/collectedHeap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -88,10 +88,12 @@ MetaspaceSummary CollectedHeap::create_metaspace_summary() { MetaspaceAux::committed_bytes(), MetaspaceAux::used_bytes(), MetaspaceAux::reserved_bytes()); + const MetaspaceSizes data_space( MetaspaceAux::committed_bytes(Metaspace::NonClassType), MetaspaceAux::used_bytes(Metaspace::NonClassType), MetaspaceAux::reserved_bytes(Metaspace::NonClassType)); + const MetaspaceSizes class_space( MetaspaceAux::committed_bytes(Metaspace::ClassType), MetaspaceAux::used_bytes(Metaspace::ClassType), @@ -286,7 +288,7 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thre return NULL; } - AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize); + AllocTracer::send_allocation_in_new_tlab_event(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread); if (ZeroTLAB) { // ..and clear it. diff --git a/src/share/vm/gc_interface/collectedHeap.inline.hpp b/src/share/vm/gc_interface/collectedHeap.inline.hpp index 302d0c7cb3add3283725ea1d799bf4fff76ada4b..89bfc5b9aa2b49555e7df71700e83ce69c0b000f 100644 --- a/src/share/vm/gc_interface/collectedHeap.inline.hpp +++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -140,7 +140,7 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t si "Unexpected exception, will result in uninitialized storage"); THREAD->incr_allocated_bytes(size * HeapWordSize); - AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize); + AllocTracer::send_allocation_outside_tlab_event(klass, result, size * HeapWordSize, THREAD); return result; } diff --git a/src/share/vm/jfr/dcmd/jfrDcmds.cpp b/src/share/vm/jfr/dcmd/jfrDcmds.cpp new file mode 100644 index 0000000000000000000000000000000000000000..67358b6b6a66fa632b1b8ad821d0ec83f5736922 --- /dev/null +++ b/src/share/vm/jfr/dcmd/jfrDcmds.cpp @@ -0,0 +1,631 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.hpp" +#include "classfile/vmSymbols.hpp" +#include "jfr/jfr.hpp" +#include "jfr/dcmd/jfrDcmds.hpp" +#include "jfr/jni/jfrJavaSupport.hpp" +#include "jfr/recorder/jfrRecorder.hpp" +#include "jfr/recorder/access/jfrOptionSet.hpp" +#include "memory/resourceArea.hpp" +#include "oops/oop.inline.hpp" +#include "oops/symbol.hpp" +#include "runtime/handles.inline.hpp" +#include "services/diagnosticArgument.hpp" +#include "services/diagnosticFramework.hpp" +#include "trace/traceBackend.hpp" +#include "utilities/globalDefinitions.hpp" + +#ifdef _WINDOWS +#define JFR_FILENAME_EXAMPLE "C:\\Users\\user\\My Recording.jfr" +#endif + +#ifdef __APPLE__ +#define JFR_FILENAME_EXAMPLE "/Users/user/My Recording.jfr" +#endif + +#ifndef JFR_FILENAME_EXAMPLE +#define JFR_FILENAME_EXAMPLE "/home/user/My Recording.jfr" +#endif + +// JNIHandle management + +// ------------------------------------------------------------------ +// push_jni_handle_block +// +// Push on a new block of JNI handles. +static void push_jni_handle_block(Thread* const thread) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread)); + + // Allocate a new block for JNI handles. + // Inlined code from jni_PushLocalFrame() + JNIHandleBlock* prev_handles = thread->active_handles(); + JNIHandleBlock* entry_handles = JNIHandleBlock::allocate_block(thread); + assert(entry_handles != NULL && prev_handles != NULL, "should not be NULL"); + entry_handles->set_pop_frame_link(prev_handles); // make sure prev handles get gc'd. + thread->set_active_handles(entry_handles); +} + +// ------------------------------------------------------------------ +// pop_jni_handle_block +// +// Pop off the current block of JNI handles. 
+static void pop_jni_handle_block(Thread* const thread) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread)); + + // Release our JNI handle block + JNIHandleBlock* entry_handles = thread->active_handles(); + JNIHandleBlock* prev_handles = entry_handles->pop_frame_link(); + // restore + thread->set_active_handles(prev_handles); + entry_handles->set_pop_frame_link(NULL); + JNIHandleBlock::release_block(entry_handles, thread); // may block +} + +class JNIHandleBlockManager : public StackObj { + private: + Thread* const _thread; + public: + JNIHandleBlockManager(Thread* thread) : _thread(thread) { + push_jni_handle_block(_thread); + } + + ~JNIHandleBlockManager() { + pop_jni_handle_block(_thread); + } +}; + + +static bool is_disabled(outputStream* output) { + if (Jfr::is_disabled()) { + if (output != NULL) { + output->print_cr("Flight Recorder is disabled.\n"); + } + return true; + } + return false; +} + +static bool is_recorder_instance_created(outputStream* output) { + if (!JfrRecorder::is_created()) { + if (output != NULL) { + output->print_cr("No available recordings.\n"); + output->print_cr("Use JFR.start to start a recording.\n"); + } + return false; + } + return true; +} + +static bool invalid_state(outputStream* out, TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + return is_disabled(out); +} + +static void print_pending_exception(outputStream* output, oop throwable) { + assert(throwable != NULL, "invariant"); + + oop msg = java_lang_Throwable::message(throwable); + + if (msg != NULL) { + char* text = java_lang_String::as_utf8_string(msg); + output->print_raw_cr(text); + } +} + +static void print_message(outputStream* output, const char* message) { + if (message != NULL) { + output->print_raw(message); + } +} + +static void handle_dcmd_result(outputStream* output, + const oop result, + const DCmdSource source, + TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + assert(output != NULL, "invariant"); + if (HAS_PENDING_EXCEPTION) { + print_pending_exception(output, PENDING_EXCEPTION); + // Don't clear excption on startup, JVM should fail initialization. + if (DCmd_Source_Internal != source) { + CLEAR_PENDING_EXCEPTION; + } + return; + } + + assert(!HAS_PENDING_EXCEPTION, "invariant"); + + if (result != NULL) { + const char* result_chars = java_lang_String::as_utf8_string(result); + print_message(output, result_chars); + } +} + +static oop construct_dcmd_instance(JfrJavaArguments* args, TRAPS) { + assert(args != NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + assert(args->klass() != NULL, "invariant"); + args->set_name("", CHECK_NULL); + args->set_signature("()V", CHECK_NULL); + JfrJavaSupport::new_object(args, CHECK_NULL); + return (oop)args->result()->get_jobject(); +} + +JfrDumpFlightRecordingDCmd::JfrDumpFlightRecordingDCmd(outputStream* output, + bool heap) : DCmdWithParser(output, heap), + _name("name", "Recording name, e.g. 
\\\"My Recording\\\"", "STRING", true, NULL), + _filename("filename", "Copy recording data to file, i.e \\\"" JFR_FILENAME_EXAMPLE "\\\"", "STRING", true), + _path_to_gc_roots("path-to-gc-roots", "Collect path to GC roots", "BOOLEAN", false, "false") { + _dcmdparser.add_dcmd_option(&_name); + _dcmdparser.add_dcmd_option(&_filename); + _dcmdparser.add_dcmd_option(&_path_to_gc_roots); +}; + +int JfrDumpFlightRecordingDCmd::num_arguments() { + ResourceMark rm; + JfrDumpFlightRecordingDCmd* dcmd = new JfrDumpFlightRecordingDCmd(NULL, false); + if (dcmd != NULL) { + DCmdMark mark(dcmd); + return dcmd->_dcmdparser.num_arguments(); + } + return 0; +} + +void JfrDumpFlightRecordingDCmd::execute(DCmdSource source, TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + + if (invalid_state(output(), THREAD) || !is_recorder_instance_created(output())) { + return; + } + + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + JNIHandleBlockManager jni_handle_management(THREAD); + + JavaValue result(T_OBJECT); + JfrJavaArguments constructor_args(&result); + constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdDump", CHECK); + const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK); + Handle h_dcmd_instance(THREAD, dcmd); + assert(h_dcmd_instance.not_null(), "invariant"); + + jstring name = NULL; + if (_name.is_set() && _name.value() != NULL) { + name = JfrJavaSupport::new_string(_name.value(), CHECK); + } + + jstring filepath = NULL; + if (_filename.is_set() && _filename.value() != NULL) { + filepath = JfrJavaSupport::new_string(_filename.value(), CHECK); + } + + jobject path_to_gc_roots = NULL; + if (_path_to_gc_roots.is_set()) { + path_to_gc_roots = JfrJavaSupport::new_java_lang_Boolean(_path_to_gc_roots.value(), CHECK); + } + + static const char klass[] = "jdk/jfr/internal/dcmd/DCmdDump"; + static const char method[] = "execute"; + static const char signature[] = "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/Boolean;)Ljava/lang/String;"; + + JfrJavaArguments execute_args(&result, klass, method, signature, CHECK); + execute_args.set_receiver(h_dcmd_instance); + + // arguments + execute_args.push_jobject(name); + execute_args.push_jobject(filepath); + execute_args.push_jobject(path_to_gc_roots); + + JfrJavaSupport::call_virtual(&execute_args, THREAD); + handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD); +} + +JfrCheckFlightRecordingDCmd::JfrCheckFlightRecordingDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap), + _name("name","Recording text, e.g. 
\\\"My Recording\\\" or omit to see all recordings","STRING",false, NULL), + _verbose("verbose","Print event settings for the recording(s)","BOOLEAN", + false, "false") { + _dcmdparser.add_dcmd_option(&_name); + _dcmdparser.add_dcmd_option(&_verbose); +}; + +int JfrCheckFlightRecordingDCmd::num_arguments() { + ResourceMark rm; + JfrCheckFlightRecordingDCmd* dcmd = new JfrCheckFlightRecordingDCmd(NULL, false); + if (dcmd != NULL) { + DCmdMark mark(dcmd); + return dcmd->_dcmdparser.num_arguments(); + } + return 0; +} + +void JfrCheckFlightRecordingDCmd::execute(DCmdSource source, TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + + if (invalid_state(output(), THREAD) || !is_recorder_instance_created(output())) { + return; + } + + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + JNIHandleBlockManager jni_handle_management(THREAD); + + JavaValue result(T_OBJECT); + JfrJavaArguments constructor_args(&result); + constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdCheck", CHECK); + const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK); + Handle h_dcmd_instance(THREAD, dcmd); + assert(h_dcmd_instance.not_null(), "invariant"); + + jstring name = NULL; + if (_name.is_set() && _name.value() != NULL) { + name = JfrJavaSupport::new_string(_name.value(), CHECK); + } + + jobject verbose = NULL; + if (_verbose.is_set()) { + verbose = JfrJavaSupport::new_java_lang_Boolean(_verbose.value(), CHECK); + } + + static const char klass[] = "jdk/jfr/internal/dcmd/DCmdCheck"; + static const char method[] = "execute"; + static const char signature[] = "(Ljava/lang/String;Ljava/lang/Boolean;)Ljava/lang/String;"; + + JfrJavaArguments execute_args(&result, klass, method, signature, CHECK); + execute_args.set_receiver(h_dcmd_instance); + + // arguments + execute_args.push_jobject(name); + execute_args.push_jobject(verbose); + + JfrJavaSupport::call_virtual(&execute_args, THREAD); + handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD); +} + +JfrStartFlightRecordingDCmd::JfrStartFlightRecordingDCmd(outputStream* output, + bool heap) : DCmdWithParser(output, heap), + _name("name", "Name that can be used to identify recording, e.g. \\\"My Recording\\\"", "STRING", false, NULL), + _settings("settings", "Settings file(s), e.g. profile or default. See JRE_HOME/lib/jfr", "STRING SET", false), + _delay("delay", "Delay recording start with (s)econds, (m)inutes), (h)ours), or (d)ays, e.g. 5h.", "NANOTIME", false, "0"), + _duration("duration", "Duration of recording in (s)econds, (m)inutes, (h)ours, or (d)ays, e.g. 300s.", "NANOTIME", false, "0"), + _filename("filename", "Resulting recording filename, e.g. \\\"" JFR_FILENAME_EXAMPLE "\\\"", "STRING", false), + _disk("disk", "Recording should be persisted to disk", "BOOLEAN", false), + _maxage("maxage", "Maximum time to keep recorded data (on disk) in (s)econds, (m)inutes, (h)ours, or (d)ays, e.g. 60m, or 0 for no limit", "NANOTIME", false, "0"), + _maxsize("maxsize", "Maximum amount of bytes to keep (on disk) in (k)B, (M)B or (G)B, e.g. 
500M, or 0 for no limit", "MEMORY SIZE", false, "0"), + _dump_on_exit("dumponexit", "Dump running recording when JVM shuts down", "BOOLEAN", false), + _path_to_gc_roots("path-to-gc-roots", "Collect path to GC roots", "BOOLEAN", false, "false") { + _dcmdparser.add_dcmd_option(&_name); + _dcmdparser.add_dcmd_option(&_settings); + _dcmdparser.add_dcmd_option(&_delay); + _dcmdparser.add_dcmd_option(&_duration); + _dcmdparser.add_dcmd_option(&_disk); + _dcmdparser.add_dcmd_option(&_filename); + _dcmdparser.add_dcmd_option(&_maxage); + _dcmdparser.add_dcmd_option(&_maxsize); + _dcmdparser.add_dcmd_option(&_dump_on_exit); + _dcmdparser.add_dcmd_option(&_path_to_gc_roots); +}; + +int JfrStartFlightRecordingDCmd::num_arguments() { + ResourceMark rm; + JfrStartFlightRecordingDCmd* dcmd = new JfrStartFlightRecordingDCmd(NULL, false); + if (dcmd != NULL) { + DCmdMark mark(dcmd); + return dcmd->_dcmdparser.num_arguments(); + } + return 0; +} + +void JfrStartFlightRecordingDCmd::execute(DCmdSource source, TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + + if (invalid_state(output(), THREAD)) { + return; + } + + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + JNIHandleBlockManager jni_handle_management(THREAD); + + JavaValue result(T_OBJECT); + JfrJavaArguments constructor_args(&result); + constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdStart", THREAD); + const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK); + Handle h_dcmd_instance(THREAD, dcmd); + assert(h_dcmd_instance.not_null(), "invariant"); + + jstring name = NULL; + if (_name.is_set() && _name.value() != NULL) { + name = JfrJavaSupport::new_string(_name.value(), CHECK); + } + + jstring filename = NULL; + if (_filename.is_set() && _filename.value() != NULL) { + filename = JfrJavaSupport::new_string(_filename.value(), CHECK); + } + + jobject maxage = NULL; + if (_maxage.is_set()) { + maxage = JfrJavaSupport::new_java_lang_Long(_maxage.value()._nanotime, CHECK); + } + + jobject maxsize = NULL; + if (_maxsize.is_set()) { + maxsize = JfrJavaSupport::new_java_lang_Long(_maxsize.value()._size, CHECK); + } + + jobject duration = NULL; + if (_duration.is_set()) { + duration = JfrJavaSupport::new_java_lang_Long(_duration.value()._nanotime, CHECK); + } + + jobject delay = NULL; + if (_delay.is_set()) { + delay = JfrJavaSupport::new_java_lang_Long(_delay.value()._nanotime, CHECK); + } + + jobject disk = NULL; + if (_disk.is_set()) { + disk = JfrJavaSupport::new_java_lang_Boolean(_disk.value(), CHECK); + } + + jobject dump_on_exit = NULL; + if (_dump_on_exit.is_set()) { + dump_on_exit = JfrJavaSupport::new_java_lang_Boolean(_dump_on_exit.value(), CHECK); + } + + jobject path_to_gc_roots = NULL; + if (_path_to_gc_roots.is_set()) { + path_to_gc_roots = JfrJavaSupport::new_java_lang_Boolean(_path_to_gc_roots.value(), CHECK); + } + + jobjectArray settings = NULL; + if (_settings.is_set()) { + const int length = _settings.value()->array()->length(); + settings = JfrJavaSupport::new_string_array(length, CHECK); + assert(settings != NULL, "invariant"); + for (int i = 0; i < length; ++i) { + jobject element = JfrJavaSupport::new_string(_settings.value()->array()->at(i), CHECK); + assert(element != NULL, "invariant"); + JfrJavaSupport::set_array_element(settings, element, i, CHECK); + } + } + + static const char klass[] = "jdk/jfr/internal/dcmd/DCmdStart"; + static const char method[] = "execute"; + static const char signature[] = "(Ljava/lang/String;[Ljava/lang/String;Ljava/lang/Long;" + 
"Ljava/lang/Long;Ljava/lang/Boolean;Ljava/lang/String;" + "Ljava/lang/Long;Ljava/lang/Long;Ljava/lang/Boolean;Ljava/lang/Boolean;)Ljava/lang/String;"; + + JfrJavaArguments execute_args(&result, klass, method, signature, CHECK); + execute_args.set_receiver(h_dcmd_instance); + + // arguments + execute_args.push_jobject(name); + execute_args.push_jobject(settings); + execute_args.push_jobject(delay); + execute_args.push_jobject(duration); + execute_args.push_jobject(disk); + execute_args.push_jobject(filename); + execute_args.push_jobject(maxage); + execute_args.push_jobject(maxsize); + execute_args.push_jobject(dump_on_exit); + execute_args.push_jobject(path_to_gc_roots); + + JfrJavaSupport::call_virtual(&execute_args, THREAD); + handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD); +} + +JfrStopFlightRecordingDCmd::JfrStopFlightRecordingDCmd(outputStream* output, + bool heap) : DCmdWithParser(output, heap), + _name("name", "Recording text,.e.g \\\"My Recording\\\"", "STRING", false, NULL), + _filename("filename", "Copy recording data to file, e.g. \\\"" JFR_FILENAME_EXAMPLE "\\\"", "STRING", false, NULL) { + _dcmdparser.add_dcmd_option(&_name); + _dcmdparser.add_dcmd_option(&_filename); +}; + +int JfrStopFlightRecordingDCmd::num_arguments() { + ResourceMark rm; + JfrStopFlightRecordingDCmd* dcmd = new JfrStopFlightRecordingDCmd(NULL, false); + if (dcmd != NULL) { + DCmdMark mark(dcmd); + return dcmd->_dcmdparser.num_arguments(); + } + return 0; +} + +void JfrStopFlightRecordingDCmd::execute(DCmdSource source, TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + + if (invalid_state(output(), THREAD) || !is_recorder_instance_created(output())) { + return; + } + + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + JNIHandleBlockManager jni_handle_management(THREAD); + + JavaValue result(T_OBJECT); + JfrJavaArguments constructor_args(&result); + constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdStop", CHECK); + const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK); + Handle h_dcmd_instance(THREAD, dcmd); + assert(h_dcmd_instance.not_null(), "invariant"); + + jstring name = NULL; + if (_name.is_set() && _name.value() != NULL) { + name = JfrJavaSupport::new_string(_name.value(), CHECK); + } + + jstring filepath = NULL; + if (_filename.is_set() && _filename.value() != NULL) { + filepath = JfrJavaSupport::new_string(_filename.value(), CHECK); + } + + static const char klass[] = "jdk/jfr/internal/dcmd/DCmdStop"; + static const char method[] = "execute"; + static const char signature[] = "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;"; + + JfrJavaArguments execute_args(&result, klass, method, signature, CHECK); + execute_args.set_receiver(h_dcmd_instance); + + // arguments + execute_args.push_jobject(name); + execute_args.push_jobject(filepath); + + JfrJavaSupport::call_virtual(&execute_args, THREAD); + handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD); +} + +JfrConfigureFlightRecorderDCmd::JfrConfigureFlightRecorderDCmd(outputStream* output, + bool heap) : DCmdWithParser(output, heap), + _repository_path("repositorypath", "Path to repository,.e.g \\\"My Repository\\\"", "STRING", false, NULL), + _dump_path("dumppath", "Path to dump,.e.g \\\"My Dump path\\\"", "STRING", false, NULL), + _stack_depth("stackdepth", "Stack Depth", "JLONG", false, "64"), + _global_buffer_count("globalbuffercount", "Number of global buffers,", "JLONG", false, "32"), + _global_buffer_size("globalbuffersize", "Size of a global 
buffers,", "JLONG", false, "524288"), + _thread_buffer_size("thread_buffer_size", "Size of a thread buffer", "JLONG", false, "8192"), + _memory_size("memorysize", "Overall memory size, ", "JLONG", false, "16777216"), + _max_chunk_size("maxchunksize", "Size of an individual disk chunk", "JLONG", false, "12582912"), + _sample_threads("samplethreads", "Activate Thread sampling", "BOOLEAN", false, "true") { + _dcmdparser.add_dcmd_option(&_repository_path); + _dcmdparser.add_dcmd_option(&_dump_path); + _dcmdparser.add_dcmd_option(&_stack_depth); + _dcmdparser.add_dcmd_option(&_global_buffer_count); + _dcmdparser.add_dcmd_option(&_global_buffer_size); + _dcmdparser.add_dcmd_option(&_thread_buffer_size); + _dcmdparser.add_dcmd_option(&_memory_size); + _dcmdparser.add_dcmd_option(&_max_chunk_size); + _dcmdparser.add_dcmd_option(&_sample_threads); +}; + +int JfrConfigureFlightRecorderDCmd::num_arguments() { + ResourceMark rm; + JfrConfigureFlightRecorderDCmd* dcmd = new JfrConfigureFlightRecorderDCmd(NULL, false); + if (dcmd != NULL) { + DCmdMark mark(dcmd); + return dcmd->_dcmdparser.num_arguments(); + } + return 0; +} + +void JfrConfigureFlightRecorderDCmd::execute(DCmdSource source, TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + + if (invalid_state(output(), THREAD)) { + return; + } + + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + JNIHandleBlockManager jni_handle_management(THREAD); + + JavaValue result(T_OBJECT); + JfrJavaArguments constructor_args(&result); + constructor_args.set_klass("jdk/jfr/internal/dcmd/DCmdConfigure", CHECK); + const oop dcmd = construct_dcmd_instance(&constructor_args, CHECK); + Handle h_dcmd_instance(THREAD, dcmd); + assert(h_dcmd_instance.not_null(), "invariant"); + + jstring repository_path = NULL; + if (_repository_path.is_set() && _repository_path.value() != NULL) { + repository_path = JfrJavaSupport::new_string(_repository_path.value(), CHECK); + } + + jstring dump_path = NULL; + if (_dump_path.is_set() && _dump_path.value() != NULL) { + dump_path = JfrJavaSupport::new_string(_dump_path.value(), CHECK); + } + + jobject stack_depth = NULL; + if (_stack_depth.is_set()) { + stack_depth = JfrJavaSupport::new_java_lang_Integer((jint)_stack_depth.value(), CHECK); + } + + jobject global_buffer_count = NULL; + if (_global_buffer_count.is_set()) { + global_buffer_count = JfrJavaSupport::new_java_lang_Long(_global_buffer_count.value(), CHECK); + } + + jobject global_buffer_size = NULL; + if (_global_buffer_size.is_set()) { + global_buffer_size = JfrJavaSupport::new_java_lang_Long(_global_buffer_size.value(), CHECK); + } + + jobject thread_buffer_size = NULL; + if (_thread_buffer_size.is_set()) { + thread_buffer_size = JfrJavaSupport::new_java_lang_Long(_thread_buffer_size.value(), CHECK); + } + + jobject max_chunk_size = NULL; + if (_max_chunk_size.is_set()) { + max_chunk_size = JfrJavaSupport::new_java_lang_Long(_max_chunk_size.value(), CHECK); + } + + jobject memory_size = NULL; + if (_memory_size.is_set()) { + memory_size = JfrJavaSupport::new_java_lang_Long(_memory_size.value(), CHECK); + } + + jobject sample_threads = NULL; + if (_sample_threads.is_set()) { + sample_threads = JfrJavaSupport::new_java_lang_Boolean(_sample_threads.value(), CHECK); + } + + static const char klass[] = "jdk/jfr/internal/dcmd/DCmdConfigure"; + static const char method[] = "execute"; + static const char signature[] = "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/Integer;" + "Ljava/lang/Long;Ljava/lang/Long;Ljava/lang/Long;Ljava/lang/Long;" + 
"Ljava/lang/Long;Ljava/lang/Boolean;)Ljava/lang/String;"; + + JfrJavaArguments execute_args(&result, klass, method, signature, CHECK); + execute_args.set_receiver(h_dcmd_instance); + + // params + execute_args.push_jobject(repository_path); + execute_args.push_jobject(dump_path); + execute_args.push_jobject(stack_depth); + execute_args.push_jobject(global_buffer_count); + execute_args.push_jobject(global_buffer_size); + execute_args.push_jobject(thread_buffer_size); + execute_args.push_jobject(memory_size); + execute_args.push_jobject(max_chunk_size); + execute_args.push_jobject(sample_threads); + + JfrJavaSupport::call_virtual(&execute_args, THREAD); + handle_dcmd_result(output(), (oop)result.get_jobject(), source, THREAD); +} + +bool register_jfr_dcmds() { + uint32_t full_export = DCmd_Source_Internal | DCmd_Source_AttachAPI | DCmd_Source_MBean; + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + return true; +} + diff --git a/src/share/vm/jfr/dcmd/jfrDcmds.hpp b/src/share/vm/jfr/dcmd/jfrDcmds.hpp new file mode 100644 index 0000000000000000000000000000000000000000..857772d3b06886464f08a40b6016ee7a9c137e4e --- /dev/null +++ b/src/share/vm/jfr/dcmd/jfrDcmds.hpp @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_JFRDCMDS_HPP +#define SHARE_VM_JFR_JFRDCMDS_HPP + +#include "services/diagnosticCommand.hpp" + +class JfrDumpFlightRecordingDCmd : public DCmdWithParser { + protected: + DCmdArgument _name; + DCmdArgument _filename; + DCmdArgument _path_to_gc_roots; + + public: + JfrDumpFlightRecordingDCmd(outputStream* output, bool heap); + static const char* name() { + return "JFR.dump"; + } + static const char* description() { + return "Copies contents of a JFR recording to file. 
Either the name or the recording id must be specified.";
+  }
+  static const char* impact() {
+    return "Low";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+class JfrCheckFlightRecordingDCmd : public DCmdWithParser {
+ protected:
+  DCmdArgument<char*> _name;
+  DCmdArgument<bool> _verbose;
+
+ public:
+  JfrCheckFlightRecordingDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "JFR.check";
+  }
+  static const char* description() {
+    return "Checks running JFR recording(s)";
+  }
+  static const char* impact() {
+    return "Low";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+class JfrStartFlightRecordingDCmd : public DCmdWithParser {
+ protected:
+  DCmdArgument<char*> _name;
+  DCmdArgument<StringArrayArgument*> _settings;
+  DCmdArgument<NanoTimeArgument> _delay;
+  DCmdArgument<NanoTimeArgument> _duration;
+  DCmdArgument<bool> _disk;
+  DCmdArgument<char*> _filename;
+  DCmdArgument<NanoTimeArgument> _maxage;
+  DCmdArgument<MemorySizeArgument> _maxsize;
+  DCmdArgument<bool> _dump_on_exit;
+  DCmdArgument<bool> _path_to_gc_roots;
+
+ public:
+  JfrStartFlightRecordingDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "JFR.start";
+  }
+  static const char* description() {
+    return "Starts a new JFR recording";
+  }
+  static const char* impact() {
+    return "Medium: Depending on the settings for a recording, the impact can range from low to high.";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+class JfrStopFlightRecordingDCmd : public DCmdWithParser {
+ protected:
+  DCmdArgument<char*> _name;
+  DCmdArgument<char*> _filename;
+
+ public:
+  JfrStopFlightRecordingDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "JFR.stop";
+  }
+  static const char* description() {
+    return "Stops a JFR recording";
+  }
+  static const char* impact() {
+    return "Low";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+class JfrRuntimeOptions;
+
+class JfrConfigureFlightRecorderDCmd : public DCmdWithParser {
+  friend class JfrOptionSet;
+ protected:
+  DCmdArgument<char*> _repository_path;
+  DCmdArgument<char*> _dump_path;
+  DCmdArgument<jlong> _stack_depth;
+  DCmdArgument<jlong> _global_buffer_count;
+  DCmdArgument<jlong> _global_buffer_size;
+  DCmdArgument<jlong> _thread_buffer_size;
+  DCmdArgument<jlong> _memory_size;
+  DCmdArgument<jlong> _max_chunk_size;
+  DCmdArgument<bool> _sample_threads;
+
+ public:
+  JfrConfigureFlightRecorderDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "JFR.configure";
+  }
+  static const char* description() {
+    return "Configure JFR";
+  }
+  static const char* impact() {
+    return "Low";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+bool register_jfr_dcmds();
+
+#endif // SHARE_VM_JFR_JFRDCMDS_HPP
diff --git a/src/share/vm/jfr/instrumentation/jfrEventClassTransformer.cpp 
b/src/share/vm/jfr/instrumentation/jfrEventClassTransformer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1f355a01c13a08b0430be00a9d1578cd3e70e300
--- /dev/null
+++ b/src/share/vm/jfr/instrumentation/jfrEventClassTransformer.cpp
@@ -0,0 +1,1555 @@
+/*
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "classfile/classFileParser.hpp"
+#include "classfile/classFileStream.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/stackMapTable.hpp"
+#include "classfile/verificationType.hpp"
+#include "interpreter/bytecodes.hpp"
+#include "jfr/instrumentation/jfrEventClassTransformer.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/jni/jfrUpcalls.hpp"
+#include "jfr/recorder/access/jfrEventClass.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/utilities/jfrBigEndian.hpp"
+#include "jfr/writers/jfrBigEndianWriter.hpp"
+#include "jfr/utilities/jfrLog.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+
+#include "oops/instanceKlass.hpp"
+#include "oops/method.hpp"
+#include "prims/jvmtiRedefineClasses.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+static const u2 number_of_new_methods = 5;
+static const u2 number_of_new_fields = 3;
+static const int extra_stream_bytes = 0x280;
+static const u2 invalid_cp_index = 0;
+
+static const char* utf8_constants[] = {
+  "Code",         // 0
+  "J",            // 1
+  "commit",       // 2
+  "eventHandler", // 3
+  "Ljdk/jfr/internal/handlers/EventHandler;", // 4
+  "duration",     // 5
+  "begin",        // 6
+  "()V",          // 7
+  "isEnabled",    // 8
+  "()Z",          // 9
+  "end",          // 10
+  "shouldCommit", // 11
+  "startTime",    // 12
+  "<clinit>",     // 13
+  "jdk/jfr/FlightRecorder", // 14
+  "register",     // 15
+  "(Ljava/lang/Class;)V", // 16 // LAST_REQUIRED_UTF8
+  "StackMapTable", // 17
+  "Exceptions", // 18
+  "LineNumberTable", // 20
+  "LocalVariableTable", // 21
+  "LocalVariableTypeTable", // 22
+  "RuntimeVisibleAnnotation" // 23
+};
+
+enum utf8_req_symbols {
+  UTF8_REQ_Code,
+  UTF8_REQ_J_FIELD_DESC,
+  UTF8_REQ_commit,
+  UTF8_REQ_eventHandler,
+  UTF8_REQ_eventHandler_FIELD_DESC,
+  UTF8_REQ_duration,
+  UTF8_REQ_begin,
+  UTF8_REQ_EMPTY_VOID_METHOD_DESC,
+  UTF8_REQ_isEnabled,
+  UTF8_REQ_EMPTY_BOOLEAN_METHOD_DESC,
+  UTF8_REQ_end,
+  UTF8_REQ_shouldCommit,
+  UTF8_REQ_startTime,
+  UTF8_REQ_clinit,
+  
UTF8_REQ_FlightRecorder, + UTF8_REQ_register, + UTF8_REQ_CLASS_VOID_METHOD_DESC, + NOF_UTF8_REQ_SYMBOLS +}; + +enum utf8_opt_symbols { + UTF8_OPT_StackMapTable = NOF_UTF8_REQ_SYMBOLS, + UTF8_OPT_Exceptions, + UTF8_OPT_LineNumberTable, + UTF8_OPT_LocalVariableTable, + UTF8_OPT_LocalVariableTypeTable, + UTF8_OPT_RuntimeVisibleAnnotation, + NOF_UTF8_SYMBOLS +}; + +static u1 empty_void_method_code_attribute[] = { + 0x0, + 0x0, + 0x0, + 0xd, // attribute len + 0x0, + 0x0, // max stack + 0x0, + 0x1, // max locals + 0x0, + 0x0, + 0x0, + 0x1, // code length + Bytecodes::_return, + 0x0, + 0x0, // ex table len + 0x0, + 0x0 // attributes_count +}; + +static u1 boolean_method_code_attribute[] = { + 0x0, + 0x0, + 0x0, + 0xe, + 0x0, + 0x1, // max stack + 0x0, + 0x1, // max locals + 0x0, + 0x0, + 0x0, + 0x2, + Bytecodes::_iconst_0, + Bytecodes::_ireturn, + 0x0, + 0x0, // ex table len + 0x0, + 0x0, // attributes_count +}; + +// annotation processing support + +enum { // initial annotation layout + atype_off = 0, // utf8 such as 'Ljava/lang/annotation/Retention;' + count_off = 2, // u2 such as 1 (one value) + member_off = 4, // utf8 such as 'value' + tag_off = 6, // u1 such as 'c' (type) or 'e' (enum) + e_tag_val = 'e', + e_type_off = 7, // utf8 such as 'Ljava/lang/annotation/RetentionPolicy;' + e_con_off = 9, // utf8 payload, such as 'SOURCE', 'CLASS', 'RUNTIME' + e_size = 11, // end of 'e' annotation + c_tag_val = 'c', // payload is type + c_con_off = 7, // utf8 payload, such as 'I' + c_size = 9, // end of 'c' annotation + s_tag_val = 's', // payload is String + s_con_off = 7, // utf8 payload, such as 'Ljava/lang/String;' + s_size = 9, + min_size = 6 // smallest possible size (zero members) +}; + +static int skip_annotation_value(const address, int, int); // fwd decl + +// Skip an annotation. Return >=limit if there is any problem. +static int next_annotation_index(const address buffer, int limit, int index) { + assert(buffer != NULL, "invariant"); + index += 2; // skip atype + if ((index += 2) >= limit) { + return limit; + } + int nof_members = JfrBigEndian::read(buffer + index - 2); + while (--nof_members >= 0 && index < limit) { + index += 2; // skip member + index = skip_annotation_value(buffer, limit, index); + } + return index; +} + +// Skip an annotation value. Return >=limit if there is any problem. 
+static int skip_annotation_value(const address buffer, int limit, int index) { + assert(buffer != NULL, "invariant"); + // value := switch (tag:u1) { + // case B, C, I, S, Z, D, F, J, c: con:u2; + // case e: e_class:u2 e_name:u2; + // case s: s_con:u2; + // case [: do(nval:u2) {value}; + // case @: annotation; + // case s: s_con:u2; + // } + if ((index += 1) >= limit) { + return limit; + } + const u1 tag = buffer[index - 1]; + switch (tag) { + case 'B': + case 'C': + case 'I': + case 'S': + case 'Z': + case 'D': + case 'F': + case 'J': + case 'c': + case 's': + index += 2; // skip con or s_con + break; + case 'e': + index += 4; // skip e_class, e_name + break; + case '[': + { + if ((index += 2) >= limit) { + return limit; + } + int nof_values = JfrBigEndian::read(buffer + index - 2); + while (--nof_values >= 0 && index < limit) { + index = skip_annotation_value(buffer, limit, index); + } + } + break; + case '@': + index = next_annotation_index(buffer, limit, index); + break; + default: + return limit; // bad tag byte + } + return index; +} + +static const u2 number_of_elements_offset = (u2)2; +static const u2 element_name_offset = (u2)(number_of_elements_offset + 2); +static const u2 element_name_size = (u2)2; +static const u2 value_type_relative_offset = (u2)2; +static const u2 value_relative_offset = (u2)(value_type_relative_offset + 1); + +// see JVMS - 4.7.16. The RuntimeVisibleAnnotations Attribute + +class AnnotationElementIterator : public StackObj { + private: + const InstanceKlass* _ik; + const address _buffer; + const u2 _limit; // length of annotation + mutable u2 _current; // element + mutable u2 _next; // element + u2 value_index() const { + return JfrBigEndian::read(_buffer + _current + value_relative_offset); + } + + public: + AnnotationElementIterator(const InstanceKlass* ik, address buffer, u2 limit) : _ik(ik), + _buffer(buffer), + _limit(limit), + _next(element_name_offset), + _current(element_name_offset) { + assert(_buffer != NULL, "invariant"); + assert(_next == element_name_offset, "invariant"); + assert(_current == element_name_offset, "invariant"); + } + + bool has_next() const { + return _next < _limit; + } + + void move_to_next() const { + assert(has_next(), "invariant"); + _current = _next; + if (_next < _limit) { + _next = skip_annotation_value(_buffer, _limit, _next + element_name_size); + } + assert(_next <= _limit, "invariant"); + assert(_current <= _limit, "invariant"); + } + + u2 number_of_elements() const { + return JfrBigEndian::read(_buffer + number_of_elements_offset); + } + + const Symbol* name() const { + assert(_current < _next, "invariant"); + return _ik->constants()->symbol_at(JfrBigEndian::read(_buffer + _current)); + } + + char value_type() const { + return JfrBigEndian::read(_buffer + _current + value_type_relative_offset); + } + + jint read_int() const { + return _ik->constants()->int_at(value_index()); + } + + bool read_bool() const { + return read_int() != 0; + } +}; + +class AnnotationIterator : public StackObj { + private: + const InstanceKlass* _ik; + // ensure _limit field is declared before _buffer + u2 _limit; // length of annotations array + const address _buffer; + mutable u2 _current; // annotation + mutable u2 _next; // annotation + + public: + AnnotationIterator(const InstanceKlass* ik, AnnotationArray* ar) : _ik(ik), + _current(0), + _next(0), + _limit(ar != NULL ? ar->length() : 0), + _buffer(_limit > 2 ? 
ar->adr_at(2) : NULL) { + if (_buffer != NULL) { + _limit -= 2; // subtract sizeof(u2) number of annotations field + } + } + bool has_next() const { + return _next < _limit; + } + + void move_to_next() const { + assert(has_next(), "invariant"); + _current = _next; + if (_next < _limit) { + _next = next_annotation_index(_buffer, _limit, _next); + } + assert(_next <= _limit, "invariant"); + assert(_current <= _limit, "invariant"); + } + const AnnotationElementIterator elements() const { + assert(_current < _next, "invariant"); + return AnnotationElementIterator(_ik, _buffer + _current, _next - _current); + } + const Symbol* type() const { + assert(_buffer != NULL, "invariant"); + assert(_current < _limit, "invariant"); + return _ik->constants()->symbol_at(JfrBigEndian::read(_buffer + _current)); + } +}; + +static unsigned int unused_hash = 0; +static const char value_name[] = "value"; +static bool has_registered_annotation(const InstanceKlass* ik, const Symbol* annotation_type, bool& value) { + assert(annotation_type != NULL, "invariant"); + AnnotationArray* class_annotations = ik->class_annotations(); + if (class_annotations == NULL) { + return false; + } + + const AnnotationIterator annotation_iterator(ik, class_annotations); + while (annotation_iterator.has_next()) { + annotation_iterator.move_to_next(); + if (annotation_iterator.type() == annotation_type) { + // target annotation found + static const Symbol* value_symbol = + SymbolTable::lookup_only(value_name, sizeof value_name - 1, unused_hash); + assert(value_symbol != NULL, "invariant"); + const AnnotationElementIterator element_iterator = annotation_iterator.elements(); + while (element_iterator.has_next()) { + element_iterator.move_to_next(); + if (value_symbol == element_iterator.name()) { + // "value" element + assert('Z' == element_iterator.value_type(), "invariant"); + value = element_iterator.read_bool(); + return true; + } + } + } + } + return false; +} + +static bool registered_annotation_value(const InstanceKlass* ik, const Symbol* const registered_symbol) { + assert(registered_symbol != NULL, "invariant"); + assert(ik != NULL, "invariant"); + assert(JdkJfrEvent::is_a(ik), "invariant"); + bool registered_value = false; + if (has_registered_annotation(ik, registered_symbol, registered_value)) { + return registered_value; + } + InstanceKlass* super = InstanceKlass::cast(ik->super()); + return registered_annotation_value(super, registered_symbol); +} + +static const char registered_constant[] = "Ljdk/jfr/Registered;"; + +// Evaluate to the value of the first found "Ljdk/jfr/Registered;" annotation. +// Searching moves upwards in the klass hierarchy in order to support +// inherited annotations in addition to the ability to override. 
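// A standalone sketch (not from the patch) of the upward search described above,
// reduced to a toy class model. ToyKlass and registered_value are illustrative
// names only, and the default of 'true' when nothing is found is an assumption
// made for the sketch (in the patch the walk ends at jdk.jfr.Event, which
// itself carries the annotation).

#include <map>
#include <string>

struct ToyKlass {
  const ToyKlass* super;                    // NULL at the root of the hierarchy
  std::map<std::string, bool> annotations;  // e.g. "Ljdk/jfr/Registered;" -> value
};

static bool registered_value(const ToyKlass* k, const std::string& type) {
  for (; k != NULL; k = k->super) {
    std::map<std::string, bool>::const_iterator it = k->annotations.find(type);
    if (it != k->annotations.end()) {
      return it->second;  // nearest annotation wins, so subclasses can override
    }
  }
  return true;            // assumed default for the sketch only
}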
+static bool should_register_klass(const InstanceKlass* ik) { + static const Symbol* const registered_symbol = SymbolTable::lookup_only(registered_constant, + sizeof registered_constant - 1, + unused_hash); + assert(registered_symbol != NULL, "invariant"); + return registered_annotation_value(ik, registered_symbol); +} +/* + * Map an utf8 constant back to its CONSTANT_UTF8_INFO + */ +static u2 utf8_info_index(const InstanceKlass* ik, const Symbol* const target, TRAPS) { + assert(target != NULL, "invariant"); + ConstantPool* cp = ik->constants(); + const int cp_len = cp->length(); + for (u2 index = 1; index < cp_len; ++index) { + const constantTag tag = cp->tag_at(index); + if (tag.is_utf8()) { + const Symbol* const utf8_sym = cp->symbol_at(index); + assert(utf8_sym != NULL, "invariant"); + if (utf8_sym == target) { + return index; + } + } + } + // not in constant pool + return invalid_cp_index; +} + +#ifdef ASSERT +static bool is_index_within_range(u2 index, u2 orig_cp_len, u2 new_cp_entries_len) { + return index > 0 && index < orig_cp_len + new_cp_entries_len; +} +#endif + +static u2 add_utf8_info(JfrBigEndianWriter& writer, const char* utf8_constant, u2 orig_cp_len, u2& new_cp_entries_len) { + assert(utf8_constant != NULL, "invariant"); + writer.write(JVM_CONSTANT_Utf8); + writer.write_utf8_u2_len(utf8_constant); + assert(writer.is_valid(), "invariant"); + // return index for the added utf8 info + return orig_cp_len + new_cp_entries_len++; +} + +static u2 add_method_ref_info(JfrBigEndianWriter& writer, + u2 cls_name_index, + u2 method_index, + u2 desc_index, + u2 orig_cp_len, + u2& number_of_new_constants, + TRAPS) { + assert(is_index_within_range(cls_name_index, orig_cp_len, number_of_new_constants), "invariant"); + assert(is_index_within_range(method_index, orig_cp_len, number_of_new_constants), "invariant"); + assert(is_index_within_range(desc_index, orig_cp_len, number_of_new_constants), "invariant"); + writer.write(JVM_CONSTANT_Class); + writer.write(cls_name_index); + const u2 cls_entry_index = orig_cp_len + number_of_new_constants; + ++number_of_new_constants; + writer.write(JVM_CONSTANT_NameAndType); + writer.write(method_index); + writer.write(desc_index); + const u2 nat_entry_index = orig_cp_len + number_of_new_constants; + ++number_of_new_constants; + writer.write(JVM_CONSTANT_Methodref); + writer.write(cls_entry_index); + writer.write(nat_entry_index); + // post-increment number_of_new_constants + // value returned is the index to the added method_ref + return orig_cp_len + number_of_new_constants++; +} + +static u2 add_flr_register_method_constants(JfrBigEndianWriter& writer, + const u2* utf8_indexes, + u2 orig_cp_len, + u2& number_of_new_constants, + TRAPS) { + assert(utf8_indexes != NULL, "invariant"); + return add_method_ref_info(writer, + utf8_indexes[UTF8_REQ_FlightRecorder], + utf8_indexes[UTF8_REQ_register], + utf8_indexes[UTF8_REQ_CLASS_VOID_METHOD_DESC], + orig_cp_len, + number_of_new_constants, + THREAD); +} + +/* + * field_info { + * u2 access_flags; + * u2 name_index; + * u2 descriptor_index; + * u2 attributes_count; + * attribute_info attributes[attributes_count]; + * } + */ +static jlong add_field_info(JfrBigEndianWriter& writer, u2 name_index, u2 desc_index, bool is_static = false) { + assert(name_index > 0, "invariant"); + assert(desc_index > 0, "invariant"); + DEBUG_ONLY(const jlong start_offset = writer.current_offset();) + writer.write(JVM_ACC_SYNTHETIC | JVM_ACC_PRIVATE | (is_static ? 
JVM_ACC_STATIC : JVM_ACC_TRANSIENT)); // flags + writer.write(name_index); + writer.write(desc_index); + writer.write((u2)0x0); // attributes_count + assert(writer.is_valid(), "invariant"); + DEBUG_ONLY(assert(start_offset + 8 == writer.current_offset(), "invariant");) + return writer.current_offset(); +} + +static u2 add_field_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes) { + assert(utf8_indexes != NULL, "invariant"); + add_field_info(writer, + utf8_indexes[UTF8_REQ_eventHandler], + utf8_indexes[UTF8_REQ_eventHandler_FIELD_DESC], + true); // static + + add_field_info(writer, + utf8_indexes[UTF8_REQ_startTime], + utf8_indexes[UTF8_REQ_J_FIELD_DESC]); + + add_field_info(writer, + utf8_indexes[UTF8_REQ_duration], + utf8_indexes[UTF8_REQ_J_FIELD_DESC]); + + return number_of_new_fields; +} + +/* + * method_info { + * u2 access_flags; + * u2 name_index; + * u2 descriptor_index; + * u2 attributes_count; + * attribute_info attributes[attributes_count]; + * } + * + * Code_attribute { + * u2 attribute_name_index; + * u4 attribute_length; + * u2 max_stack; + * u2 max_locals; + * u4 code_length; + * u1 code[code_length]; + * u2 exception_table_length; + * { u2 start_pc; + * u2 end_pc; + * u2 handler_pc; + * u2 catch_type; + * } exception_table[exception_table_length]; + * u2 attributes_count; + * attribute_info attributes[attributes_count]; + * } + */ + +static jlong add_method_info(JfrBigEndianWriter& writer, + u2 name_index, + u2 desc_index, + u2 code_index, + const u1* const code, + const size_t code_len) { + assert(name_index > 0, "invariant"); + assert(desc_index > 0, "invariant"); + assert(code_index > 0, "invariant"); + DEBUG_ONLY(const jlong start_offset = writer.current_offset();) + writer.write(JVM_ACC_SYNTHETIC | JVM_ACC_PUBLIC); // flags + writer.write(name_index); + writer.write(desc_index); + writer.write(0x1); // attributes_count ; 1 for "Code" attribute + assert(writer.is_valid(), "invariant"); + DEBUG_ONLY(assert(start_offset + 8 == writer.current_offset(), "invariant");) + // Code attribute + writer.write(code_index); // "Code" + writer.bytes(code, code_len); + DEBUG_ONLY(assert((start_offset + 8 + 2 + (jlong)code_len) == writer.current_offset(), "invariant");) + return writer.current_offset(); +} + +/* + * On return, the passed stream will be positioned + * just after the constant pool section in the classfile + * and the cp length is returned. + * + * Stream should come in at the start position. + */ +static u2 position_stream_after_cp(ClassFileStream* stream) { + assert(stream != NULL, "invariant"); + assert(stream->current_offset() == 0, "invariant"); + stream->skip_u4_fast(2); // 8 bytes skipped + const u2 cp_len = stream->get_u2_fast(); + assert(cp_len > 0, "invariant"); + // now spin the stream position to just after the constant pool + for (u2 index = 1; index < cp_len; ++index) { + const u1 tag = stream->get_u1_fast(); // cp tag + switch (tag) { + case JVM_CONSTANT_Class: + case JVM_CONSTANT_String: { + stream->skip_u2_fast(1); // skip 2 bytes + continue; + } + case JVM_CONSTANT_Fieldref: + case JVM_CONSTANT_Methodref: + case JVM_CONSTANT_InterfaceMethodref: + case JVM_CONSTANT_Integer: + case JVM_CONSTANT_Float: + case JVM_CONSTANT_NameAndType: + case JVM_CONSTANT_InvokeDynamic: { + stream->skip_u4_fast(1); // skip 4 bytes + continue; + } + case JVM_CONSTANT_Long: + case JVM_CONSTANT_Double: { + stream->skip_u4_fast(2); // skip 8 bytes + // Skip entry following eigth-byte constant, see JVM book p. 
98
+        ++index;
+        continue;
+      }
+      case JVM_CONSTANT_Utf8: {
+        u2 utf8_length = stream->get_u2_fast();
+        stream->skip_u1_fast(utf8_length); // skip 2 + len bytes
+        continue;
+      }
+      case JVM_CONSTANT_MethodHandle:
+      case JVM_CONSTANT_MethodType: {
+        if (tag == JVM_CONSTANT_MethodHandle) {
+          stream->skip_u1_fast(1);
+          stream->skip_u2_fast(1); // skip 3 bytes
+        }
+        else if (tag == JVM_CONSTANT_MethodType) {
+          stream->skip_u2_fast(1); // skip 2 bytes
+        }
+      }
+      continue;
+      default:
+        assert(false, "error in skip logic!");
+        break;
+    } // end switch(tag)
+  }
+  return cp_len;
+}
+
+/*
+* On return, the passed stream will be positioned
+* just after the fields section in the classfile
+* and the number of fields will be returned.
+*
+* Stream should come in positioned just before fields_count
+*/
+static u2 position_stream_after_fields(ClassFileStream* stream) {
+  assert(stream != NULL, "invariant");
+  assert(stream->current_offset() > 0, "invariant");
+  // fields len
+  const u2 orig_fields_len = stream->get_u2_fast();
+  // fields
+  for (u2 i = 0; i < orig_fields_len; ++i) {
+    stream->skip_u2_fast(3);
+    const u2 attrib_info_len = stream->get_u2_fast();
+    for (u2 j = 0; j < attrib_info_len; ++j) {
+      stream->skip_u2_fast(1);
+      const u4 attrib_len = stream->get_u4_fast();
+      stream->skip_u1_fast(attrib_len);
+    }
+  }
+  return orig_fields_len;
+}
+
+/*
+* On return, the passed stream will be positioned
+* just after the methods section in the classfile
+* and the number of methods will be returned.
+*
+* Stream should come in positioned just before methods_count
+*/
+static u2 position_stream_after_methods(JfrBigEndianWriter& writer,
+                                        ClassFileStream* stream,
+                                        const u2* utf8_indexes,
+                                        bool register_klass,
+                                        const Method* clinit_method,
+                                        u4& orig_method_len_offset) {
+  assert(stream != NULL, "invariant");
+  assert(stream->current_offset() > 0, "invariant");
+  assert(utf8_indexes != NULL, "invariant");
+  // We will come back to this location when we
+  // know how many methods there will be.
+  writer.reserve(sizeof(u2));
+  const u2 orig_methods_len = stream->get_u2_fast();
+  // Move copy position past original method_count
+  // in order to not copy the original count
+  orig_method_len_offset += sizeof(u2);
+  for (u2 i = 0; i < orig_methods_len; ++i) {
+    const u4 method_offset = stream->current_offset();
+    stream->skip_u2_fast(1); // Access Flags
+    const u2 name_index = stream->get_u2_fast(); // Name index
+    stream->skip_u2_fast(1); // Descriptor index
+    const u2 attributes_count = stream->get_u2_fast();
+    for (u2 j = 0; j < attributes_count; ++j) {
+      stream->skip_u2_fast(1);
+      const u4 attrib_len = stream->get_u4_fast();
+      stream->skip_u1_fast(attrib_len);
+    }
+    if (clinit_method != NULL && name_index == clinit_method->name_index()) {
+      // The method just parsed is an existing <clinit> method.
+      // If the class has the @Registered(false) annotation, i.e. marking a class
+      // for opting out from automatic registration, then we do not need to do anything.
+      if (!register_klass) {
+        continue;
+      }
+      // Automatic registration with the jfr system is accomplished
+      // by pre-pending code to the <clinit> method of the class.
+      // We will need to re-create a new <clinit> in a later step.
+      // For now, ensure that this method is excluded from the methods
+      // being copied.
+ writer.bytes(stream->buffer() + orig_method_len_offset, + method_offset - orig_method_len_offset); + assert(writer.is_valid(), "invariant"); + + // Update copy position to skip copy of method + orig_method_len_offset = stream->current_offset(); + } + } + return orig_methods_len; +} + +static u2 add_method_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes) { + assert(utf8_indexes != NULL, "invariant"); + add_method_info(writer, + utf8_indexes[UTF8_REQ_begin], + utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC], + utf8_indexes[UTF8_REQ_Code], + empty_void_method_code_attribute, + sizeof(empty_void_method_code_attribute)); + + assert(writer.is_valid(), "invariant"); + + add_method_info(writer, + utf8_indexes[UTF8_REQ_end], + utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC], + utf8_indexes[UTF8_REQ_Code], + empty_void_method_code_attribute, + sizeof(empty_void_method_code_attribute)); + + assert(writer.is_valid(), "invariant"); + + add_method_info(writer, + utf8_indexes[UTF8_REQ_commit], + utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC], + utf8_indexes[UTF8_REQ_Code], + empty_void_method_code_attribute, + sizeof(empty_void_method_code_attribute)); + + assert(writer.is_valid(), "invariant"); + + add_method_info(writer, + utf8_indexes[UTF8_REQ_isEnabled], + utf8_indexes[UTF8_REQ_EMPTY_BOOLEAN_METHOD_DESC], + utf8_indexes[UTF8_REQ_Code], + boolean_method_code_attribute, + sizeof(boolean_method_code_attribute)); + + assert(writer.is_valid(), "invariant"); + + add_method_info(writer, + utf8_indexes[UTF8_REQ_shouldCommit], + utf8_indexes[UTF8_REQ_EMPTY_BOOLEAN_METHOD_DESC], + utf8_indexes[UTF8_REQ_Code], + boolean_method_code_attribute, + sizeof(boolean_method_code_attribute)); + assert(writer.is_valid(), "invariant"); + return number_of_new_methods; +} + +static void adjust_exception_table(JfrBigEndianWriter& writer, u2 bci_adjustment_offset, const Method* method, TRAPS) { + const u2 ex_table_length = method != NULL ? 
(u2)method->exception_table_length() : 0; + writer.write(ex_table_length); // Exception table length + if (ex_table_length > 0) { + assert(method != NULL, "invariant"); + const ExceptionTableElement* const ex_elements = method->exception_table_start(); + for (int i = 0; i < ex_table_length; ++i) { + assert(ex_elements != NULL, "invariant"); + writer.write(ex_elements[i].start_pc + bci_adjustment_offset); + writer.write(ex_elements[i].end_pc + bci_adjustment_offset); + writer.write(ex_elements[i].handler_pc + bci_adjustment_offset); + writer.write(ex_elements[i].catch_type_index); // no adjustment + } + } +} + +enum StackMapFrameTypes { + SAME_FRAME_BEGIN = 0, + SAME_FRAME_END = 63, + SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN = 64, + SAME_LOCALS_1_STACK_ITEM_FRAME_END = 127, + SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED = 247, + CHOP_FRAME_BEGIN = 248, + CHOP_FRAME_END = 250, + SAME_FRAME_EXTENDED = 251, + APPEND_FRAME_BEGIN = 252, + APPEND_FRAME_END = 254, + FULL_FRAME = 255 +}; + +static void adjust_stack_map(JfrBigEndianWriter& writer, + Array* stack_map, + const u2* utf8_indexes, + u2 bci_adjustment_offset, + TRAPS) { + assert(stack_map != NULL, "invariant"); + assert(utf8_indexes != NULL, "invariant"); + writer.write(utf8_indexes[UTF8_OPT_StackMapTable]); + const jlong stack_map_attrib_len_offset = writer.current_offset(); + writer.reserve(sizeof(u4)); + StackMapStream stream(stack_map); + const u2 stack_map_entries = stream.get_u2(THREAD); + // number of entries + writer.write(stack_map_entries); // new stack map entry added + const u1 frame_type = stream.get_u1(THREAD); + // SAME_FRAME and SAME_LOCALS_1_STACK_ITEM_FRAME encode + // their offset_delta into the actual frame type itself. + // If such a frame type is the first frame, then we transform + // it to a SAME_FRAME_EXTENDED or a SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED frame. + // This is done in order to not overflow frame types accidentally + // when adjusting the offset_delta. 
In changing the frame types, + // we can work with an explicit u2 offset_delta field (like the other frame types) + if (frame_type <= SAME_FRAME_END) { + writer.write(SAME_FRAME_EXTENDED); + writer.write(frame_type + bci_adjustment_offset); + } else if (frame_type >= SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN && + frame_type <= SAME_LOCALS_1_STACK_ITEM_FRAME_END) { + writer.write(SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED); + writer.write((frame_type - SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN) + bci_adjustment_offset); + } else if (frame_type >= SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED) { + // SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED to FULL_FRAME + // has a u2 offset_delta field + writer.write(frame_type); + writer.write(stream.get_u2(THREAD) + bci_adjustment_offset); + } else { + assert(false, "stackMapFrame type is invalid"); + } + + while (!stream.at_end()) { + writer.write(stream.get_u1(THREAD)); + } + + u4 stack_map_attrib_len = writer.current_offset() - stack_map_attrib_len_offset; + // the stack_map_table_attributes_length value is exclusive + stack_map_attrib_len -= sizeof(u4); + writer.write_at_offset(stack_map_attrib_len, stack_map_attrib_len_offset); +} + +static void adjust_line_number_table(JfrBigEndianWriter& writer, + const u2* utf8_indexes, + u4 bci_adjustement_offset, + const Method* method, + TRAPS) { + assert(utf8_indexes != NULL, "invariant"); + assert(method != NULL, "invariant"); + assert(method->has_linenumber_table(), "invariant"); + writer.write(utf8_indexes[UTF8_OPT_LineNumberTable]); + const jlong lnt_attributes_length_offset = writer.current_offset(); + writer.reserve(sizeof(u4)); + const jlong lnt_attributes_entries_offset = writer.current_offset(); + writer.reserve(sizeof(u2)); + u1* lnt = method->compressed_linenumber_table(); + CompressedLineNumberReadStream lnt_stream(lnt); + u2 line_number_table_entries = 0; + while (lnt_stream.read_pair()) { + ++line_number_table_entries; + const u2 bci = (u2)lnt_stream.bci(); + writer.write(bci + (u2)bci_adjustement_offset); + writer.write((u2)lnt_stream.line()); + } + writer.write_at_offset(line_number_table_entries, lnt_attributes_entries_offset); + u4 lnt_table_attributes_len = writer.current_offset() - lnt_attributes_length_offset; + // the line_number_table_attributes_length value is exclusive + lnt_table_attributes_len -= sizeof(u4); + writer.write_at_offset(lnt_table_attributes_len, lnt_attributes_length_offset); +} + +// returns the number of lvtt entries +static u2 adjust_local_variable_table(JfrBigEndianWriter& writer, + const u2* utf8_indexes, + u2 bci_adjustment_offset, + const Method* method, + TRAPS) { + assert(utf8_indexes != NULL, "invariant"); + assert(method != NULL, "invariant"); + assert(method->has_localvariable_table(), "invariant"); + writer.write(utf8_indexes[UTF8_OPT_LocalVariableTable]); + const jlong lvt_attributes_length_offset = writer.current_offset(); + writer.reserve(sizeof(u4)); + const int lvt_len = method->localvariable_table_length(); + writer.write((u2)lvt_len); + const LocalVariableTableElement* table = method->localvariable_table_start(); + assert(table != NULL, "invariant"); + u2 num_lvtt_entries = 0; + for (int i = 0; i < lvt_len; ++i) { + writer.write(table[i].start_bci + bci_adjustment_offset); + writer.write(table[i].length); + writer.write(table[i].name_cp_index); + writer.write(table[i].descriptor_cp_index); + writer.write(table[i].slot); + if (table[i].signature_cp_index > 0) { + ++num_lvtt_entries; + } + } + u4 lvt_table_attributes_len = writer.current_offset() - 
lvt_attributes_length_offset; + // the lvt_table_attributes_length value is exclusive + lvt_table_attributes_len -= sizeof(u4); + writer.write_at_offset(lvt_table_attributes_len, lvt_attributes_length_offset); + return num_lvtt_entries; +} + +static void adjust_local_variable_type_table(JfrBigEndianWriter& writer, + const u2* utf8_indexes, + u2 bci_adjustment_offset, + u2 num_lvtt_entries, + const Method* method, + TRAPS) { + assert(num_lvtt_entries > 0, "invariant"); + writer.write(utf8_indexes[UTF8_OPT_LocalVariableTypeTable]); + const jlong lvtt_attributes_length_offset = writer.current_offset(); + writer.reserve(sizeof(u4)); + writer.write(num_lvtt_entries); + const LocalVariableTableElement* table = method->localvariable_table_start(); + assert(table != NULL, "invariant"); + const int lvt_len = method->localvariable_table_length(); + for (int i = 0; i < lvt_len; ++i) { + if (table[i].signature_cp_index > 0) { + writer.write(table[i].start_bci + bci_adjustment_offset); + writer.write(table[i].length); + writer.write(table[i].name_cp_index); + writer.write(table[i].signature_cp_index); + writer.write(table[i].slot); + } + } + u4 lvtt_table_attributes_len = writer.current_offset() - lvtt_attributes_length_offset; + // the lvtt_table_attributes_length value is exclusive + lvtt_table_attributes_len -= sizeof(u4); + writer.write_at_offset(lvtt_table_attributes_len, lvtt_attributes_length_offset); +} + +static void adjust_code_attributes(JfrBigEndianWriter& writer, + const u2* utf8_indexes, + u2 bci_adjustment_offset, + const Method* clinit_method, + TRAPS) { + // "Code" attributes + assert(utf8_indexes != NULL, "invariant"); + const jlong code_attributes_offset = writer.current_offset(); + writer.reserve(sizeof(u2)); + u2 number_of_code_attributes = 0; + if (clinit_method != NULL) { + Array* stack_map = clinit_method->stackmap_data(); + if (stack_map != NULL) { + ++number_of_code_attributes; + adjust_stack_map(writer, stack_map, utf8_indexes, bci_adjustment_offset, THREAD); + assert(writer.is_valid(), "invariant"); + } + if (clinit_method != NULL && clinit_method->has_linenumber_table()) { + ++number_of_code_attributes; + adjust_line_number_table(writer, utf8_indexes, bci_adjustment_offset, clinit_method, THREAD); + assert(writer.is_valid(), "invariant"); + } + if (clinit_method != NULL && clinit_method->has_localvariable_table()) { + ++number_of_code_attributes; + const u2 num_of_lvtt_entries = adjust_local_variable_table(writer, utf8_indexes, bci_adjustment_offset, clinit_method, THREAD); + assert(writer.is_valid(), "invariant"); + if (num_of_lvtt_entries > 0) { + ++number_of_code_attributes; + adjust_local_variable_type_table(writer, utf8_indexes, bci_adjustment_offset, num_of_lvtt_entries, clinit_method, THREAD); + assert(writer.is_valid(), "invariant"); + } + } + } + + // Store the number of code_attributes + writer.write_at_offset(number_of_code_attributes, code_attributes_offset); +} + +static jlong insert_clinit_method(const InstanceKlass* ik, + const ClassFileParser& parser, + JfrBigEndianWriter& writer, + u2 orig_constant_pool_len, + const u2* utf8_indexes, + const u2 register_method_ref_index, + const Method* clinit_method, + TRAPS) { + assert(utf8_indexes != NULL, "invariant"); + // The injected code length is always this value. + // This is to ensure that padding can be done + // where needed and to simplify size calculations. 
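// Not from the patch, but the arithmetic behind the fixed length used below:
// the injected prologue is always
//   ldc_w        #this_class              (1 opcode byte + u2 cp index = 3 bytes)
//   invokestatic #FlightRecorder.register (1 opcode byte + u2 cp index = 3 bytes)
//   nop, nop                              (2 bytes, when original <clinit> code follows)
//   -- or --
//   nop, return                           (2 bytes, when there is no original <clinit>)
// for a total of 8 bytes, which is also the amount every original bci,
// exception table entry and stack map offset must be adjusted by.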
+ static const u2 injected_code_length = 8; + const u2 name_index = utf8_indexes[UTF8_REQ_clinit]; + const u2 desc_index = utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC]; + const u2 max_stack = MAX2(clinit_method != NULL ? clinit_method->verifier_max_stack() : 1, 1); + const u2 max_locals = MAX2(clinit_method != NULL ? clinit_method->max_locals() : 0, 0); + const u2 orig_bytecodes_length = clinit_method != NULL ? (u2)clinit_method->code_size() : 0; + const address orig_bytecodes = clinit_method != NULL ? clinit_method->code_base() : NULL; + const u2 new_code_length = injected_code_length + orig_bytecodes_length; + DEBUG_ONLY(const jlong start_offset = writer.current_offset();) + writer.write(JVM_ACC_STATIC); // flags + writer.write(name_index); + writer.write(desc_index); + writer.write((u2)0x1); // attributes_count // "Code" + assert(writer.is_valid(), "invariant"); + DEBUG_ONLY(assert(start_offset + 8 == writer.current_offset(), "invariant");) + // "Code" attribute + writer.write(utf8_indexes[UTF8_REQ_Code]); // "Code" + const jlong code_attribute_length_offset = writer.current_offset(); + writer.reserve(sizeof(u4)); + writer.write(max_stack); // max stack + writer.write(max_locals); // max locals + writer.write((u4)new_code_length); // code length + + /* BEGIN CLINIT CODE */ + + // Note the use of ldc_w here instead of ldc. + // This is to handle all values of "this_class_index" + writer.write((u1)Bytecodes::_ldc_w); + writer.write((u2)parser.this_class_index()); // load constant "this class" + writer.write((u1)Bytecodes::_invokestatic); + // invoke "FlightRecorder.register(Ljava/lang/Class;") + writer.write(register_method_ref_index); + if (clinit_method == NULL) { + writer.write((u1)Bytecodes::_nop); + writer.write((u1)Bytecodes::_return); + } else { + // If we are pre-pending to original code, + // do padding to minimize disruption to the original. + // It might have dependencies on 4-byte boundaries + // i.e. 
lookupswitch and tableswitch instructions + writer.write((u1)Bytecodes::_nop); + writer.write((u1)Bytecodes::_nop); + // insert original clinit code + writer.bytes(orig_bytecodes, orig_bytecodes_length); + } + + /* END CLINIT CODE */ + + assert(writer.is_valid(), "invariant"); + adjust_exception_table(writer, injected_code_length, clinit_method, THREAD); + assert(writer.is_valid(), "invariant"); + adjust_code_attributes(writer, utf8_indexes, injected_code_length, clinit_method, THREAD); + assert(writer.is_valid(), "invariant"); + u4 code_attribute_len = writer.current_offset() - code_attribute_length_offset; + // the code_attribute_length value is exclusive + code_attribute_len -= sizeof(u4); + writer.write_at_offset(code_attribute_len, code_attribute_length_offset); + return writer.current_offset(); +} + +// Caller needs ResourceMark +static ClassFileStream* create_new_bytes_for_event_klass(const InstanceKlass* ik, const ClassFileParser& parser, TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + static const u2 public_final_flag_mask = JVM_ACC_PUBLIC | JVM_ACC_FINAL; + ClassFileStream* orig_stream = const_cast(parser.clone_stream()); + const int orig_stream_length = orig_stream->length(); + // allocate an identically sized buffer + u1* const new_buffer = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, u1, orig_stream_length); + if (new_buffer == NULL) { + return NULL; + } + assert(new_buffer != NULL, "invariant"); + // memcpy the entire [B + memcpy(new_buffer, orig_stream->buffer(), orig_stream_length); + const u2 orig_cp_len = position_stream_after_cp(orig_stream); + assert(orig_cp_len > 0, "invariant"); + assert(orig_stream->current_offset() > 0, "invariant"); + orig_stream->skip_u2_fast(3); // access_flags, this_class_index, super_class_index + const u2 iface_len = orig_stream->get_u2_fast(); + orig_stream->skip_u2_fast(iface_len); + // fields len + const u2 orig_fields_len = orig_stream->get_u2_fast(); + // fields + for (u2 i = 0; i < orig_fields_len; ++i) { + orig_stream->skip_u2_fast(3); + const u2 attrib_info_len = orig_stream->get_u2_fast(); + for (u2 j = 0; j < attrib_info_len; ++j) { + orig_stream->skip_u2_fast(1); + const u4 attrib_len = orig_stream->get_u4_fast(); + orig_stream->skip_u1_fast(attrib_len); + } + } + // methods + const u2 orig_methods_len = orig_stream->get_u2_fast(); + for (u2 i = 0; i < orig_methods_len; ++i) { + const u4 access_flag_offset = orig_stream->current_offset(); + const u2 flags = orig_stream->get_u2_fast(); + // Rewrite JVM_ACC_FINAL -> JVM_ACC_PUBLIC + if (public_final_flag_mask == flags) { + JfrBigEndianWriter accessflagsrewriter(new_buffer + access_flag_offset, sizeof(u2)); + accessflagsrewriter.write(JVM_ACC_PUBLIC); + assert(accessflagsrewriter.is_valid(), "invariant"); + } + orig_stream->skip_u2_fast(2); + const u2 attributes_count = orig_stream->get_u2_fast(); + for (u2 j = 0; j < attributes_count; ++j) { + orig_stream->skip_u2_fast(1); + const u4 attrib_len = orig_stream->get_u4_fast(); + orig_stream->skip_u1_fast(attrib_len); + } + } + return new ClassFileStream(new_buffer, orig_stream_length, NULL); +} + +// Attempt to locate an existing UTF8_INFO mapping the utf8_constant. +// If no UTF8_INFO exists, add (append) a new one to the constant pool. 
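// A standalone sketch (not from the patch) of the find-or-add scheme described
// above, over a toy constant pool of utf8 strings; the names are illustrative.
// New entries are appended, so the index handed back for an addition is the
// original pool length plus the number of entries added so far.

#include <string>
#include <vector>

// The pool is 1-based like a classfile constant pool; this sketch assumes
// pool[0] holds a dummy entry, mirroring cp index 0 being unused.
static size_t find_or_add_utf8(std::vector<std::string>& pool, const std::string& s) {
  for (size_t i = 1; i < pool.size(); ++i) {
    if (pool[i] == s) {
      return i;             // reuse the existing CONSTANT_Utf8_info
    }
  }
  pool.push_back(s);        // append a new entry
  return pool.size() - 1;   // its index is the new last slot
}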
+static u2 find_or_add_utf8_info(JfrBigEndianWriter& writer, + const InstanceKlass* ik, + const char* const utf8_constant, + u2 orig_cp_len, + u2& added_cp_entries, + TRAPS) { + assert(utf8_constant != NULL, "invariant"); + TempNewSymbol utf8_sym = SymbolTable::new_symbol(utf8_constant, THREAD); + // lookup existing + const int utf8_orig_idx = utf8_info_index(ik, utf8_sym, THREAD); + if (utf8_orig_idx != invalid_cp_index) { + // existing constant pool entry found + return utf8_orig_idx; + } + // no existing match, need to add a new utf8 cp entry + assert(invalid_cp_index == utf8_orig_idx, "invariant"); + // add / append new + return add_utf8_info(writer, utf8_constant, orig_cp_len, added_cp_entries); +} + +/* + * This routine will resolve the required utf8_constants array + * to their constant pool indexes (mapping to their UTF8_INFO's) + * Only if a constant is actually needed and does not already exist + * will it be added. + * + * The passed in indexes array will be populated with the resolved indexes. + * The number of newly added constant pool entries is returned. + */ +static u2 resolve_utf8_indexes(JfrBigEndianWriter& writer, + const InstanceKlass* ik, + u2* const utf8_indexes, + u2 orig_cp_len, + const Method* clinit_method, + TRAPS) { + assert(utf8_indexes != NULL, "invariant"); + u2 added_cp_entries = 0; + // resolve all required symbols + for (u2 index = 0; index < NOF_UTF8_REQ_SYMBOLS; ++index) { + utf8_indexes[index] = find_or_add_utf8_info(writer, + ik, + utf8_constants[index], + orig_cp_len, + added_cp_entries, + THREAD); + } + // Now determine optional constants (mainly "Code" attributes) + if (clinit_method != NULL && clinit_method->has_stackmap_table()) { + utf8_indexes[UTF8_OPT_StackMapTable] = + find_or_add_utf8_info(writer, + ik, + utf8_constants[UTF8_OPT_StackMapTable], + orig_cp_len, + added_cp_entries, + THREAD); + } else { + utf8_indexes[UTF8_OPT_StackMapTable] = invalid_cp_index; + } + + if (clinit_method != NULL && clinit_method->has_linenumber_table()) { + utf8_indexes[UTF8_OPT_LineNumberTable] = + find_or_add_utf8_info(writer, + ik, + utf8_constants[UTF8_OPT_LineNumberTable], + orig_cp_len, + added_cp_entries, + THREAD); + } else { + utf8_indexes[UTF8_OPT_LineNumberTable] = invalid_cp_index; + } + + if (clinit_method != NULL && clinit_method->has_localvariable_table()) { + utf8_indexes[UTF8_OPT_LocalVariableTable] = + find_or_add_utf8_info(writer, + ik, + utf8_constants[UTF8_OPT_LocalVariableTable], + orig_cp_len, + added_cp_entries, + THREAD); + + utf8_indexes[UTF8_OPT_LocalVariableTypeTable] = + find_or_add_utf8_info(writer, + ik, + utf8_constants[UTF8_OPT_LocalVariableTypeTable], + orig_cp_len, + added_cp_entries, + THREAD); + } else { + utf8_indexes[UTF8_OPT_LocalVariableTable] = invalid_cp_index; + utf8_indexes[UTF8_OPT_LocalVariableTypeTable] = invalid_cp_index; + } + + return added_cp_entries; +} + +static u1* new_bytes_for_lazy_instrumentation(InstanceKlass* ik, + const ClassFileParser& parser, + jint& size_of_new_bytes, + TRAPS) { + assert(ik != NULL, "invariant"); + // If the class already has a clinit method + // we need to take that into account + const Method* clinit_method = ik->class_initializer(); + const bool register_klass = should_register_klass(ik); + ClassFileStream* orig_stream = const_cast(parser.clone_stream()); + const int orig_stream_size = orig_stream->length(); + assert(orig_stream->current_offset() == 0, "invariant"); + const u2 orig_cp_len = position_stream_after_cp(orig_stream); + assert(orig_cp_len > 0, "invariant"); + 
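// Not from the patch, but useful context for the offsets used below: a class
// file begins with u4 magic, u2 minor_version and u2 major_version, i.e. 8
// bytes, which is why position_stream_after_cp skips two u4 values before
// reading constant_pool_count, and why the updated count is later written
// back at absolute offset 8 (writer.write_at_offset(..., 8)).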
  assert(orig_stream->current_offset() > 0, "invariant");
+  // Dimension and allocate a working byte buffer
+  // to be used in building up a modified class [B.
+  const jint new_buffer_size = extra_stream_bytes + orig_stream_size;
+  u1* const new_buffer = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, u1, new_buffer_size);
+  if (new_buffer == NULL) {
+    log_error(jfr, system)("Thread local allocation (native) for " SIZE_FORMAT
+      " bytes failed in JfrClassAdapter::on_klass_creation", (size_t)new_buffer_size);
+    return NULL;
+  }
+  assert(new_buffer != NULL, "invariant");
+  // [B wrapped in a big endian writer
+  JfrBigEndianWriter writer(new_buffer, new_buffer_size);
+  assert(writer.current_offset() == 0, "invariant");
+  const u4 orig_access_flag_offset = orig_stream->current_offset();
+  // Copy the original stream from the beginning up to AccessFlags.
+  // This means the original constant pool contents are copied unmodified.
+  writer.bytes(orig_stream->buffer(), orig_access_flag_offset);
+  assert(writer.is_valid(), "invariant");
+  assert(writer.current_offset() == orig_access_flag_offset, "invariant"); // same positions
+  // Our writer now sits just after the last original constant pool entry,
+  // i.e. we are in a good position to append new constant pool entries.
+  // This array will contain the resolved indexes
+  // needed to reference the UTF8_INFO's.
+  u2 utf8_indexes[NOF_UTF8_SYMBOLS];
+  // resolve_utf8_indexes will be conservative in attempting to
+  // locate an existing UTF8_INFO; it will only append constants
+  // that are absolutely required.
+  u2 number_of_new_constants = resolve_utf8_indexes(writer, ik, utf8_indexes, orig_cp_len, clinit_method, THREAD);
+  // UTF8_INFO entries now added to the constant pool.
+  // In order to invoke a method we also need additional
+  // constants: JVM_CONSTANT_Class, JVM_CONSTANT_NameAndType
+  // and JVM_CONSTANT_Methodref.
+  const u2 flr_register_method_ref_index =
+    register_klass ?
+      add_flr_register_method_constants(writer,
+                                        utf8_indexes,
+                                        orig_cp_len,
+                                        number_of_new_constants,
+                                        THREAD) : invalid_cp_index;
+
+  // New constant pool entries added and all UTF8_INFO indexes resolved.
+  // Now update the class file constant_pool_count with the updated count.
+  writer.write_at_offset(orig_cp_len + number_of_new_constants, 8);
+  assert(writer.is_valid(), "invariant");
+  orig_stream->skip_u2_fast(3); // access_flags, this_class_index, super_class_index
+  const u2 iface_len = orig_stream->get_u2_fast(); // interfaces
+  orig_stream->skip_u2_fast(iface_len);
+  const u4 orig_fields_len_offset = orig_stream->current_offset();
+  // Copy from AccessFlags up to and including interfaces
+  writer.bytes(orig_stream->buffer() + orig_access_flag_offset,
+               orig_fields_len_offset - orig_access_flag_offset);
+  assert(writer.is_valid(), "invariant");
+  const jlong new_fields_len_offset = writer.current_offset();
+  const u2 orig_fields_len = position_stream_after_fields(orig_stream);
+  u4 orig_method_len_offset = orig_stream->current_offset();
+  // Copy up to and including fields
+  writer.bytes(orig_stream->buffer() + orig_fields_len_offset, orig_method_len_offset - orig_fields_len_offset);
+  assert(writer.is_valid(), "invariant");
+  // We are sitting just after the original number of field_infos,
+  // so this is a position where we can add (append) new field_infos.
+  const u2 number_of_new_fields = add_field_infos(writer, utf8_indexes);
+  assert(writer.is_valid(), "invariant");
+  const jlong new_method_len_offset = writer.current_offset();
+  // Additional field_infos added, update classfile fields_count
+  writer.write_at_offset(orig_fields_len + number_of_new_fields, new_fields_len_offset);
+  assert(writer.is_valid(), "invariant");
+  // Our current location is now at classfile methods_count
+  const u2 orig_methods_len = position_stream_after_methods(writer,
+                                                            orig_stream,
+                                                            utf8_indexes,
+                                                            register_klass,
+                                                            clinit_method,
+                                                            orig_method_len_offset);
+  const u4 orig_attributes_count_offset = orig_stream->current_offset();
+  // Copy existing methods
+  writer.bytes(orig_stream->buffer() + orig_method_len_offset, orig_attributes_count_offset - orig_method_len_offset);
+  assert(writer.is_valid(), "invariant");
+  // We are sitting just after the original number of method_infos,
+  // so this is a position where we can add (append) new method_infos.
+  u2 number_of_new_methods = add_method_infos(writer, utf8_indexes);
+
+  // We have just added the new methods.
+  //
+  // What about the state of <clinit>?
+  // We would need to do:
+  // 1. Nothing (@Registered(false) annotation)
+  // 2. Build up a new <clinit> - and if the original class already contains a <clinit>,
+  //    merging will be necessary.
+  //
+  if (register_klass) {
+    insert_clinit_method(ik, parser, writer, orig_cp_len, utf8_indexes, flr_register_method_ref_index, clinit_method, THREAD);
+  }
+  number_of_new_methods += clinit_method != NULL ? 0 : register_klass ?
1 : 0; + // Update classfile methods_count + writer.write_at_offset(orig_methods_len + number_of_new_methods, new_method_len_offset); + assert(writer.is_valid(), "invariant"); + // Copy last remaining bytes + writer.bytes(orig_stream->buffer() + orig_attributes_count_offset, orig_stream_size - orig_attributes_count_offset); + assert(writer.is_valid(), "invariant"); + assert(writer.current_offset() > orig_stream->length(), "invariant"); + size_of_new_bytes = (jint)writer.current_offset(); + return new_buffer; +} + +static void log_pending_exception(oop throwable) { + assert(throwable != NULL, "invariant"); + oop msg = java_lang_Throwable::message(throwable); + if (msg != NULL) { + char* text = java_lang_String::as_utf8_string(msg); + if (text != NULL) { + log_error(jfr, system) ("%s", text); + } + } +} + +static bool should_force_instrumentation() { + return !JfrOptionSet::allow_event_retransforms() || JfrEventClassTransformer::is_force_instrumentation(); +} + +static ClassFileStream* create_new_bytes_for_subklass(InstanceKlass* ik, const ClassFileParser& parser, Thread* t) { + assert(JdkJfrEvent::is_a(ik), "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(t)); + jint size_of_new_bytes = 0; + u1* new_bytes = new_bytes_for_lazy_instrumentation(ik, parser, size_of_new_bytes, t); + if (new_bytes == NULL) { + return NULL; + } + assert(new_bytes != NULL, "invariant"); + assert(size_of_new_bytes > 0, "invariant"); + + bool force_instrumentation = should_force_instrumentation(); + if (Jfr::is_recording() || force_instrumentation) { + jint size_instrumented_data = 0; + unsigned char* instrumented_data = NULL; + const jclass super = (jclass)JNIHandles::make_local(ik->super()->java_mirror()); + JfrUpcalls::new_bytes_eager_instrumentation(TRACE_ID(ik), + force_instrumentation, + super, + size_of_new_bytes, + new_bytes, + &size_instrumented_data, + &instrumented_data, + t); + if (t->has_pending_exception()) { + log_pending_exception(t->pending_exception()); + t->clear_pending_exception(); + return NULL; + } + assert(instrumented_data != NULL, "invariant"); + assert(size_instrumented_data > 0, "invariant"); + return new ClassFileStream(instrumented_data, size_instrumented_data, NULL); + } + return new ClassFileStream(new_bytes, size_of_new_bytes, NULL); +} + +static bool cache_bytes(InstanceKlass* ik, ClassFileStream* new_stream, InstanceKlass* new_ik, TRAPS) { + assert(ik != NULL, "invariant"); + assert(new_ik != NULL, "invariant"); + assert(new_ik->name() != NULL, "invariant"); + assert(new_stream != NULL, "invariant"); + assert(!HAS_PENDING_EXCEPTION, "invariant"); + static const bool can_retransform = JfrOptionSet::allow_retransforms(); + if (!can_retransform) { + return true; + } + const jint stream_len = new_stream->length(); + JvmtiCachedClassFileData* p = + (JvmtiCachedClassFileData*)NEW_C_HEAP_ARRAY_RETURN_NULL(u1, offset_of(JvmtiCachedClassFileData, data) + stream_len, mtInternal); + if (p == NULL) { + log_error(jfr, system)("Allocation using C_HEAP_ARRAY for " SIZE_FORMAT + " bytes failed in JfrClassAdapter::on_klass_creation", (size_t)offset_of(JvmtiCachedClassFileData, data) + stream_len); + return false; + } + p->length = stream_len; + memcpy(p->data, new_stream->buffer(), stream_len); + new_ik->set_cached_class_file(p); + JvmtiCachedClassFileData* const cached_class_data = ik->get_cached_class_file(); + if (cached_class_data != NULL) { + os::free(cached_class_data); + ik->set_cached_class_file(NULL); + } + return true; +} + +static instanceKlassHandle 
create_new_instance_klass(InstanceKlass* ik, ClassFileStream* stream, TRAPS) {
+  assert(stream != NULL, "invariant");
+  ResourceMark rm(THREAD);
+  ClassLoaderData* const cld = ik->class_loader_data();
+  Handle pd(THREAD, ik->protection_domain());
+  Symbol* const class_name = ik->name();
+  const char* const klass_name = class_name != NULL ? class_name->as_C_string() : "";
+  TempNewSymbol parsed_name = NULL;
+  ClassFileParser new_parser(stream);
+  if (HAS_PENDING_EXCEPTION) {
+    log_pending_exception(PENDING_EXCEPTION);
+    CLEAR_PENDING_EXCEPTION;
+    return NULL;
+  }
+  instanceKlassHandle new_ik = new_parser.parseClassFile(class_name,
+                                                         cld,
+                                                         pd,
+                                                         parsed_name,
+                                                         true,
+                                                         THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    log_pending_exception(PENDING_EXCEPTION);
+    CLEAR_PENDING_EXCEPTION;
+    return NULL;
+  }
+  assert(new_ik() != NULL, "invariant");
+  assert(new_ik->name() != NULL, "invariant");
+  assert(strncmp(ik->name()->as_C_string(), new_ik()->name()->as_C_string(), strlen(ik->name()->as_C_string())) == 0, "invariant");
+  return cache_bytes(ik, stream, new_ik(), THREAD) ? new_ik : NULL;
+}
+
+static void rewrite_klass_pointer(instanceKlassHandle& ik, instanceKlassHandle new_ik, ClassFileParser& parser, TRAPS) {
+  assert(ik() != NULL, "invariant");
+  assert(new_ik() != NULL, "invariant");
+  assert(new_ik->name() != NULL, "invariant");
+  assert(JdkJfrEvent::is(new_ik()) || JdkJfrEvent::is_subklass(new_ik()), "invariant");
+  assert(!HAS_PENDING_EXCEPTION, "invariant");
+  // assign original InstanceKlass* back onto "its" parser object for proper destruction
+  parser.set_klass_to_deallocate(ik());
+  // now rewrite the original pointer to the newly created InstanceKlass
+  ik = new_ik;
+}
+
+// During retransform/redefine, copy the Method specific trace flags
+// from the previous ik ("the original klass") to the new ik ("the scratch klass").
+// The open code for retransform/redefine does not know about these.
+// In doing this migration here, we ensure the new Methods (defined in the scratch klass)
+// will carry over trace tags from the old Methods being replaced,
+// ensuring flag/tag continuity while being transparent to the open code.
+static void copy_method_trace_flags(const InstanceKlass* the_original_klass, const InstanceKlass* the_scratch_klass) {
+  assert(the_original_klass != NULL, "invariant");
+  assert(the_scratch_klass != NULL, "invariant");
+  assert(the_original_klass->name() == the_scratch_klass->name(), "invariant");
+  const Array<Method*>* old_methods = the_original_klass->methods();
+  const Array<Method*>* new_methods = the_scratch_klass->methods();
+  const bool equal_array_length = old_methods->length() == new_methods->length();
+  // The Method arrays are sorted.
+  // If they are the same length, there is a one-to-one mapping.
+  // If they are unequal, a method was added (currently only
+  // private static methods are allowed to be added), so use lookup.
+  for (int i = 0; i < old_methods->length(); ++i) {
+    const Method* const old_method = old_methods->at(i);
+    Method* const new_method = equal_array_length ?
new_methods->at(i) : + the_scratch_klass->find_method(old_method->name(), old_method->signature()); + assert(new_method != NULL, "invariant"); + assert(new_method->name() == old_method->name(), "invariant"); + assert(new_method->signature() == old_method->signature(), "invariant"); + *new_method->trace_flags_addr() = old_method->trace_flags(); + assert(new_method->trace_flags() == old_method->trace_flags(), "invariant"); + } +} + +static bool is_retransforming(const InstanceKlass* ik, TRAPS) { + assert(ik != NULL, "invariant"); + assert(JdkJfrEvent::is_a(ik), "invariant"); + Symbol* const name = ik->name(); + assert(name != NULL, "invariant"); + Handle class_loader(THREAD, ik->class_loader()); + Handle protection_domain(THREAD, ik->protection_domain()); + // nota bene: use lock-free dictionary lookup + const InstanceKlass* prev_ik = (const InstanceKlass*)SystemDictionary::find(name, class_loader, protection_domain, THREAD); + if (prev_ik == NULL) { + return false; + } + // an existing ik implies a retransform/redefine + assert(prev_ik != NULL, "invariant"); + assert(JdkJfrEvent::is_a(prev_ik), "invariant"); + copy_method_trace_flags(prev_ik, ik); + return true; +} + +// target for TRACE_KLASS_CREATION hook +void JfrEventClassTransformer::on_klass_creation(instanceKlassHandle& ik, ClassFileParser& parser, TRAPS) { + assert(ik() != NULL, "invariant"); + if (JdkJfrEvent::is(ik())) { + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + ClassFileStream* new_stream = create_new_bytes_for_event_klass(InstanceKlass::cast(ik()), parser, THREAD); + if (new_stream == NULL) { + log_error(jfr, system)("JfrClassAdapter: unable to create ClassFileStream"); + return; + } + assert(new_stream != NULL, "invariant"); + instanceKlassHandle new_ik = create_new_instance_klass(InstanceKlass::cast(ik()), new_stream, THREAD); + if (new_ik() == NULL) { + log_error(jfr, system)("JfrClassAdapter: unable to create InstanceKlass"); + return; + } + assert(new_ik() != NULL, "invariant"); + // We now need to explicitly tag the replaced klass as the jdk.jfr.Event klass + assert(!JdkJfrEvent::is(new_ik()), "invariant"); + JdkJfrEvent::tag_as(new_ik()); + assert(JdkJfrEvent::is(new_ik()), "invariant"); + rewrite_klass_pointer(ik, new_ik, parser, THREAD); + return; + } + assert(JdkJfrEvent::is_subklass(ik()), "invariant"); + if (is_retransforming(ik(), THREAD)) { + // not the initial klass load + return; + } + if (ik->is_abstract()) { + // abstract classes are not instrumented + return; + } + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + ClassFileStream* const new_stream = create_new_bytes_for_subklass(ik(), parser, THREAD); + if (NULL == new_stream) { + log_error(jfr, system)("JfrClassAdapter: unable to create ClassFileStream"); + return; + } + assert(new_stream != NULL, "invariant"); + instanceKlassHandle new_ik = create_new_instance_klass(ik(), new_stream, THREAD); + if (new_ik() == NULL) { + log_error(jfr, system)("JfrClassAdapter: unable to create InstanceKlass"); + return; + } + assert(new_ik() != NULL, "invariant"); + // would have been tagged already as a subklass during the normal process of traceid assignment + assert(JdkJfrEvent::is_subklass(new_ik()), "invariant"); + traceid id = ik->trace_id(); + ik->set_trace_id(0); + new_ik->set_trace_id(id); + rewrite_klass_pointer(ik, new_ik, parser, THREAD); +} + +static bool _force_instrumentation = false; +void JfrEventClassTransformer::set_force_instrumentation(bool force_instrumentation) { + _force_instrumentation = force_instrumentation; +} + +bool 
JfrEventClassTransformer::is_force_instrumentation() { + return _force_instrumentation; +} diff --git a/src/share/vm/jfr/instrumentation/jfrEventClassTransformer.hpp b/src/share/vm/jfr/instrumentation/jfrEventClassTransformer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3c5f306b99171c4d88453171f58e601e60f34f73 --- /dev/null +++ b/src/share/vm/jfr/instrumentation/jfrEventClassTransformer.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_INSTRUMENTATION_JFREVENTCLASSTRANSFORMER_HPP +#define SHARE_VM_JFR_INSTRUMENTATION_JFREVENTCLASSTRANSFORMER_HPP + +#include "memory/allocation.hpp" +#include "utilities/exceptions.hpp" + +class ClassFileParser; +class InstanceKlass; + +// +// Intercepts the initial class load of jdk.jfr.Event and subclasses. +// Will replace the sent in InstanceKlass* with a class file schema extended InstanceKlass*. +// +class JfrEventClassTransformer : AllStatic { + public: + static void on_klass_creation(instanceKlassHandle& ik, ClassFileParser& parser, TRAPS); + static void set_force_instrumentation(bool force_instrumentation); + static bool is_force_instrumentation(); +}; + +#endif // SHARE_VM_JFR_INSTRUMENTATION_JFREVENTCLASSTRANSFORMER_HPP diff --git a/src/share/vm/jfr/instrumentation/jfrJvmtiAgent.cpp b/src/share/vm/jfr/instrumentation/jfrJvmtiAgent.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0952cf682f27920ca0f13897a28a3b012d8acaf4 --- /dev/null +++ b/src/share/vm/jfr/instrumentation/jfrJvmtiAgent.cpp @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jvm.h" +#include "jfr/instrumentation/jfrJvmtiAgent.hpp" +#include "jfr/jni/jfrJavaSupport.hpp" +#include "jfr/jni/jfrUpcalls.hpp" +#include "jfr/recorder/access/jfrEventClass.hpp" +#include "jfr/recorder/access/jfrOptionSet.hpp" +#include "jfr/recorder/checkpoint/constant/traceid/jfrTraceId.inline.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "memory/resourceArea.hpp" +#include "prims/jvmtiExport.hpp" +#include "runtime/interfaceSupport.hpp" +#include "runtime/thread.inline.hpp" +#include "utilities/exceptions.hpp" + +static const size_t ERROR_MSG_BUFFER_SIZE = 256; +static JfrJvmtiAgent* agent = NULL; +static jvmtiEnv* jfr_jvmti_env = NULL; + +static void check_jvmti_error(jvmtiEnv* jvmti, jvmtiError errnum, const char* str) { + if (errnum != JVMTI_ERROR_NONE) { + char* errnum_str = NULL; + jvmti->GetErrorName(errnum, &errnum_str); + log_error(jfr, system)("ERROR: JfrJvmtiAgent: " INT32_FORMAT " (%s): %s\n", + errnum, + NULL == errnum_str ? "Unknown" : errnum_str, + NULL == str ? "" : str); + } +} + +static jvmtiError set_event_notification_mode(jvmtiEventMode mode, + jvmtiEvent event, + jthread event_thread, + ...) { + if (jfr_jvmti_env == NULL) { + return JVMTI_ERROR_NONE; + } + const jvmtiError jvmti_ret_code = jfr_jvmti_env->SetEventNotificationMode(mode, event, event_thread); + check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, "SetEventNotificationMode"); + return jvmti_ret_code; +} + +static jvmtiError update_class_file_load_hook_event(jvmtiEventMode mode) { + return set_event_notification_mode(mode, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, NULL); +} + +static JavaThread* current_java_thread() { + Thread* this_thread = Thread::current(); + assert(this_thread != NULL && this_thread->is_Java_thread(), "invariant"); + return static_cast(this_thread); +} + +// jvmti event callbacks require C linkage +extern "C" void JNICALL jfr_on_class_file_load_hook(jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jclass class_being_redefined, + jobject loader, + const char* name, + jobject protection_domain, + jint class_data_len, + const unsigned char* class_data, + jint* new_class_data_len, + unsigned char** new_class_data) { + if (class_being_redefined == NULL) { + return; + } + JavaThread* jt = JavaThread::thread_from_jni_environment(jni_env); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));; + ThreadInVMfromNative tvmfn(jt); + JfrUpcalls::on_retransform(JfrTraceId::get(class_being_redefined), + class_being_redefined, + class_data_len, + class_data, + new_class_data_len, + new_class_data, + jt); +} + +// caller needs ResourceMark +static jclass* create_classes_array(jint classes_count, TRAPS) { + assert(classes_count > 0, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD)); + ThreadInVMfromNative tvmfn((JavaThread*)THREAD); + jclass* const classes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, jclass, classes_count); + if (NULL == classes) { + char error_buffer[ERROR_MSG_BUFFER_SIZE]; + jio_snprintf(error_buffer, ERROR_MSG_BUFFER_SIZE, + "Thread local allocation (native) of " SIZE_FORMAT " bytes failed " + "in retransform classes", sizeof(jclass) * classes_count); + log_error(jfr, system)("%s", error_buffer); + JfrJavaSupport::throw_out_of_memory_error(error_buffer, CHECK_NULL); + } + return classes; +} + +static void log_and_throw(TRAPS) { + 
if (!HAS_PENDING_EXCEPTION) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD)); + ThreadInVMfromNative tvmfn((JavaThread*)THREAD); + log_error(jfr, system)("JfrJvmtiAgent::retransformClasses failed"); + JfrJavaSupport::throw_class_format_error("JfrJvmtiAgent::retransformClasses failed", THREAD); + } +} + +static void check_exception_and_log(JNIEnv* env, TRAPS) { + assert(env != NULL, "invariant"); + if (env->ExceptionOccurred()) { + // array index out of bound + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD)); + ThreadInVMfromNative tvmfn((JavaThread*)THREAD); + log_error(jfr, system)("GetObjectArrayElement threw an exception"); + return; + } +} + +void JfrJvmtiAgent::retransform_classes(JNIEnv* env, jobjectArray classes_array, TRAPS) { + assert(env != NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD)); + if (classes_array == NULL) { + return; + } + const jint classes_count = env->GetArrayLength(classes_array); + if (classes_count <= 0) { + return; + } + ResourceMark rm(THREAD); + jclass* const classes = create_classes_array(classes_count, CHECK); + assert(classes != NULL, "invariant"); + for (jint i = 0; i < classes_count; i++) { + jclass clz = (jclass)env->GetObjectArrayElement(classes_array, i); + check_exception_and_log(env, THREAD); + + // inspecting the oop/klass requires a thread transition + { + ThreadInVMfromNative transition((JavaThread*)THREAD); + if (JdkJfrEvent::is_a(clz)) { + // should have been tagged already + assert(JdkJfrEvent::is_subklass(clz), "invariant"); + } else { + // outside the event hierarchy + JdkJfrEvent::tag_as_host(clz); + } + } + + classes[i] = clz; + } + if (jfr_jvmti_env->RetransformClasses(classes_count, classes) != JVMTI_ERROR_NONE) { + log_and_throw(THREAD); + } +} + +static jvmtiError register_callbacks(JavaThread* jt) { + assert(jfr_jvmti_env != NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt)); + jvmtiEventCallbacks callbacks; + /* Set callbacks */ + memset(&callbacks, 0, sizeof(callbacks)); + callbacks.ClassFileLoadHook = jfr_on_class_file_load_hook; + const jvmtiError jvmti_ret_code = jfr_jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks)); + check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, "SetEventCallbacks"); + return jvmti_ret_code; +} + +static jvmtiError register_capabilities(JavaThread* jt) { + assert(jfr_jvmti_env != NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt)); + jvmtiCapabilities capabilities; + /* Add JVMTI capabilities */ + (void)memset(&capabilities, 0, sizeof(capabilities)); + capabilities.can_retransform_classes = 1; + capabilities.can_retransform_any_class = 1; + const jvmtiError jvmti_ret_code = jfr_jvmti_env->AddCapabilities(&capabilities); + check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, "Add Capabilities"); + return jvmti_ret_code; +} + +static jint create_jvmti_env(JavaThread* jt) { + assert(jfr_jvmti_env == NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt)); + extern struct JavaVM_ main_vm; + JavaVM* vm = &main_vm; + return vm->GetEnv((void **)&jfr_jvmti_env, JVMTI_VERSION); +} + +static jvmtiError unregister_callbacks(JavaThread* jt) { + if (jfr_jvmti_env == NULL) { + return JVMTI_ERROR_NONE; + } + jvmtiEventCallbacks callbacks; + /* Set empty callbacks */ + memset(&callbacks, 0, sizeof(callbacks)); + const jvmtiError jvmti_ret_code = jfr_jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks)); + check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, 
"SetEventCallbacks"); + return jvmti_ret_code; +} + +JfrJvmtiAgent::JfrJvmtiAgent() {} + +JfrJvmtiAgent::~JfrJvmtiAgent() { + JavaThread* jt = current_java_thread(); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt)); + ThreadToNativeFromVM transition(jt); + update_class_file_load_hook_event(JVMTI_DISABLE); + unregister_callbacks(jt); + if (jfr_jvmti_env != NULL) { + jfr_jvmti_env->DisposeEnvironment(); + jfr_jvmti_env = NULL; + } + agent = NULL; +} + +static bool initialize() { + JavaThread* const jt = current_java_thread(); + assert(jt != NULL, "invariant"); + assert(jt->thread_state() == _thread_in_vm, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt)); + ThreadToNativeFromVM transition(jt); + if (create_jvmti_env(jt) == JNI_ERR) { + assert(jfr_jvmti_env == NULL, "invariant"); + return false; + } + assert(jfr_jvmti_env != NULL, "invariant"); + if (register_capabilities(jt) != JVMTI_ERROR_NONE) { + return false; + } + if (register_callbacks(jt) != JVMTI_ERROR_NONE) { + return false; + } + if (update_class_file_load_hook_event(JVMTI_ENABLE) != JVMTI_ERROR_NONE) { + return false; + } + return true; +} + +bool JfrJvmtiAgent::create() { + assert(jfr_jvmti_env == NULL, "invariant"); + agent = new JfrJvmtiAgent(); + if (agent == NULL) { + return false; + } + if (!initialize()) { + delete agent; + agent = NULL; + return false; + } + return true; +} + +void JfrJvmtiAgent::destroy() { + if (agent != NULL) { + delete agent; + agent = NULL; + } +} + diff --git a/src/share/vm/jfr/instrumentation/jfrJvmtiAgent.hpp b/src/share/vm/jfr/instrumentation/jfrJvmtiAgent.hpp new file mode 100644 index 0000000000000000000000000000000000000000..8750a60371d1922c5712a90414ee94af46dcc4a9 --- /dev/null +++ b/src/share/vm/jfr/instrumentation/jfrJvmtiAgent.hpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_JFR_INSTRUMENTATION_JFRJVMTIAGENT_HPP +#define SHARE_VM_JFR_INSTRUMENTATION_JFRJVMTIAGENT_HPP + +#include "jfr/utilities/jfrAllocation.hpp" + +class JfrJvmtiAgent : public JfrCHeapObj { + friend class JfrRecorder; + private: + JfrJvmtiAgent(); + ~JfrJvmtiAgent(); + static bool create(); + static void destroy(); + public: + static void retransform_classes(JNIEnv* env, jobjectArray classes, TRAPS); +}; + +#endif // SHARE_VM_JFR_INSTRUMENTATION_JFRJVMTIAGENT_HPP diff --git a/src/share/vm/jfr/jfr.cpp b/src/share/vm/jfr/jfr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..916f466d3e57ebd6a730947b0b9198d765aff1ff --- /dev/null +++ b/src/share/vm/jfr/jfr.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jfr/jfr.hpp" +#include "jfr/recorder/access/jfrThreadData.hpp" +#include "jfr/recorder/jfrRecorder.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp" +#include "jfr/recorder/repository/jfrEmergencyDump.hpp" + +bool Jfr::is_enabled() { + return JfrRecorder::is_enabled(); +} + +bool Jfr::is_disabled() { + return JfrRecorder::is_disabled(); +} + +bool Jfr::is_recording() { + return JfrRecorder::is_recording(); +} + +jint Jfr::on_vm_init() { + return JfrRecorder::on_vm_init() ? JNI_OK : JNI_ERR; +} + +jint Jfr::on_vm_start() { + return JfrRecorder::on_vm_start() ? JNI_OK : JNI_ERR; +} + +void Jfr::on_unloading_classes() { + if (JfrRecorder::is_created()) { + JfrCheckpointManager::write_constant_tag_set_for_unloaded_classes(); + } +} + +void Jfr::on_thread_exit(JavaThread* thread) { + if (JfrRecorder::is_recording()) { + JfrThreadData::on_exit(thread); + } +} + +void Jfr::on_thread_destruct(Thread* thread) { + if (JfrRecorder::is_created()) { + JfrThreadData::on_destruct(thread); + } +} + +void Jfr::on_vm_shutdown(bool exception_handler) { + if (JfrRecorder::is_recording()) { + JfrEmergencyDump::on_vm_shutdown(exception_handler); + } +} diff --git a/src/share/vm/jfr/jfr.hpp b/src/share/vm/jfr/jfr.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ccc32253f2e5bd01df649673088e463046a97ffa --- /dev/null +++ b/src/share/vm/jfr/jfr.hpp @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_JFR_HPP +#define SHARE_VM_JFR_JFR_HPP + +#include "jni.h" +#include "memory/allocation.hpp" + +class JavaThread; +class Thread; + +// +// The VM interface to Flight Recorder. +// +class Jfr : AllStatic { + public: + static bool is_enabled(); + static bool is_disabled(); + static bool is_recording(); + static jint on_vm_init(); + static jint on_vm_start(); + static void on_unloading_classes(); + static void on_thread_exit(JavaThread* thread); + static void on_thread_destruct(Thread* thread); + static void on_vm_shutdown(bool exception_handler = false); +}; + +#endif // SHARE_VM_JFR_JFR_HPP diff --git a/src/share/vm/jfr/jni/jfrGetAllEventClasses.cpp b/src/share/vm/jfr/jni/jfrGetAllEventClasses.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d43074c1910723fcc867d69a43593f005eef3c30 --- /dev/null +++ b/src/share/vm/jfr/jni/jfrGetAllEventClasses.cpp @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/symbolTable.hpp"
+#include "jfr/jni/jfrGetAllEventClasses.hpp"
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/recorder/access/jfrEventClass.hpp"
+#include "oops/instanceKlass.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/stack.inline.hpp"
+
+ // incremented during class unloading (safepoint) for each unloaded event class
+static jlong unloaded_event_classes = 0;
+
+jlong JfrEventClasses::unloaded_event_classes_count() {
+ return unloaded_event_classes;
+}
+
+void JfrEventClasses::increment_unloaded_event_class() {
+ // incremented during class unloading (safepoint) for each unloaded event class
+ assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+ ++unloaded_event_classes;
+}
+
+static jobject empty_java_util_arraylist = NULL;
+
+static oop new_java_util_arraylist(TRAPS) {
+ DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+ JavaValue result(T_OBJECT);
+ JfrJavaArguments args(&result, "java/util/ArrayList", "<init>", "()V", CHECK_NULL);
+ JfrJavaSupport::new_object(&args, CHECK_NULL);
+ return (oop)result.get_jobject();
+}
+
+static bool initialize(TRAPS) {
+ static bool initialized = false;
+ if (!initialized) {
+ unloaded_event_classes = 0;
+ assert(NULL == empty_java_util_arraylist, "invariant");
+ const oop array_list = new_java_util_arraylist(CHECK_false);
+ empty_java_util_arraylist = JfrJavaSupport::global_jni_handle(array_list, THREAD);
+ initialized = empty_java_util_arraylist != NULL;
+ }
+ return initialized;
+}
+
+/*
+ * Abstract klasses are filtered out unconditionally.
+ * If a klass is not yet initialized, i.e yet to run its <clinit>,
+ * it is also filtered out so we don't accidentally
+ * trigger initialization.
+ */
+static bool is_whitelisted(const Klass* k) {
+ assert(k != NULL, "invariant");
+ return !(k->is_abstract() || k->should_be_initialized());
+}
+
+static void fill_klasses(GrowableArray<const void*>& event_subklasses, const Klass* event_klass, Thread* thread) {
+ assert(event_subklasses.length() == 0, "invariant");
+ assert(event_klass != NULL, "invariant");
+ DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
+
+ Stack<const Klass*, mtTracing> mark_stack;
+ MutexLocker ml(Compile_lock, thread);
+ mark_stack.push(event_klass->subklass());
+
+ while (!mark_stack.is_empty()) {
+ const Klass* const current = mark_stack.pop();
+ assert(current != NULL, "null element in stack!");
+
+ if (is_whitelisted(current)) {
+ event_subklasses.append(current);
+ }
+
+ // subclass (depth)
+ const Klass* next_klass = current->subklass();
+ if (next_klass != NULL) {
+ mark_stack.push(next_klass);
+ }
+
+ // siblings (breadth)
+ next_klass = current->next_sibling();
+ if (next_klass != NULL) {
+ mark_stack.push(next_klass);
+ }
+ }
+ assert(mark_stack.is_empty(), "invariant");
+}
+
+ static void transform_klasses_to_local_jni_handles(GrowableArray<const void*>& event_subklasses, Thread* thread) {
+ assert(event_subklasses.is_nonempty(), "invariant");
+ DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
+
+ for (int i = 0; i < event_subklasses.length(); ++i) {
+ const InstanceKlass* k = static_cast<const InstanceKlass*>(event_subklasses.at(i));
+ assert(is_whitelisted(k), "invariant");
+ event_subklasses.at_put(i, JfrJavaSupport::local_jni_handle(k->java_mirror(), thread));
+ }
+}
+
+static const int initial_size_growable_array = 64;
+
+jobject JfrEventClasses::get_all_event_classes(TRAPS) {
+ DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
+ initialize(THREAD);
+ assert(empty_java_util_arraylist != NULL, "should have been setup already!");
+ static const char jdk_jfr_event_name[] = "jdk/jfr/Event";
+ unsigned int unused_hash = 0;
+ Symbol* const event_klass_name = SymbolTable::lookup_only(jdk_jfr_event_name, sizeof jdk_jfr_event_name - 1, unused_hash);
+
+ if (NULL == event_klass_name) {
+ // not loaded yet
+ return empty_java_util_arraylist;
+ }
+
+ const Klass* const klass = SystemDictionary::resolve_or_null(event_klass_name, THREAD);
+ assert(klass != NULL, "invariant");
+ assert(JdkJfrEvent::is(klass), "invariant");
+
+ if (klass->subklass() == NULL) {
+ return empty_java_util_arraylist;
+ }
+
+ ResourceMark rm(THREAD);
+ GrowableArray<const void*> event_subklasses(THREAD, initial_size_growable_array);
+ fill_klasses(event_subklasses, klass, THREAD);
+
+ if (event_subklasses.is_empty()) {
+ return empty_java_util_arraylist;
+ }
+
+ transform_klasses_to_local_jni_handles(event_subklasses, THREAD);
+
+ Handle h_array_list(THREAD, new_java_util_arraylist(THREAD));
+ assert(h_array_list.not_null(), "invariant");
+
+ static const char add_method_name[] = "add";
+ static const char add_method_signature[] = "(Ljava/lang/Object;)Z";
+ const Klass* const array_list_klass = JfrJavaSupport::klass(empty_java_util_arraylist);
+ assert(array_list_klass != NULL, "invariant");
+
+ const Symbol* const add_method_sym = SymbolTable::lookup(add_method_name, sizeof add_method_name - 1, THREAD);
+ assert(add_method_sym != NULL, "invariant");
+
+ const Symbol* const add_method_sig_sym = SymbolTable::lookup(add_method_signature, sizeof add_method_signature - 1, THREAD);
+ assert(add_method_sig_sym != NULL, "invariant");
+
+ JavaValue result(T_BOOLEAN);
+ for (int i = 0; i < event_subklasses.length(); ++i) {
+ const jclass clazz = (const jclass)event_subklasses.at(i);
+ 
assert(JdkJfrEvent::is_subklass(clazz), "invariant"); + JfrJavaArguments args(&result, array_list_klass, add_method_sym, add_method_sig_sym); + args.set_receiver(h_array_list()); + args.push_jobject(clazz); + JfrJavaSupport::call_virtual(&args, THREAD); + if (HAS_PENDING_EXCEPTION || JNI_FALSE == result.get_jboolean()) { + return empty_java_util_arraylist; + } + } + return JfrJavaSupport::local_jni_handle(h_array_list(), THREAD); +} diff --git a/src/share/vm/jfr/jni/jfrGetAllEventClasses.hpp b/src/share/vm/jfr/jni/jfrGetAllEventClasses.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a85c9a766d036bf41146b6bc5538f7f7b8e16f76 --- /dev/null +++ b/src/share/vm/jfr/jni/jfrGetAllEventClasses.hpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_VM_JFR_JNI_JFRGETALLEVENTCLASSES_HPP +#define SHARE_VM_JFR_JNI_JFRGETALLEVENTCLASSES_HPP + +#include "jni.h" +#include "memory/allocation.hpp" +#include "utilities/exceptions.hpp" + +// +// Responsible for the delivery of currently loaded jdk.jfr.Event subklasses to Java. +// +class JfrEventClasses : AllStatic { + public: + static void increment_unloaded_event_class(); + static jlong unloaded_event_classes_count(); + static jobject get_all_event_classes(TRAPS); +}; + +#endif // SHARE_VM_JFR_JNI_JFRGETALLEVENTCLASSES_HPP diff --git a/src/share/vm/jfr/jni/jfrJavaCall.cpp b/src/share/vm/jfr/jni/jfrJavaCall.cpp new file mode 100644 index 0000000000000000000000000000000000000000..39030837b2a9b380fc3b910455df2ad8ebe4a4c0 --- /dev/null +++ b/src/share/vm/jfr/jni/jfrJavaCall.cpp @@ -0,0 +1,380 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/systemDictionary.hpp" +#include "jfr/jni/jfrJavaCall.hpp" +#include "jfr/jni/jfrJavaSupport.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/javaCalls.hpp" +#include "utilities/globalDefinitions.hpp" + +#ifdef ASSERT +static bool is_large_value(const JavaValue& value) { + return value.get_type() == T_LONG || value.get_type() == T_DOUBLE; +} +#endif // ASSERT + +static Symbol* resolve(const char* str, TRAPS) { + assert(str != NULL, "invariant"); + return SymbolTable::lookup(str, (int)strlen(str), THREAD); +} + +static Klass* resolve(Symbol* k_sym, TRAPS) { + assert(k_sym != NULL, "invariant"); + return SystemDictionary::resolve_or_fail(k_sym, true, THREAD); +} + +JfrJavaArguments::Parameters::Parameters() : _storage_index(0), _java_stack_slots(0) { + JavaValue value(T_VOID); + push(value); +} + +void JfrJavaArguments::Parameters::push(const JavaValue& value) { + assert(_storage != NULL, "invariant"); + assert(!is_large_value(value), "invariant"); + assert(_storage_index < SIZE, "invariant"); + _storage[_storage_index++] = value; + _java_stack_slots++; +} + +void JfrJavaArguments::Parameters::push_large(const JavaValue& value) { + assert(_storage != NULL, "invariant"); + assert(is_large_value(value), "invariant"); + assert(_storage_index < SIZE, "invariant"); + _storage[_storage_index++] = value; + _java_stack_slots += 2; +} + +void JfrJavaArguments::Parameters::set_receiver(const oop receiver) { + assert(_storage != NULL, "invariant"); + assert(receiver != NULL, "invariant"); + JavaValue value(T_OBJECT); + value.set_jobject((jobject)receiver); + _storage[0] = value; +} + +void JfrJavaArguments::Parameters::set_receiver(Handle receiver) { + set_receiver(receiver()); +} + +oop JfrJavaArguments::Parameters::receiver() const { + assert(has_receiver(), "invariant"); + assert(_storage[0].get_type() == T_OBJECT, "invariant"); + return (oop)_storage[0].get_jobject(); +} + +bool JfrJavaArguments::Parameters::has_receiver() const { + assert(_storage != NULL, "invariant"); + assert(_storage_index >= 1, "invariant"); + assert(_java_stack_slots >= 1, "invariant"); + return _storage[0].get_type() == T_OBJECT; +} + +void JfrJavaArguments::Parameters::push_oop(const oop obj) { + JavaValue value(T_OBJECT); + value.set_jobject((jobject)obj); + push(value); +} + +void JfrJavaArguments::Parameters::push_oop(Handle h_obj) { + push_oop(h_obj()); +} + +void JfrJavaArguments::Parameters::push_jobject(jobject h) { + JavaValue value(T_ADDRESS); + value.set_jobject(h); + push(value); +} + +void JfrJavaArguments::Parameters::push_jint(jint i) { + JavaValue value(T_INT); + value.set_jint(i); + push(value); +} + +void JfrJavaArguments::Parameters::push_jfloat(jfloat f) { + JavaValue value(T_FLOAT); + value.set_jfloat(f); + push(value); +} + +void JfrJavaArguments::Parameters::push_jdouble(jdouble d) { + JavaValue value(T_DOUBLE); + value.set_jdouble(d); + push_large(value); +} + +void JfrJavaArguments::Parameters::push_jlong(jlong l) { + JavaValue value(T_LONG); + value.set_jlong(l); + push_large(value); +} + +// including receiver (even if there is none) +inline int JfrJavaArguments::Parameters::length() const { + assert(_storage_index >= 1, "invariant"); + return _storage_index; +} + +inline int 
JfrJavaArguments::Parameters::java_stack_slots() const { + return _java_stack_slots; +} + +const JavaValue& JfrJavaArguments::Parameters::values(int idx) const { + assert(idx >= 0, "invariant"); + assert(idx < SIZE, "invariant"); + return _storage[idx]; +} + +void JfrJavaArguments::Parameters::copy(JavaCallArguments& args, TRAPS) const { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + if (has_receiver()) { + args.set_receiver(Handle(THREAD, receiver())); + } + for (int i = 1; i < length(); ++i) { + switch(values(i).get_type()) { + case T_BOOLEAN: + case T_CHAR: + case T_SHORT: + case T_INT: + args.push_int(values(i).get_jint()); + break; + case T_LONG: + args.push_long(values(i).get_jlong()); + break; + case T_FLOAT: + args.push_float(values(i).get_jfloat()); + break; + case T_DOUBLE: + args.push_double(values(i).get_jdouble()); + break; + case T_OBJECT: + args.push_oop(Handle(THREAD, (oop)values(i).get_jobject())); + break; + case T_ADDRESS: + args.push_jobject(values(i).get_jobject()); + break; + default: + ShouldNotReachHere(); + } + } +} + +JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(0) { + assert(result != NULL, "invariant"); +} + +JfrJavaArguments::JfrJavaArguments(JavaValue* result, const char* klass_name, const char* name, const char* signature, TRAPS) : + _result(result), + _klass(NULL), + _name(NULL), + _signature(NULL), + _array_length(0) { + assert(result != NULL, "invariant"); + if (klass_name != NULL) { + set_klass(klass_name, CHECK); + } + if (name != NULL) { + set_name(name, CHECK); + } + if (signature != NULL) { + set_signature(signature, THREAD); + } +} + +JfrJavaArguments::JfrJavaArguments(JavaValue* result, const Klass* klass, const Symbol* name, const Symbol* signature) : _result(result), + _klass(NULL), + _name(NULL), + _signature(NULL), + _array_length(0) { + assert(result != NULL, "invariant"); + if (klass != NULL) { + set_klass(klass); + } + if (name != NULL) { + set_name(name); + } + if (signature != NULL) { + set_signature(signature); + } +} + +Klass* JfrJavaArguments::klass() const { + assert(_klass != NULL, "invariant"); + return const_cast(_klass); +} + +void JfrJavaArguments::set_klass(const char* klass_name, TRAPS) { + assert(klass_name != NULL, "invariant"); + Symbol* const k_sym = resolve(klass_name, CHECK); + assert(k_sym != NULL, "invariant"); + const Klass* const klass = resolve(k_sym, CHECK); + set_klass(klass); +} + +void JfrJavaArguments::set_klass(const Klass* klass) { + assert(klass != NULL, "invariant"); + _klass = klass; +} + +Symbol* JfrJavaArguments::name() const { + assert(_name != NULL, "invariant"); + return const_cast(_name); +} + +void JfrJavaArguments::set_name(const char* name, TRAPS) { + assert(name != NULL, "invariant"); + const Symbol* const sym = resolve(name, CHECK); + set_name(sym); +} + +void JfrJavaArguments::set_name(const Symbol* name) { + assert(name != NULL, "invariant"); + _name = name; +} + +Symbol* JfrJavaArguments::signature() const { + assert(_signature != NULL, "invariant"); + return const_cast(_signature); +} + +void JfrJavaArguments::set_signature(const char* signature, TRAPS) { + assert(signature != NULL, "invariant"); + const Symbol* const sym = resolve(signature, CHECK); + set_signature(sym); +} + +void JfrJavaArguments::set_signature(const Symbol* signature) { + assert(signature != NULL, "invariant"); + _signature = signature; +} + +int JfrJavaArguments::array_length() const { + return _array_length; +} + +void 
JfrJavaArguments::set_array_length(int length) { + assert(length >= 0, "invariant"); + _array_length = length; +} + +JavaValue* JfrJavaArguments::result() const { + assert(_result != NULL, "invariant"); + return const_cast(_result); +} + +int JfrJavaArguments::length() const { + return _params.length(); +} + +bool JfrJavaArguments::has_receiver() const { + return _params.has_receiver(); +} + +oop JfrJavaArguments::receiver() const { + return _params.receiver(); +} + +void JfrJavaArguments::set_receiver(const oop receiver) { + _params.set_receiver(receiver); +} + +void JfrJavaArguments::set_receiver(Handle receiver) { + _params.set_receiver(receiver); +} + +void JfrJavaArguments::push_oop(const oop obj) { + _params.push_oop(obj); +} + +void JfrJavaArguments::push_oop(Handle h_obj) { + _params.push_oop(h_obj); +} + +void JfrJavaArguments::push_jobject(jobject h) { + _params.push_jobject(h); +} + +void JfrJavaArguments::push_int(jint i) { + _params.push_jint(i); +} + +void JfrJavaArguments::push_float(jfloat f) { + _params.push_jfloat(f); +} + +void JfrJavaArguments::push_double(jdouble d) { + _params.push_jdouble(d); +} + +void JfrJavaArguments::push_long(jlong l) { + _params.push_jlong(l); +} + +const JavaValue& JfrJavaArguments::param(int idx) const { + return _params.values(idx); +} + +int JfrJavaArguments::java_call_arg_slots() const { + return _params.java_stack_slots(); +} + +void JfrJavaArguments::copy(JavaCallArguments& args, TRAPS) { + _params.copy(args, THREAD); +} + +void JfrJavaCall::call_static(JfrJavaArguments* args, TRAPS) { + assert(args != NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + JavaCallArguments jcas(args->java_call_arg_slots()); + args->copy(jcas, CHECK); + JavaCalls::call_static(args->result(), args->klass(), args->name(), args->signature(), &jcas, THREAD); +} + +void JfrJavaCall::call_special(JfrJavaArguments* args, TRAPS) { + assert(args != NULL, "invariant"); + assert(args->has_receiver(), "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + JavaCallArguments jcas(args->java_call_arg_slots()); + args->copy(jcas, CHECK); + JavaCalls::call_special(args->result(), args->klass(), args->name(), args->signature(), &jcas, THREAD); +} + +void JfrJavaCall::call_virtual(JfrJavaArguments* args, TRAPS) { + assert(args != NULL, "invariant"); + assert(args->has_receiver(), "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + ResourceMark rm(THREAD); + HandleMark hm(THREAD); + JavaCallArguments jcas(args->java_call_arg_slots()); + args->copy(jcas, CHECK); + JavaCalls::call_virtual(args->result(), args->klass(), args->name(), args->signature(), &jcas, THREAD); +} diff --git a/src/share/vm/jfr/jni/jfrJavaCall.hpp b/src/share/vm/jfr/jni/jfrJavaCall.hpp new file mode 100644 index 0000000000000000000000000000000000000000..0c0a5207e9c390e612720c7f468e16e7d3e18393 --- /dev/null +++ b/src/share/vm/jfr/jni/jfrJavaCall.hpp @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_JNI_JFRJAVACALL_HPP +#define SHARE_VM_JFR_JNI_JFRJAVACALL_HPP + +#include "jni.h" +#include "jfr/utilities/jfrAllocation.hpp" +#include "utilities/exceptions.hpp" + +class JavaCallArguments; +class JavaThread; +class JavaValue; +class Klass; +class Symbol; + +class JfrJavaArguments : public StackObj { + friend class JfrJavaCall; + public: + JfrJavaArguments(JavaValue* result); + JfrJavaArguments(JavaValue* result, const char* klass_name, const char* name, const char* signature, TRAPS); + JfrJavaArguments(JavaValue* result, const Klass* klass, const Symbol* name, const Symbol* signature); + + Klass* klass() const; + void set_klass(const char* klass_name, TRAPS); + void set_klass(const Klass* klass); + + Symbol* name() const; + void set_name(const char* name, TRAPS); + void set_name(const Symbol* name); + + Symbol* signature() const; + void set_signature(const char* signature, TRAPS); + void set_signature(const Symbol* signature); + + int array_length() const; + void set_array_length(int length); + + JavaValue* result() const; + + bool has_receiver() const; + void set_receiver(const oop receiver); + void set_receiver(Handle receiver); + oop receiver() const; + + // parameters + void push_oop(const oop obj); + void push_oop(Handle h_obj); + void push_jobject(jobject h); + void push_int(jint i); + void push_double(jdouble d); + void push_long(jlong l); + void push_float(jfloat f); + + int length() const; + const JavaValue& param(int idx) const; + + private: + class Parameters { + friend class JfrJavaArguments; + private: + enum { SIZE = 16}; + JavaValue _storage[SIZE]; + int _storage_index; + int _java_stack_slots; + + Parameters(); + Parameters(const Parameters&); // no impl + Parameters& operator=(const Parameters&); // no impl + + void push(const JavaValue& value); + void push_large(const JavaValue& value); + + void push_oop(const oop obj); + void push_oop(Handle h_obj); + void push_jobject(jobject h); + void push_jint(jint i); + void push_jdouble(jdouble d); + void push_jlong(jlong l); + void push_jfloat(jfloat f); + + bool has_receiver() const; + void set_receiver(const oop receiver); + void set_receiver(Handle receiver); + oop receiver() const; + + int length() const; + int java_stack_slots() const; + + void copy(JavaCallArguments& args, TRAPS) const; + const JavaValue& values(int idx) const; + }; + + Parameters _params; + const JavaValue* const _result; + const Klass* _klass; + const Symbol* _name; + const Symbol* _signature; + int _array_length; + + int java_call_arg_slots() const; + void copy(JavaCallArguments& args, TRAPS); +}; + +class JfrJavaCall : public AllStatic { + friend class JfrJavaSupport; + private: + static void call_static(JfrJavaArguments* args, TRAPS); + static void call_special(JfrJavaArguments* args, TRAPS); + static void 
call_virtual(JfrJavaArguments* args, TRAPS); +}; + +#endif // SHARE_VM_JFR_JNI_JFRJAVACALL_HPP diff --git a/src/share/vm/jfr/jni/jfrJavaSupport.cpp b/src/share/vm/jfr/jni/jfrJavaSupport.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9c2c64fea6360e49022d795de1a6a8650ea8695d --- /dev/null +++ b/src/share/vm/jfr/jni/jfrJavaSupport.cpp @@ -0,0 +1,579 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jni.h" +#include "classfile/javaClasses.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/systemDictionary.hpp" +#include "classfile/vmSymbols.hpp" +#include "jfr/jni/jfrJavaCall.hpp" +#include "jfr/jni/jfrJavaSupport.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "memory/resourceArea.hpp" +#include "oops/instanceOop.hpp" +#include "oops/oop.inline.hpp" +#include "oops/objArrayKlass.hpp" +#include "oops/objArrayOop.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/fieldDescriptor.hpp" +#include "runtime/java.hpp" +#include "runtime/jniHandles.hpp" +#include "runtime/synchronizer.hpp" +#include "runtime/thread.inline.hpp" + +#ifdef ASSERT +void JfrJavaSupport::check_java_thread_in_vm(Thread* t) { + assert(t != NULL, "invariant"); + assert(t->is_Java_thread(), "invariant"); + assert(((JavaThread*)t)->thread_state() == _thread_in_vm, "invariant"); +} + +void JfrJavaSupport::check_java_thread_in_native(Thread* t) { + assert(t != NULL, "invariant"); + assert(t->is_Java_thread(), "invariant"); + assert(((JavaThread*)t)->thread_state() == _thread_in_native, "invariant"); +} +#endif + +/* + * Handles and references + */ +jobject JfrJavaSupport::local_jni_handle(const oop obj, Thread* t) { + DEBUG_ONLY(check_java_thread_in_vm(t)); + return t->active_handles()->allocate_handle(obj); +} + +jobject JfrJavaSupport::local_jni_handle(const jobject handle, Thread* t) { + DEBUG_ONLY(check_java_thread_in_vm(t)); + const oop obj = JNIHandles::resolve(handle); + return obj == NULL ? NULL : local_jni_handle(obj, t); +} + +void JfrJavaSupport::destroy_local_jni_handle(jobject handle) { + JNIHandles::destroy_local(handle); +} + +jobject JfrJavaSupport::global_jni_handle(const oop obj, Thread* t) { + DEBUG_ONLY(check_java_thread_in_vm(t)); + HandleMark hm(t); + return JNIHandles::make_global(Handle(t, obj)); +} + +jobject JfrJavaSupport::global_jni_handle(const jobject handle, Thread* t) { + const oop obj = JNIHandles::resolve(handle); + return obj == NULL ? 
NULL : global_jni_handle(obj, t); +} + +void JfrJavaSupport::destroy_global_jni_handle(jobject handle) { + JNIHandles::destroy_global(handle); +} + +oop JfrJavaSupport::resolve_non_null(jobject obj) { + return JNIHandles::resolve_non_null(obj); +} + +/* + * Method invocation + */ +void JfrJavaSupport::call_static(JfrJavaArguments* args, TRAPS) { + JfrJavaCall::call_static(args, THREAD); +} + +void JfrJavaSupport::call_special(JfrJavaArguments* args, TRAPS) { + JfrJavaCall::call_special(args, THREAD); +} + +void JfrJavaSupport::call_virtual(JfrJavaArguments* args, TRAPS) { + JfrJavaCall::call_virtual(args, THREAD); +} + +void JfrJavaSupport::notify_all(jobject object, TRAPS) { + assert(object != NULL, "invariant"); + DEBUG_ONLY(check_java_thread_in_vm(THREAD)); + HandleMark hm(THREAD); + Handle h_obj(THREAD, resolve_non_null(object)); + assert(h_obj.not_null(), "invariant"); + ObjectSynchronizer::jni_enter(h_obj, THREAD); + ObjectSynchronizer::notifyall(h_obj, THREAD); + ObjectSynchronizer::jni_exit(h_obj(), THREAD); + DEBUG_ONLY(check_java_thread_in_vm(THREAD)); +} + +/* + * Object construction + */ +static void object_construction(JfrJavaArguments* args, JavaValue* result, InstanceKlass* klass, TRAPS) { + assert(args != NULL, "invariant"); + assert(result != NULL, "invariant"); + assert(klass != NULL, "invariant"); + assert(klass->is_initialized(), "invariant"); + + HandleMark hm(THREAD); + instanceOop obj = klass->allocate_instance(CHECK); + instanceHandle h_obj(THREAD, obj); + assert(h_obj.not_null(), "invariant"); + args->set_receiver(h_obj); + result->set_type(T_VOID); // constructor result type + JfrJavaSupport::call_special(args, CHECK); + result->set_type(T_OBJECT); // set back to original result type + result->set_jobject((jobject)h_obj()); +} + +static void array_construction(JfrJavaArguments* args, JavaValue* result, InstanceKlass* klass, int array_length, TRAPS) { + assert(args != NULL, "invariant"); + assert(result != NULL, "invariant"); + assert(klass != NULL, "invariant"); + assert(klass->is_initialized(), "invariant"); + + Klass* const ak = klass->array_klass(THREAD); + ObjArrayKlass::cast(ak)->initialize(THREAD); + HandleMark hm(THREAD); + objArrayOop arr = ObjArrayKlass::cast(ak)->allocate(array_length, CHECK); + result->set_jobject((jobject)arr); +} + +static void create_object(JfrJavaArguments* args, JavaValue* result, TRAPS) { + assert(args != NULL, "invariant"); + assert(result != NULL, "invariant"); + assert(result->get_type() == T_OBJECT, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + + InstanceKlass* const klass = static_cast(args->klass()); + klass->initialize(CHECK); + + const int array_length = args->array_length(); + + if (array_length > 0) { + array_construction(args, result, klass, array_length, CHECK); + } else { + object_construction(args, result, klass, THREAD); + } +} + +static void handle_result(JavaValue* result, bool global_ref, Thread* t) { + assert(result != NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(t)); + const oop result_oop = (const oop)result->get_jobject(); + if (result_oop == NULL) { + return; + } + result->set_jobject(global_ref ? 
+ JfrJavaSupport::global_jni_handle(result_oop, t) :
+ JfrJavaSupport::local_jni_handle(result_oop, t));
+}
+
+void JfrJavaSupport::new_object(JfrJavaArguments* args, TRAPS) {
+ assert(args != NULL, "invariant");
+ DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+ create_object(args, args->result(), THREAD);
+}
+
+void JfrJavaSupport::new_object_local_ref(JfrJavaArguments* args, TRAPS) {
+ assert(args != NULL, "invariant");
+ DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+ JavaValue* const result = args->result();
+ assert(result != NULL, "invariant");
+ create_object(args, result, CHECK);
+ handle_result(result, false, THREAD);
+}
+
+void JfrJavaSupport::new_object_global_ref(JfrJavaArguments* args, TRAPS) {
+ assert(args != NULL, "invariant");
+ DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+ JavaValue* const result = args->result();
+ assert(result != NULL, "invariant");
+ create_object(args, result, CHECK);
+ handle_result(result, true, THREAD);
+}
+
+jstring JfrJavaSupport::new_string(const char* c_str, TRAPS) {
+ assert(c_str != NULL, "invariant");
+ DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+ const oop result = java_lang_String::create_oop_from_str(c_str, THREAD);
+ return (jstring)local_jni_handle(result, THREAD);
+}
+
+jobjectArray JfrJavaSupport::new_string_array(int length, TRAPS) {
+ DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+ JavaValue result(T_OBJECT);
+ JfrJavaArguments args(&result, "java/lang/String", "<init>", "()V", CHECK_NULL);
+ args.set_array_length(length);
+ new_object_local_ref(&args, THREAD);
+ return (jobjectArray)args.result()->get_jobject();
+}
+
+jobject JfrJavaSupport::new_java_lang_Boolean(bool value, TRAPS) {
+ DEBUG_ONLY(check_java_thread_in_vm(THREAD));
+ JavaValue result(T_OBJECT);
+ JfrJavaArguments args(&result, "java/lang/Boolean", "<init>", "(Z)V", CHECK_NULL);
+ args.push_int(value ? 
(jint)JNI_TRUE : (jint)JNI_FALSE); + new_object_local_ref(&args, THREAD); + return args.result()->get_jobject(); +} + +jobject JfrJavaSupport::new_java_lang_Integer(jint value, TRAPS) { + DEBUG_ONLY(check_java_thread_in_vm(THREAD)); + JavaValue result(T_OBJECT); + JfrJavaArguments args(&result, "java/lang/Integer", "", "(I)V", CHECK_NULL); + args.push_int(value); + new_object_local_ref(&args, THREAD); + return args.result()->get_jobject(); +} + +jobject JfrJavaSupport::new_java_lang_Long(jlong value, TRAPS) { + DEBUG_ONLY(check_java_thread_in_vm(THREAD)); + JavaValue result(T_OBJECT); + JfrJavaArguments args(&result, "java/lang/Long", "", "(J)V", CHECK_NULL); + args.push_long(value); + new_object_local_ref(&args, THREAD); + return args.result()->get_jobject(); +} + +void JfrJavaSupport::set_array_element(jobjectArray arr, jobject element, int index, Thread* t) { + assert(arr != NULL, "invariant"); + DEBUG_ONLY(check_java_thread_in_vm(t)); + HandleMark hm(t); + objArrayHandle a(t, (objArrayOop)resolve_non_null(arr)); + a->obj_at_put(index, resolve_non_null(element)); +} + +/* + * Field access + */ +static void write_int_field(const Handle& h_oop, fieldDescriptor* fd, jint value) { + assert(h_oop.not_null(), "invariant"); + assert(fd != NULL, "invariant"); + h_oop->int_field_put(fd->offset(), value); +} + +static void write_float_field(const Handle& h_oop, fieldDescriptor* fd, jfloat value) { + assert(h_oop.not_null(), "invariant"); + assert(fd != NULL, "invariant"); + h_oop->float_field_put(fd->offset(), value); +} + +static void write_double_field(const Handle& h_oop, fieldDescriptor* fd, jdouble value) { + assert(h_oop.not_null(), "invariant"); + assert(fd != NULL, "invariant"); + h_oop->double_field_put(fd->offset(), value); +} + +static void write_long_field(const Handle& h_oop, fieldDescriptor* fd, jlong value) { + assert(h_oop.not_null(), "invariant"); + assert(fd != NULL, "invariant"); + h_oop->long_field_put(fd->offset(), value); +} + +static void write_oop_field(const Handle& h_oop, fieldDescriptor* fd, const oop value) { + assert(h_oop.not_null(), "invariant"); + assert(fd != NULL, "invariant"); + h_oop->obj_field_put(fd->offset(), value); +} + +static void write_specialized_field(JfrJavaArguments* args, const Handle& h_oop, fieldDescriptor* fd, bool static_field) { + assert(args != NULL, "invariant"); + assert(h_oop.not_null(), "invariant"); + assert(fd != NULL, "invariant"); + assert(fd->offset() > 0, "invariant"); + assert(args->length() >= 1, "invariant"); + + // attempt must set a real value + assert(args->param(1).get_type() != T_VOID, "invariant"); + + switch(fd->field_type()) { + case T_BOOLEAN: + case T_CHAR: + case T_SHORT: + case T_INT: + write_int_field(h_oop, fd, args->param(1).get_jint()); + break; + case T_FLOAT: + write_float_field(h_oop, fd, args->param(1).get_jfloat()); + break; + case T_DOUBLE: + write_double_field(h_oop, fd, args->param(1).get_jdouble()); + break; + case T_LONG: + write_long_field(h_oop, fd, args->param(1).get_jlong()); + break; + case T_OBJECT: + write_oop_field(h_oop, fd, (oop)args->param(1).get_jobject()); + break; + case T_ADDRESS: + write_oop_field(h_oop, fd, JfrJavaSupport::resolve_non_null(args->param(1).get_jobject())); + break; + default: + ShouldNotReachHere(); + } +} + +static void read_specialized_field(JavaValue* result, const Handle& h_oop, fieldDescriptor* fd) { + assert(result != NULL, "invariant"); + assert(h_oop.not_null(), "invariant"); + assert(fd != NULL, "invariant"); + assert(fd->offset() > 0, "invariant"); + + 
switch(fd->field_type()) { + case T_BOOLEAN: + case T_CHAR: + case T_SHORT: + case T_INT: + result->set_jint(h_oop->int_field(fd->offset())); + break; + case T_FLOAT: + result->set_jfloat(h_oop->float_field(fd->offset())); + break; + case T_DOUBLE: + result->set_jdouble(h_oop->double_field(fd->offset())); + break; + case T_LONG: + result->set_jlong(h_oop->long_field(fd->offset())); + break; + case T_OBJECT: + result->set_jobject((jobject)h_oop->obj_field(fd->offset())); + break; + default: + ShouldNotReachHere(); + } +} + +static bool find_field(InstanceKlass* ik, + Symbol* name_symbol, + Symbol* signature_symbol, + fieldDescriptor* fd, + bool is_static = false, + bool allow_super = false) { + if (allow_super || is_static) { + return ik->find_field(name_symbol, signature_symbol, is_static, fd) != NULL; + } + return ik->find_local_field(name_symbol, signature_symbol, fd); +} + +static void lookup_field(JfrJavaArguments* args, InstanceKlass* klass, fieldDescriptor* fd, bool static_field) { + assert(args != NULL, "invariant"); + assert(klass != NULL, "invariant"); + assert(klass->is_initialized(), "invariant"); + assert(fd != NULL, "invariant"); + find_field(klass, args->name(), args->signature(), fd, static_field, true); +} + +static void read_field(JfrJavaArguments* args, JavaValue* result, TRAPS) { + assert(args != NULL, "invariant"); + assert(result != NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + + InstanceKlass* const klass = static_cast(args->klass()); + klass->initialize(CHECK); + const bool static_field = !args->has_receiver(); + fieldDescriptor fd; + lookup_field(args, klass, &fd, static_field); + assert(fd.offset() > 0, "invariant"); + + HandleMark hm(THREAD); + Handle h_oop(static_field ? Handle(THREAD, klass->java_mirror()) : Handle(THREAD, args->receiver())); + read_specialized_field(result, h_oop, &fd); +} + +static void write_field(JfrJavaArguments* args, JavaValue* result, TRAPS) { + assert(args != NULL, "invariant"); + assert(result != NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + + InstanceKlass* const klass = static_cast(args->klass()); + klass->initialize(CHECK); + + const bool static_field = !args->has_receiver(); + fieldDescriptor fd; + lookup_field(args, klass, &fd, static_field); + assert(fd.offset() > 0, "invariant"); + + HandleMark hm(THREAD); + Handle h_oop(static_field ? 
Handle(THREAD, klass->java_mirror()) : Handle(THREAD, args->receiver())); + write_specialized_field(args, h_oop, &fd, static_field); +} + +void JfrJavaSupport::set_field(JfrJavaArguments* args, TRAPS) { + assert(args != NULL, "invariant"); + write_field(args, args->result(), THREAD); +} + +void JfrJavaSupport::get_field(JfrJavaArguments* args, TRAPS) { + assert(args != NULL, "invariant"); + read_field(args, args->result(), THREAD); +} + +void JfrJavaSupport::get_field_local_ref(JfrJavaArguments* args, TRAPS) { + assert(args != NULL, "invariant"); + DEBUG_ONLY(check_java_thread_in_vm(THREAD)); + + JavaValue* const result = args->result(); + assert(result != NULL, "invariant"); + assert(result->get_type() == T_OBJECT, "invariant"); + + read_field(args, result, CHECK); + const oop obj = (const oop)result->get_jobject(); + + if (obj != NULL) { + result->set_jobject(local_jni_handle(obj, THREAD)); + } +} + +void JfrJavaSupport::get_field_global_ref(JfrJavaArguments* args, TRAPS) { + assert(args != NULL, "invariant"); + DEBUG_ONLY(check_java_thread_in_vm(THREAD)); + + JavaValue* const result = args->result(); + assert(result != NULL, "invariant"); + assert(result->get_type() == T_OBJECT, "invariant"); + read_field(args, result, CHECK); + const oop obj = (const oop)result->get_jobject(); + if (obj != NULL) { + result->set_jobject(global_jni_handle(obj, THREAD)); + } +} + +/* + * Misc + */ +Klass* JfrJavaSupport::klass(const jobject handle) { + const oop obj = resolve_non_null(handle); + assert(obj != NULL, "invariant"); + return obj->klass(); +} + +// caller needs ResourceMark +const char* JfrJavaSupport::c_str(jstring string, Thread* t) { + DEBUG_ONLY(check_java_thread_in_vm(t)); + if (string == NULL) { + return NULL; + } + const char* temp = NULL; + const oop java_string = resolve_non_null(string); + if (java_lang_String::value(java_string) != NULL) { + const size_t length = java_lang_String::utf8_length(java_string); + temp = NEW_RESOURCE_ARRAY_IN_THREAD(t, const char, (length + 1)); + if (temp == NULL) { + JfrJavaSupport::throw_out_of_memory_error("Unable to allocate thread local native memory", t); + return NULL; + } + assert(temp != NULL, "invariant"); + java_lang_String::as_utf8_string(java_string, const_cast(temp), (int) length + 1); + } + return temp; +} + +/* + * Exceptions and errors + */ +static void create_and_throw(Symbol* name, const char* message, TRAPS) { + assert(name != NULL, "invariant"); + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + assert(!HAS_PENDING_EXCEPTION, "invariant"); + THROW_MSG(name, message); +} + +void JfrJavaSupport::throw_illegal_state_exception(const char* message, TRAPS) { + create_and_throw(vmSymbols::java_lang_IllegalStateException(), message, THREAD); +} + +void JfrJavaSupport::throw_internal_error(const char* message, TRAPS) { + create_and_throw(vmSymbols::java_lang_InternalError(), message, THREAD); +} + +void JfrJavaSupport::throw_illegal_argument_exception(const char* message, TRAPS) { + create_and_throw(vmSymbols::java_lang_IllegalArgumentException(), message, THREAD); +} + +void JfrJavaSupport::throw_out_of_memory_error(const char* message, TRAPS) { + create_and_throw(vmSymbols::java_lang_OutOfMemoryError(), message, THREAD); +} + +void JfrJavaSupport::throw_class_format_error(const char* message, TRAPS) { + create_and_throw(vmSymbols::java_lang_ClassFormatError(), message, THREAD); +} + +void JfrJavaSupport::abort(jstring errorMsg, Thread* t) { + DEBUG_ONLY(check_java_thread_in_vm(t)); + + ResourceMark rm(t); + const char* const 
error_msg = c_str(errorMsg, t); + if (error_msg != NULL) { + log_error(jfr, system)("%s",error_msg); + } + log_error(jfr, system)("%s", "An irrecoverable error in Jfr. Shutting down VM..."); + vm_abort(); +} + +JfrJavaSupport::CAUSE JfrJavaSupport::_cause = JfrJavaSupport::VM_ERROR; +void JfrJavaSupport::set_cause(jthrowable throwable, Thread* t) { + DEBUG_ONLY(check_java_thread_in_vm(t)); + + HandleMark hm(t); + Handle ex(t, JNIHandles::resolve_external_guard(throwable)); + + if (ex.is_null()) { + return; + } + + if (ex->is_a(SystemDictionary::OutOfMemoryError_klass())) { + _cause = OUT_OF_MEMORY; + return; + } + if (ex->is_a(SystemDictionary::StackOverflowError_klass())) { + _cause = STACK_OVERFLOW; + return; + } + if (ex->is_a(SystemDictionary::Error_klass())) { + _cause = VM_ERROR; + return; + } + if (ex->is_a(SystemDictionary::RuntimeException_klass())) { + _cause = RUNTIME_EXCEPTION; + return; + } + if (ex->is_a(SystemDictionary::Exception_klass())) { + _cause = UNKNOWN; + return; + } +} + +void JfrJavaSupport::uncaught_exception(jthrowable throwable, Thread* t) { + DEBUG_ONLY(check_java_thread_in_vm(t)); + assert(throwable != NULL, "invariant"); + set_cause(throwable, t); +} + +JfrJavaSupport::CAUSE JfrJavaSupport::cause() { + return _cause; +} + +const char* const JDK_JFR_MODULE_NAME = "jdk.jfr"; +const char* const JDK_JFR_PACKAGE_NAME = "jdk/jfr"; + +jlong JfrJavaSupport::jfr_thread_id(jobject target_thread) { + oop java_thread = JNIHandles::resolve_non_null(target_thread); + JavaThread* native_thread = java_lang_Thread::thread(java_thread); + return native_thread != NULL ? THREAD_TRACE_ID(native_thread) : 0; +} diff --git a/src/share/vm/jfr/jni/jfrJavaSupport.hpp b/src/share/vm/jfr/jni/jfrJavaSupport.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6f7c12ea8b627740011887df8c9bac399f899729 --- /dev/null +++ b/src/share/vm/jfr/jni/jfrJavaSupport.hpp @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_JFR_JNI_JFRJAVASUPPORT_HPP +#define SHARE_VM_JFR_JNI_JFRJAVASUPPORT_HPP + +#include "jfr/jni/jfrJavaCall.hpp" +#include "utilities/exceptions.hpp" + +class Klass; +class JavaThread; +class outputStream; + +class JfrJavaSupport : public AllStatic { + public: + static jobject local_jni_handle(const oop obj, Thread* t); + static jobject local_jni_handle(const jobject handle, Thread* t); + static void destroy_local_jni_handle(const jobject handle); + + static jobject global_jni_handle(const oop obj, Thread* t); + static jobject global_jni_handle(const jobject handle, Thread* t); + static void destroy_global_jni_handle(const jobject handle); + + static oop resolve_non_null(jobject obj); + static void notify_all(jobject obj, TRAPS); + static void set_array_element(jobjectArray arr, jobject element, int index, Thread* t); + + // naked oop result + static void call_static(JfrJavaArguments* args, TRAPS); + static void call_special(JfrJavaArguments* args, TRAPS); + static void call_virtual(JfrJavaArguments* args, TRAPS); + + static void set_field(JfrJavaArguments* args, TRAPS); + static void get_field(JfrJavaArguments* args, TRAPS); + static void new_object(JfrJavaArguments* args, TRAPS); + + // global jni handle result + static void new_object_global_ref(JfrJavaArguments* args, TRAPS); + static void get_field_global_ref(JfrJavaArguments* args, TRAPS); + + // local jni handle result + static void new_object_local_ref(JfrJavaArguments* args, TRAPS); + static void get_field_local_ref(JfrJavaArguments* args, TRAPS); + + static jstring new_string(const char* c_str, TRAPS); + static jobjectArray new_string_array(int length, TRAPS); + + static jobject new_java_lang_Boolean(bool value, TRAPS); + static jobject new_java_lang_Integer(jint value, TRAPS); + static jobject new_java_lang_Long(jlong value, TRAPS); + + // misc + static Klass* klass(const jobject handle); + // caller needs ResourceMark + static const char* c_str(jstring string, Thread* jt); + + // exceptions + static void throw_illegal_state_exception(const char* message, TRAPS); + static void throw_illegal_argument_exception(const char* message, TRAPS); + static void throw_internal_error(const char* message, TRAPS); + static void throw_out_of_memory_error(const char* message, TRAPS); + static void throw_class_format_error(const char* message, TRAPS); + + static jlong jfr_thread_id(jobject target_thread); + + // critical + static void abort(jstring errorMsg, TRAPS); + static void uncaught_exception(jthrowable throwable, Thread* t); + + // asserts + DEBUG_ONLY(static void check_java_thread_in_vm(Thread* t);) + DEBUG_ONLY(static void check_java_thread_in_native(Thread* t);) + + enum CAUSE { + VM_ERROR, + OUT_OF_MEMORY, + STACK_OVERFLOW, + RUNTIME_EXCEPTION, + UNKNOWN, + NOF_CAUSES + }; + + static CAUSE cause(); + + private: + static CAUSE _cause; + static void set_cause(jthrowable throwable, Thread* t); +}; + +#endif // SHARE_VM_JFR_JNI_JFRJAVASUPPORT_HPP diff --git a/src/share/vm/jfr/jni/jfrJniMethod.cpp b/src/share/vm/jfr/jni/jfrJniMethod.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fd30a0dbb558362013e9dbbfa86ceba76dc41f12 --- /dev/null +++ b/src/share/vm/jfr/jni/jfrJniMethod.cpp @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jni.h" +#include "jvm.h" +#include "jfr/jfr.hpp" +#include "jfr/periodic/sampling/jfrThreadSampler.hpp" +#include "jfr/recorder/jfrEventSetting.hpp" +#include "jfr/recorder/jfrRecorder.hpp" +#include "jfr/recorder/access/jfrOptionSet.hpp" +#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp" +#include "jfr/recorder/checkpoint/constant/traceid/jfrTraceId.inline.hpp" +#include "jfr/recorder/repository/jfrRepository.hpp" +#include "jfr/recorder/repository/jfrChunkSizeNotifier.hpp" +#include "jfr/recorder/repository/jfrChunkWriter.hpp" +#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" +#include "jfr/recorder/stringpool/jfrStringPool.hpp" +#include "jfr/jni/jfrGetAllEventClasses.hpp" +#include "jfr/jni/jfrJavaSupport.hpp" +#include "jfr/jni/jfrJniMethodRegistration.hpp" +#include "jfr/instrumentation/jfrEventClassTransformer.hpp" +#include "jfr/instrumentation/jfrJvmtiAgent.hpp" +#include "jfr/leakprofiler/leakProfiler.hpp" +#include "jfr/utilities/jfrJavaLog.hpp" +#include "jfr/utilities/jfrTimeConverter.hpp" +#include "jfr/utilities/jfrTraceTime.hpp" +#include "jfr/writers/jfrJavaEventWriter.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/interfaceSupport.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/os.hpp" +#include "runtime/thread.hpp" +#include "trace/traceBackend.hpp" +#include "trace/traceMacros.hpp" +#include "tracefiles/traceEventClasses.hpp" +#include "tracefiles/tracePeriodic.hpp" +#include "utilities/debug.hpp" + +#define NO_TRANSITION(result_type, header) extern "C" { result_type JNICALL header { +#define NO_TRANSITION_END } } + +/* + * NO_TRANSITION entries + * + * Thread remains _thread_in_native + */ + +NO_TRANSITION(void, trace_register_natives(JNIEnv* env, jclass jvmclass)) + JfrJniMethodRegistration register_native_methods(env); +NO_TRANSITION_END + +NO_TRANSITION(jboolean, jfr_is_enabled()) + return Jfr::is_enabled() ? JNI_TRUE : JNI_FALSE; +NO_TRANSITION_END + +NO_TRANSITION(jboolean, jfr_is_disabled()) + return Jfr::is_disabled() ? JNI_TRUE : JNI_FALSE; +NO_TRANSITION_END + +NO_TRANSITION(jboolean, jfr_is_started()) + return JfrRecorder::is_created() ? 
JNI_TRUE : JNI_FALSE; +NO_TRANSITION_END + +NO_TRANSITION(jstring, jfr_get_pid(JNIEnv* env, jobject jvm)) + char pid_buf[32] = { 0 }; + jio_snprintf(pid_buf, sizeof(pid_buf), "%d", os::current_process_id()); + jstring pid_string = env->NewStringUTF(pid_buf); + return pid_string; // exception pending if NULL +NO_TRANSITION_END + +NO_TRANSITION(jlong, jfr_elapsed_frequency(JNIEnv* env, jobject jvm)) + return JfrTraceTime::frequency(); +NO_TRANSITION_END + +NO_TRANSITION(jlong, jfr_elapsed_counter(JNIEnv* env, jobject jvm)) + return JfrTraceTime::now(); +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_retransform_classes(JNIEnv* env, jobject jvm, jobjectArray classes)) + JfrJvmtiAgent::retransform_classes(env, classes, JavaThread::thread_from_jni_environment(env)); +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_set_enabled(JNIEnv* env, jobject jvm, jlong event_type_id, jboolean enabled)) + JfrEventSetting::set_enabled(event_type_id, JNI_TRUE == enabled); + if (EventOldObjectSample::eventId == event_type_id) { + ThreadInVMfromNative transition(JavaThread::thread_from_jni_environment(env)); + if (JNI_TRUE == enabled) { + LeakProfiler::start(JfrOptionSet::old_object_queue_size()); + } else { + LeakProfiler::stop(); + } + } +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_set_file_notification(JNIEnv* env, jobject jvm, jlong threshold)) + JfrChunkSizeNotifier::set_chunk_size_threshold((size_t)threshold); +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_set_sample_threads(JNIEnv* env, jobject jvm, jboolean sampleThreads)) + JfrOptionSet::set_sample_threads(sampleThreads); +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_set_stack_depth(JNIEnv* env, jobject jvm, jint depth)) + JfrOptionSet::set_stackdepth((jlong)depth); +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_set_stacktrace_enabled(JNIEnv* env, jobject jvm, jlong event_type_id, jboolean enabled)) + JfrEventSetting::set_stacktrace(event_type_id, JNI_TRUE == enabled); +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_set_global_buffer_count(JNIEnv* env, jobject jvm, jlong count)) + JfrOptionSet::set_num_global_buffers(count); +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_set_global_buffer_size(JNIEnv* env, jobject jvm, jlong size)) + JfrOptionSet::set_global_buffer_size(size); +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_set_thread_buffer_size(JNIEnv* env, jobject jvm, jlong size)) + JfrOptionSet::set_thread_buffer_size(size); +NO_TRANSITION_END + +NO_TRANSITION(void, jfr_set_memory_size(JNIEnv* env, jobject jvm, jlong size)) + JfrOptionSet::set_memory_size(size); +NO_TRANSITION_END + +NO_TRANSITION(jboolean, jfr_set_threshold(JNIEnv* env, jobject jvm, jlong event_type_id, jlong thresholdTicks)) + return JfrEventSetting::set_threshold(event_type_id, thresholdTicks) ? JNI_TRUE : JNI_FALSE; +NO_TRANSITION_END + +NO_TRANSITION(jboolean, jfr_allow_event_retransforms(JNIEnv* env, jobject jvm)) + return JfrOptionSet::allow_event_retransforms() ? JNI_TRUE : JNI_FALSE; +NO_TRANSITION_END + +NO_TRANSITION(jboolean, jfr_is_available(JNIEnv* env, jclass jvm)) + return !Jfr::is_disabled() ? 
JNI_TRUE : JNI_FALSE; +NO_TRANSITION_END + +NO_TRANSITION(jlong, jfr_get_epoch_address(JNIEnv* env, jobject jvm)) + return JfrTraceIdEpoch::epoch_address(); +NO_TRANSITION_END + +NO_TRANSITION(jlong, jfr_get_unloaded_event_classes_count(JNIEnv* env, jobject jvm)) + return JfrEventClasses::unloaded_event_classes_count(); +NO_TRANSITION_END + +NO_TRANSITION(jdouble, jfr_time_conv_factor(JNIEnv* env, jobject jvm)) + return (jdouble)JfrTimeConverter::nano_to_counter_multiplier(); +NO_TRANSITION_END + +NO_TRANSITION(jboolean, jfr_set_cutoff(JNIEnv* env, jobject jvm, jlong event_type_id, jlong cutoff_ticks)) + return JfrEventSetting::set_cutoff(event_type_id, cutoff_ticks) ? JNI_TRUE : JNI_FALSE; +NO_TRANSITION_END + + +/* + * JVM_ENTRY_NO_ENV entries + * + * Transitions: + * Entry: _thread_in_native -> _thread_in_vm + * Exit: _thread_in_vm -> _thread_in_native + * + * Current JavaThread available as "thread" variable + */ + +JVM_ENTRY_NO_ENV(jboolean, jfr_create_jfr(JNIEnv* env, jobject jvm, jboolean simulate_failure)) + if (JfrRecorder::is_created()) { + return JNI_TRUE; + } + if (!JfrRecorder::create(simulate_failure == JNI_TRUE)) { + JfrJavaSupport::throw_illegal_state_exception("Unable to start Jfr", thread); + return JNI_FALSE; + } + return JNI_TRUE; +JVM_END + +JVM_ENTRY_NO_ENV(jboolean, jfr_destroy_jfr(JNIEnv* env, jobject jvm)) + JfrRecorder::destroy(); + return JNI_TRUE; +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_begin_recording(JNIEnv* env, jobject jvm)) + if (JfrRecorder::is_recording()) { + return; + } + JfrRecorder::start_recording(); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_end_recording(JNIEnv* env, jobject jvm)) + if (!JfrRecorder::is_recording()) { + return; + } + JfrRecorder::stop_recording(); +JVM_END + + +JVM_ENTRY_NO_ENV(jboolean, jfr_emit_event(JNIEnv* env, jobject jvm, jlong eventTypeId, jlong timeStamp, jlong when)) + JfrPeriodicEventSet::requestEvent((TraceEventId)eventTypeId); + return thread->has_pending_exception() ? 
JNI_FALSE : JNI_TRUE; +JVM_END + +JVM_ENTRY_NO_ENV(jobject, jfr_get_all_event_classes(JNIEnv* env, jobject jvm)) + return JfrEventClasses::get_all_event_classes(thread); +JVM_END + +JVM_ENTRY_NO_ENV(jlong, jfr_class_id(JNIEnv* env, jclass jvm, jclass jc)) + return JfrTraceId::use(jc); +JVM_END + +JVM_ENTRY_NO_ENV(jlong, jfr_stacktrace_id(JNIEnv* env, jobject jvm, jint skip)) + return JfrStackTraceRepository::record(thread, skip); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_log(JNIEnv* env, jobject jvm, jint tag_set, jint level, jstring message)) + JfrJavaLog::log(tag_set, level, message, thread); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_subscribe_log_level(JNIEnv* env, jobject jvm, jobject log_tag, jint id)) + JfrJavaLog::subscribe_log_level(log_tag, id, thread); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_set_output(JNIEnv* env, jobject jvm, jstring path)) + JfrRepository::set_chunk_path(path, thread); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_set_method_sampling_interval(JNIEnv* env, jobject jvm, jlong type, jlong intervalMillis)) + if (intervalMillis < 0) { + intervalMillis = 0; + } + TraceEventId typed_event_id = (TraceEventId)type; + assert(EventExecutionSample::eventId == typed_event_id || EventNativeMethodSample::eventId == typed_event_id, "invariant"); + if (intervalMillis > 0) { + JfrEventSetting::set_enabled(typed_event_id, true); // ensure sampling event is enabled + } + if (EventExecutionSample::eventId == type) { + JfrThreadSampling::set_java_sample_interval(intervalMillis); + } else { + JfrThreadSampling::set_native_sample_interval(intervalMillis); + } +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_store_metadata_descriptor(JNIEnv* env, jobject jvm, jbyteArray descriptor)) + JfrMetadataEvent::update(descriptor); +JVM_END + +// trace thread id for a thread object +JVM_ENTRY_NO_ENV(jlong, jfr_id_for_thread(JNIEnv* env, jobject jvm, jobject t)) + return JfrJavaSupport::jfr_thread_id(t); +JVM_END + +JVM_ENTRY_NO_ENV(jobject, jfr_get_event_writer(JNIEnv* env, jclass cls)) + return JfrJavaEventWriter::event_writer(thread); +JVM_END + +JVM_ENTRY_NO_ENV(jobject, jfr_new_event_writer(JNIEnv* env, jclass cls)) + return JfrJavaEventWriter::new_event_writer(thread); +JVM_END + +JVM_ENTRY_NO_ENV(jboolean, jfr_event_writer_flush(JNIEnv* env, jclass cls, jobject writer, jint used_size, jint requested_size)) + return JfrJavaEventWriter::flush(writer, used_size, requested_size, thread); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_set_repository_location(JNIEnv* env, jobject repo, jstring location)) + return JfrRepository::set_path(location, thread); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_uncaught_exception(JNIEnv* env, jobject jvm, jobject t, jthrowable throwable)) + JfrJavaSupport::uncaught_exception(throwable, thread); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_abort(JNIEnv* env, jobject jvm, jstring errorMsg)) + JfrJavaSupport::abort(errorMsg, thread); +JVM_END + +JVM_ENTRY_NO_ENV(jlong, jfr_type_id(JNIEnv* env, jobject jvm, jclass jc)) + return JfrTraceId::get(jc); +JVM_END + +JVM_ENTRY_NO_ENV(jboolean, jfr_add_string_constant(JNIEnv* env, jclass jvm, jboolean epoch, jlong id, jstring string)) + return JfrStringPool::add(epoch == JNI_TRUE, id, string, thread); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_set_force_instrumentation(JNIEnv* env, jobject jvm, jboolean force_instrumentation)) + JfrEventClassTransformer::set_force_instrumentation(force_instrumentation == JNI_TRUE ? 
true : false); +JVM_END + +JVM_ENTRY_NO_ENV(void, jfr_emit_old_object_samples(JNIEnv* env, jobject jvm, jlong cutoff_ticks, jboolean emit_all)) + LeakProfiler::emit_events(cutoff_ticks, emit_all == JNI_TRUE); +JVM_END diff --git a/src/share/vm/jfr/jni/jfrJniMethod.hpp b/src/share/vm/jfr/jni/jfrJniMethod.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cc18997e698763ed7d0da923bf4183aa44f1dda7 --- /dev/null +++ b/src/share/vm/jfr/jni/jfrJniMethod.hpp @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_JNI_JFRJNIMETHOD_HPP +#define SHARE_VM_JFR_JNI_JFRJNIMETHOD_HPP + +#include "jni.h" + +/* + * Native methods for jdk.jfr.internal.JVM + */ + +#ifdef __cplusplus +extern "C" { +#endif + +jboolean JNICALL jfr_is_enabled(); + +jboolean JNICALL jfr_is_disabled(); + +jboolean JNICALL jfr_is_started(); + +jlong JNICALL jfr_elapsed_counter(JNIEnv* env, jobject jvm); + +jboolean JNICALL jfr_create_jfr(JNIEnv* env, jobject jvm, jboolean simulate_failure); + +jboolean JNICALL jfr_destroy_jfr(JNIEnv* env, jobject jvm); + +void JNICALL jfr_begin_recording(JNIEnv* env, jobject jvm); + +void JNICALL jfr_end_recording(JNIEnv* env, jobject jvm); + +jboolean JNICALL jfr_emit_event(JNIEnv* env, jobject jvm, jlong eventTypeId, jlong timeStamp, jlong when); + +jobject JNICALL jfr_get_all_event_classes(JNIEnv* env, jobject jvm); + +jlong JNICALL jfr_class_id(JNIEnv* env, jclass jvm, jclass jc); + +jstring JNICALL jfr_get_pid(JNIEnv* env, jobject jvm); + +jlong JNICALL jfr_stacktrace_id(JNIEnv* env, jobject jvm, jint skip); + +jlong JNICALL jfr_elapsed_frequency(JNIEnv* env, jobject jvm); + +void JNICALL jfr_subscribe_log_level(JNIEnv* env, jobject jvm, jobject log_tag, jint id); + +void JNICALL jfr_log(JNIEnv* env, jobject jvm, jint tag_set, jint level, jstring message); + +void JNICALL jfr_retransform_classes(JNIEnv* env, jobject jvm, jobjectArray classes); + +void JNICALL jfr_set_enabled(JNIEnv* env, jobject jvm, jlong event_type_id, jboolean enabled); + +void JNICALL jfr_set_file_notification(JNIEnv* env, jobject jvm, jlong delta); + +void JNICALL jfr_set_global_buffer_count(JNIEnv* env, jobject jvm, jlong count); + +void JNICALL jfr_set_global_buffer_size(JNIEnv* env, jobject jvm, jlong size); + +void JNICALL jfr_set_method_sampling_interval(JNIEnv* env, jobject jvm, jlong type, jlong intervalMillis); + +void JNICALL jfr_set_output(JNIEnv* env, jobject jvm, jstring path); + +void JNICALL jfr_set_sample_threads(JNIEnv* env, jobject jvm, 
jboolean sampleThreads); + +void JNICALL jfr_set_stack_depth(JNIEnv* env, jobject jvm, jint depth); + +void JNICALL jfr_set_stacktrace_enabled(JNIEnv* env, jobject jvm, jlong event_type_id, jboolean enabled); + +void JNICALL jfr_set_thread_buffer_size(JNIEnv* env, jobject jvm, jlong size); + +void JNICALL jfr_set_memory_size(JNIEnv* env, jobject jvm, jlong size); + +jboolean JNICALL jfr_set_threshold(JNIEnv* env, jobject jvm, jlong event_type_id, jlong thresholdTicks); + +void JNICALL jfr_store_metadata_descriptor(JNIEnv* env, jobject jvm, jbyteArray descriptor); + +jlong JNICALL jfr_id_for_thread(JNIEnv* env, jobject jvm, jobject t); + +jboolean JNICALL jfr_allow_event_retransforms(JNIEnv* env, jobject jvm); + +jboolean JNICALL jfr_is_available(JNIEnv* env, jclass jvm); + +jdouble JNICALL jfr_time_conv_factor(JNIEnv* env, jobject jvm); + +jlong JNICALL jfr_type_id(JNIEnv* env, jobject jvm, jclass jc); + +void JNICALL jfr_set_repository_location(JNIEnv* env, jobject repo, jstring location); + +jobject JNICALL jfr_get_event_writer(JNIEnv* env, jclass cls); + +jobject JNICALL jfr_new_event_writer(JNIEnv* env, jclass cls); + +jboolean JNICALL jfr_event_writer_flush(JNIEnv* env, jclass cls, jobject writer, jint used_size, jint requested_size); + +void JNICALL jfr_abort(JNIEnv* env, jobject jvm, jstring errorMsg); + +jlong JNICALL jfr_get_epoch_address(JNIEnv* env, jobject jvm); + +jboolean JNICALL jfr_add_string_constant(JNIEnv* env, jclass jvm, jboolean epoch, jlong id, jstring string); + +void JNICALL jfr_uncaught_exception(JNIEnv* env, jobject jvm, jobject thread, jthrowable throwable); + +void JNICALL jfr_set_force_instrumentation(JNIEnv* env, jobject jvm, jboolean force); + +jlong JNICALL jfr_get_unloaded_event_classes_count(JNIEnv* env, jobject jvm); + +jboolean JNICALL jfr_set_cutoff(JNIEnv* env, jobject jvm, jlong event_type_id, jlong cutoff_ticks); + +void JNICALL jfr_emit_old_object_samples(JNIEnv* env, jobject jvm, jlong cutoff_ticks, jboolean emit_all); + +#ifdef __cplusplus +} +#endif + +#endif // SHARE_VM_JFR_JNI_JFRJNIMETHOD_HPP diff --git a/src/share/vm/jfr/jni/jfrJniMethodRegistration.cpp b/src/share/vm/jfr/jni/jfrJniMethodRegistration.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2770912e1a1358ccf07c7910e419f48b8012cf26 --- /dev/null +++ b/src/share/vm/jfr/jni/jfrJniMethodRegistration.cpp @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/jni/jfrJniMethod.hpp" +#include "jfr/jni/jfrJniMethodRegistration.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "runtime/interfaceSupport.hpp" +#include "runtime/thread.hpp" +#include "utilities/exceptions.hpp" + +JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) { + assert(env != NULL, "invariant"); + jclass jfr_clz = env->FindClass("jdk/jfr/internal/JVM"); + if (jfr_clz != NULL) { + JNINativeMethod method[] = { + (char*)"beginRecording", (char*)"()V", (void*)jfr_begin_recording, + (char*)"endRecording", (char*)"()V", (void*)jfr_end_recording, + (char*)"counterTime", (char*)"()J", (void*)jfr_elapsed_counter, + (char*)"createJFR", (char*)"(Z)Z", (void*)jfr_create_jfr, + (char*)"destroyJFR", (char*)"()Z", (void*)jfr_destroy_jfr, + (char*)"emitEvent", (char*)"(JJJ)Z", (void*)jfr_emit_event, + (char*)"getAllEventClasses", (char*)"()Ljava/util/List;", (void*)jfr_get_all_event_classes, + (char*)"getClassIdNonIntrinsic", (char*)"(Ljava/lang/Class;)J", (void*)jfr_class_id, + (char*)"getPid", (char*)"()Ljava/lang/String;", (void*)jfr_get_pid, + (char*)"getStackTraceId", (char*)"(I)J", (void*)jfr_stacktrace_id, + (char*)"getThreadId", (char*)"(Ljava/lang/Thread;)J", (void*)jfr_id_for_thread, + (char*)"getTicksFrequency", (char*)"()J", (void*)jfr_elapsed_frequency, + (char*)"subscribeLogLevel", (char*)"(Ljdk/jfr/internal/LogTag;I)V", (void*)jfr_subscribe_log_level, + (char*)"log", (char*)"(IILjava/lang/String;)V", (void*)jfr_log, + (char*)"retransformClasses", (char*)"([Ljava/lang/Class;)V", (void*)jfr_retransform_classes, + (char*)"setEnabled", (char*)"(JZ)V", (void*)jfr_set_enabled, + (char*)"setFileNotification", (char*)"(J)V", (void*)jfr_set_file_notification, + (char*)"setGlobalBufferCount", (char*)"(J)V", (void*)jfr_set_global_buffer_count, + (char*)"setGlobalBufferSize", (char*)"(J)V", (void*)jfr_set_global_buffer_size, + (char*)"setMethodSamplingInterval", (char*)"(JJ)V", (void*)jfr_set_method_sampling_interval, + (char*)"setOutput", (char*)"(Ljava/lang/String;)V", (void*)jfr_set_output, + (char*)"setSampleThreads", (char*)"(Z)V", (void*)jfr_set_sample_threads, + (char*)"setStackDepth", (char*)"(I)V", (void*)jfr_set_stack_depth, + (char*)"setStackTraceEnabled", (char*)"(JZ)V", (void*)jfr_set_stacktrace_enabled, + (char*)"setThreadBufferSize", (char*)"(J)V", (void*)jfr_set_thread_buffer_size, + (char*)"setMemorySize", (char*)"(J)V", (void*)jfr_set_memory_size, + (char*)"setThreshold", (char*)"(JJ)Z", (void*)jfr_set_threshold, + (char*)"storeMetadataDescriptor", (char*)"([B)V", (void*)jfr_store_metadata_descriptor, + (char*)"getAllowedToDoEventRetransforms", (char*)"()Z", (void*)jfr_allow_event_retransforms, + (char*)"isAvailable", (char*)"()Z", (void*)jfr_is_available, + (char*)"getTimeConversionFactor", (char*)"()D", (void*)jfr_time_conv_factor, + (char*)"getTypeId", (char*)"(Ljava/lang/Class;)J", (void*)jfr_type_id, + (char*)"getEventWriter", (char*)"()Ljava/lang/Object;", (void*)jfr_get_event_writer, + (char*)"newEventWriter", (char*)"()Ljdk/jfr/internal/EventWriter;", (void*)jfr_new_event_writer, + (char*)"flush", (char*)"(Ljdk/jfr/internal/EventWriter;II)Z", (void*)jfr_event_writer_flush, + (char*)"setRepositoryLocation", (char*)"(Ljava/lang/String;)V", (void*)jfr_set_repository_location, + (char*)"abort", (char*)"(Ljava/lang/String;)V", (void*)jfr_abort, + (char*)"getEpochAddress", (char*)"()J",(void*)jfr_get_epoch_address, + (char*)"addStringConstant", (char*)"(ZJLjava/lang/String;)Z", 
(void*)jfr_add_string_constant, + (char*)"uncaughtException", (char*)"(Ljava/lang/Thread;Ljava/lang/Throwable;)V", (void*)jfr_uncaught_exception, + (char*)"setForceInstrumentation", (char*)"(Z)V", (void*)jfr_set_force_instrumentation, + (char*)"getUnloadedEventClassCount", (char*)"()J", (void*)jfr_get_unloaded_event_classes_count, + (char*)"setCutoff", (char*)"(JJ)Z", (void*)jfr_set_cutoff, + (char*)"emitOldObjectSamples", (char*)"(JZ)V", (void*)jfr_emit_old_object_samples + }; + + const size_t method_array_length = sizeof(method) / sizeof(JNINativeMethod); + if (env->RegisterNatives(jfr_clz, method, (jint)method_array_length) != JNI_OK) { + JavaThread* jt = JavaThread::thread_from_jni_environment(env); + assert(jt != NULL, "invariant"); + assert(jt->thread_state() == _thread_in_native, "invariant"); + ThreadInVMfromNative transition(jt); + log_error(jfr, system)("RegisterNatives for JVM class failed!"); + } + env->DeleteLocalRef(jfr_clz); + } +} diff --git a/src/share/vm/jfr/jni/jfrJniMethodRegistration.hpp b/src/share/vm/jfr/jni/jfrJniMethodRegistration.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5565c838233aaf18aa7ad1fd39441ec51c45fe1b --- /dev/null +++ b/src/share/vm/jfr/jni/jfrJniMethodRegistration.hpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_JNI_JFRJNIMETHODREGISTRATION_HPP +#define SHARE_VM_JFR_JNI_JFRJNIMETHODREGISTRATION_HPP + +#include "jni.h" +#include "memory/allocation.hpp" + +// +// RegisterNatives for jdk.jfr.internal.JVM +// +class JfrJniMethodRegistration : public StackObj { + public: + JfrJniMethodRegistration(JNIEnv* env); +}; + +#endif // SHARE_VM_JFR_JNI_JFRJNIMETHODREGISTRATION_HPP diff --git a/src/share/vm/jfr/jni/jfrUpcalls.cpp b/src/share/vm/jfr/jni/jfrUpcalls.cpp new file mode 100644 index 0000000000000000000000000000000000000000..955b385bcaa0018f6455702a5afb51007a76bcdd --- /dev/null +++ b/src/share/vm/jfr/jni/jfrUpcalls.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/systemDictionary.hpp" +#include "jfr/jni/jfrJavaSupport.hpp" +#include "jfr/jni/jfrUpcalls.hpp" +#include "jfr/recorder/access/jfrEventClass.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "memory/oopFactory.hpp" +#include "oops/oop.inline.hpp" +#include "oops/typeArrayKlass.hpp" +#include "oops/typeArrayOop.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/os.hpp" +#include "runtime/thread.inline.hpp" +#include "utilities/exceptions.hpp" + +static Symbol* jvm_upcalls_class_sym = NULL; +static Symbol* on_retransform_method_sym = NULL; +static Symbol* on_retransform_signature_sym = NULL; +static Symbol* bytes_for_eager_instrumentation_sym = NULL; +static Symbol* bytes_for_eager_instrumentation_sig_sym = NULL; + +static bool initialize(TRAPS) { + static bool initialized = false; + if (!initialized) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + jvm_upcalls_class_sym = SymbolTable::new_permanent_symbol("jdk/jfr/internal/JVMUpcalls", CHECK_false); + on_retransform_method_sym = SymbolTable::new_permanent_symbol("onRetransform", CHECK_false); + on_retransform_signature_sym = SymbolTable::new_permanent_symbol("(JZLjava/lang/Class;[B)[B", CHECK_false); + bytes_for_eager_instrumentation_sym = SymbolTable::new_permanent_symbol("bytesForEagerInstrumentation", CHECK_false); + bytes_for_eager_instrumentation_sig_sym = SymbolTable::new_permanent_symbol("(JZLjava/lang/Class;[B)[B", THREAD); + initialized = bytes_for_eager_instrumentation_sig_sym != NULL; + } + return initialized; +} + +static const typeArrayOop invoke(jlong trace_id, + jboolean force_instrumentation, + jclass class_being_redefined, + jint class_data_len, + const unsigned char* class_data, + Symbol* method_sym, + Symbol* signature_sym, + jint& new_bytes_length, + TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + const Klass* klass = SystemDictionary::resolve_or_fail(jvm_upcalls_class_sym, true, CHECK_NULL); + assert(klass != NULL, "invariant"); + typeArrayOop old_byte_array = oopFactory::new_byteArray(class_data_len, CHECK_NULL); + memcpy(old_byte_array->byte_at_addr(0), class_data, class_data_len); + JavaValue result(T_OBJECT); + JfrJavaArguments args(&result, klass, method_sym, signature_sym); + args.push_long(trace_id); + args.push_int(force_instrumentation); + args.push_jobject(class_being_redefined); + args.push_oop(old_byte_array); + JfrJavaSupport::call_static(&args, THREAD); + if (HAS_PENDING_EXCEPTION) { + log_error(jfr, system)("JfrUpcall failed"); + return NULL; + } + // The result should be a [B + const oop res = (oop)result.get_jobject(); + assert(res != NULL, "invariant"); + assert(res->is_typeArray(), "invariant"); + assert(TypeArrayKlass::cast(res->klass())->element_type() == T_BYTE, "invariant"); + const typeArrayOop new_byte_array = typeArrayOop(res); + new_bytes_length = (jint)new_byte_array->length(); + 
return new_byte_array; +} + +static const size_t ERROR_MSG_BUFFER_SIZE = 256; +static void log_error_and_throw_oom(jint new_bytes_length, TRAPS) { + char error_buffer[ERROR_MSG_BUFFER_SIZE]; + jio_snprintf(error_buffer, ERROR_MSG_BUFFER_SIZE, + "Thread local allocation (native) for " SIZE_FORMAT " bytes failed in JfrUpcalls", (size_t)new_bytes_length); + log_error(jfr, system)("%s", error_buffer); + JfrJavaSupport::throw_out_of_memory_error(error_buffer, CHECK); +} + +void JfrUpcalls::on_retransform(jlong trace_id, + jclass class_being_redefined, + jint class_data_len, + const unsigned char* class_data, + jint* new_class_data_len, + unsigned char** new_class_data, + TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + assert(class_being_redefined != NULL, "invariant"); + assert(class_data != NULL, "invariant"); + assert(new_class_data_len != NULL, "invariant"); + assert(new_class_data != NULL, "invariant"); + if (!JdkJfrEvent::is_visible(class_being_redefined)) { + return; + } + jint new_bytes_length = 0; + initialize(THREAD); + const typeArrayOop new_byte_array = invoke(trace_id, + false, + class_being_redefined, + class_data_len, + class_data, + on_retransform_method_sym, + on_retransform_signature_sym, + new_bytes_length, + CHECK); + assert(new_byte_array != NULL, "invariant"); + assert(new_bytes_length > 0, "invariant"); + // memory space must be malloced as mtInternal + // as it will be deallocated by JVMTI routines + unsigned char* const new_bytes = (unsigned char* const)os::malloc(new_bytes_length, mtInternal); + if (new_bytes == NULL) { + log_error_and_throw_oom(new_bytes_length, THREAD); // unwinds + } + assert(new_bytes != NULL, "invariant"); + memcpy(new_bytes, new_byte_array->byte_at_addr(0), (size_t)new_bytes_length); + *new_class_data_len = new_bytes_length; + *new_class_data = new_bytes; +} + +void JfrUpcalls::new_bytes_eager_instrumentation(jlong trace_id, + jboolean force_instrumentation, + jclass super, + jint class_data_len, + const unsigned char* class_data, + jint* new_class_data_len, + unsigned char** new_class_data, + TRAPS) { + DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); + assert(super != NULL, "invariant"); + assert(class_data != NULL, "invariant"); + assert(new_class_data_len != NULL, "invariant"); + assert(new_class_data != NULL, "invariant"); + jint new_bytes_length = 0; + initialize(THREAD); + const typeArrayOop new_byte_array = invoke(trace_id, + force_instrumentation, + super, + class_data_len, + class_data, + bytes_for_eager_instrumentation_sym, + bytes_for_eager_instrumentation_sig_sym, + new_bytes_length, + CHECK); + assert(new_byte_array != NULL, "invariant"); + assert(new_bytes_length > 0, "invariant"); + unsigned char* const new_bytes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, unsigned char, new_bytes_length); + if (new_bytes == NULL) { + log_error_and_throw_oom(new_bytes_length, THREAD); // this unwinds + } + assert(new_bytes != NULL, "invariant"); + memcpy(new_bytes, new_byte_array->byte_at_addr(0), (size_t)new_bytes_length); + *new_class_data_len = new_bytes_length; + *new_class_data = new_bytes; +} diff --git a/src/share/vm/jfr/jni/jfrUpcalls.hpp b/src/share/vm/jfr/jni/jfrUpcalls.hpp new file mode 100644 index 0000000000000000000000000000000000000000..70c9bf2ae94f6c14ab276742467828a552eac7e1 --- /dev/null +++ b/src/share/vm/jfr/jni/jfrUpcalls.hpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_JNI_JFRUPCALLS_HPP +#define SHARE_VM_JFR_JNI_JFRUPCALLS_HPP + +#include "jni.h" +#include "jfr/utilities/jfrAllocation.hpp" +#include "utilities/exceptions.hpp" + +class JavaThread; + +// +// Upcalls to Java for instrumentation purposes. +// Targets are located in jdk.jfr.internal.JVMUpcalls. +// +class JfrUpcalls : AllStatic { + public: + static void new_bytes_eager_instrumentation(jlong trace_id, + jboolean force_instrumentation, + jclass super, + jint class_data_len, + const unsigned char* class_data, + jint* new_class_data_len, + unsigned char** new_class_data, + TRAPS); + + static void on_retransform(jlong trace_id, + jclass class_being_redefined, + jint class_data_len, + const unsigned char* class_data, + jint* new_class_data_len, + unsigned char** new_class_data, + TRAPS); +}; + +#endif // SHARE_VM_JFR_JNI_JFRUPCALLS_HPP diff --git a/src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp new file mode 100644 index 0000000000000000000000000000000000000000..09bb3f8006fb2c8738fc87070ef1a31eeb172ff3 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#include "precompiled.hpp" +#include "jfr/leakprofiler/chains/bitset.hpp" +#include "jfr/leakprofiler/chains/bfsClosure.hpp" +#include "jfr/leakprofiler/chains/dfsClosure.hpp" +#include "jfr/leakprofiler/chains/edge.hpp" +#include "jfr/leakprofiler/chains/edgeStore.hpp" +#include "jfr/leakprofiler/chains/edgeQueue.hpp" +#include "jfr/leakprofiler/utilities/granularTimer.hpp" +#include "jfr/leakprofiler/utilities/unifiedOop.hpp" +#include "memory/resourceArea.hpp" +#include "jfr/utilities/align.hpp" + +BFSClosure::BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits) : + _edge_queue(edge_queue), + _edge_store(edge_store), + _mark_bits(mark_bits), + _current_parent(NULL), + _current_frontier_level(0), + _next_frontier_idx(0), + _prev_frontier_idx(0), + _dfs_fallback_idx(0), + _use_dfs(false) { +} + +static void log_frontier_level_summary(size_t level, + size_t high_idx, + size_t low_idx, + size_t edge_size) { + const size_t nof_edges_in_frontier = high_idx - low_idx; + log_trace(jfr, system)( + "BFS front: " SIZE_FORMAT " edges: " SIZE_FORMAT " size: " SIZE_FORMAT " [KB]", + level, + nof_edges_in_frontier, + (nof_edges_in_frontier * edge_size) / K + ); +} + +void BFSClosure::log_completed_frontier() const { + log_frontier_level_summary(_current_frontier_level, + _next_frontier_idx, + _prev_frontier_idx, + _edge_queue->sizeof_edge()); +} + +void BFSClosure::log_dfs_fallback() const { + const size_t edge_size = _edge_queue->sizeof_edge(); + // first complete summary for frontier in progress + log_frontier_level_summary(_current_frontier_level, + _next_frontier_idx, + _prev_frontier_idx, + edge_size); + + // and then also complete the last frontier + log_frontier_level_summary(_current_frontier_level + 1, + _edge_queue->bottom(), + _next_frontier_idx, + edge_size); + + // additional information about DFS fallover + log_trace(jfr, system)( + "BFS front: " SIZE_FORMAT " filled edge queue at edge: " SIZE_FORMAT, + _current_frontier_level, + _dfs_fallback_idx + ); + + const size_t nof_dfs_completed_edges = _edge_queue->bottom() - _dfs_fallback_idx; + log_trace(jfr, system)( + "DFS to complete " SIZE_FORMAT " edges size: " SIZE_FORMAT " [KB]", + nof_dfs_completed_edges, + (nof_dfs_completed_edges * edge_size) / K + ); +} + +void BFSClosure::process() { + + process_root_set(); + process_queue(); +} + +void BFSClosure::process_root_set() { + for (size_t idx = _edge_queue->bottom(); idx < _edge_queue->top(); ++idx) { + const Edge* edge = _edge_queue->element_at(idx); + assert(edge->parent() == NULL, "invariant"); + process(edge->reference(), edge->pointee()); + } +} + +void BFSClosure::process(const oop* reference, const oop pointee) { + closure_impl(reference, pointee); +} +void BFSClosure::closure_impl(const oop* reference, const oop pointee) { + assert(reference != NULL, "invariant"); + assert(UnifiedOop::dereference(reference) == pointee, "invariant"); + + if (GranularTimer::is_finished()) { + return; + } + + if (_use_dfs) { + assert(_current_parent != NULL, "invariant"); + DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, _current_parent); + return; + } + + if (!_mark_bits->is_marked(pointee)) { + _mark_bits->mark_obj(pointee); + // is the pointee a sample object? 
+ if (NULL == pointee->mark()) { + add_chain(reference, pointee); + } + + // if we are processing initial root set, don't add to queue + if (_current_parent != NULL) { + assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant"); + _edge_queue->add(_current_parent, reference); + } + + if (_edge_queue->is_full()) { + dfs_fallback(); + } + } +} + +void BFSClosure::add_chain(const oop* reference, const oop pointee) { + assert(pointee != NULL, "invariant"); + assert(NULL == pointee->mark(), "invariant"); + + const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2; + ResourceMark rm; + Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length); + size_t idx = 0; + chain[idx++] = Edge(NULL, reference); + // aggregate from breadth-first search + const Edge* current = _current_parent; + while (current != NULL) { + chain[idx++] = Edge(NULL, current->reference()); + current = current->parent(); + } + assert(length == idx, "invariant"); + _edge_store->add_chain(chain, length); +} + +void BFSClosure::dfs_fallback() { + assert(_edge_queue->is_full(), "invariant"); + _use_dfs = true; + _dfs_fallback_idx = _edge_queue->bottom(); + while (!_edge_queue->is_empty()) { + const Edge* edge = _edge_queue->remove(); + if (edge->pointee() != NULL) { + DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, edge); + } + } +} + +void BFSClosure::process_queue() { + assert(_current_frontier_level == 0, "invariant"); + assert(_next_frontier_idx == 0, "invariant"); + assert(_prev_frontier_idx == 0, "invariant"); + + _next_frontier_idx = _edge_queue->top(); + while (!is_complete()) { + iterate(_edge_queue->remove()); // edge_queue.remove() increments bottom + } +} + +void BFSClosure::step_frontier() const { + log_completed_frontier(); + ++_current_frontier_level; + _prev_frontier_idx = _next_frontier_idx; + _next_frontier_idx = _edge_queue->top(); +} + +bool BFSClosure::is_complete() const { + if (_edge_queue->bottom() < _next_frontier_idx) { + return false; + } + if (_edge_queue->bottom() > _next_frontier_idx) { + // fallback onto DFS as part of processing the frontier + assert(_dfs_fallback_idx >= _prev_frontier_idx, "invariant"); + assert(_dfs_fallback_idx < _next_frontier_idx, "invariant"); + log_dfs_fallback(); + return true; + } + assert(_edge_queue->bottom() == _next_frontier_idx, "invariant"); + if (_edge_queue->is_empty()) { + return true; + } + step_frontier(); + return false; +} + +void BFSClosure::iterate(const Edge* parent) { + assert(parent != NULL, "invariant"); + const oop pointee = parent->pointee(); + assert(pointee != NULL, "invariant"); + _current_parent = parent; + pointee->oop_iterate(this); +} + +void BFSClosure::do_oop(oop* ref) { + assert(ref != NULL, "invariant"); + assert(is_aligned(ref, HeapWordSize), "invariant"); + const oop pointee = *ref; + if (pointee != NULL) { + closure_impl(ref, pointee); + } +} + +void BFSClosure::do_oop(narrowOop* ref) { + assert(ref != NULL, "invariant"); + assert(is_aligned(ref, sizeof(narrowOop)), "invariant"); + const oop pointee = oopDesc::load_decode_heap_oop(ref); + if (pointee != NULL) { + closure_impl(UnifiedOop::encode(ref), pointee); + } +} diff --git a/src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp new file mode 100644 index 0000000000000000000000000000000000000000..4f41a921e6b62645aaf73d3a56506c949b33dc3b --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2014, 2019, Oracle 
and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP +#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP + +#include "memory/iterator.hpp" +#include "oops/oop.hpp" + +class BitSet; +class Edge; +class EdgeStore; +class EdgeQueue; + +// Class responsible for iterating the heap breadth-first +class BFSClosure : public ExtendedOopClosure { + private: + EdgeQueue* _edge_queue; + EdgeStore* _edge_store; + BitSet* _mark_bits; + const Edge* _current_parent; + mutable size_t _current_frontier_level; + mutable size_t _next_frontier_idx; + mutable size_t _prev_frontier_idx; + size_t _dfs_fallback_idx; + bool _use_dfs; + + void log_completed_frontier() const; + void log_dfs_fallback() const; + + bool is_complete() const; + void step_frontier() const; + + void closure_impl(const oop* reference, const oop pointee); + void add_chain(const oop* reference, const oop pointee); + void dfs_fallback(); + + void iterate(const Edge* parent); + void process(const oop* reference, const oop pointee); + + void process_root_set(); + void process_queue(); + + public: + BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits); + void process(); + + virtual void do_oop(oop* ref); + virtual void do_oop(narrowOop* ref); +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP diff --git a/src/share/vm/jfr/leakprofiler/chains/bitset.cpp b/src/share/vm/jfr/leakprofiler/chains/bitset.cpp new file mode 100644 index 0000000000000000000000000000000000000000..80fd4f876eb803ead85c0173c333c5cacf84120c --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/bitset.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" +#include "jfr/leakprofiler/chains/bitset.hpp" +#include "jfr/recorder/storage/jfrVirtualMemory.hpp" +#include "memory/memRegion.hpp" + +BitSet::BitSet(const MemRegion& covered_region) : + _vmm(NULL), + _region_start(covered_region.start()), + _region_size(covered_region.word_size()) { +} + +BitSet::~BitSet() { + delete _vmm; +} + +bool BitSet::initialize() { + assert(_vmm == NULL, "invariant"); + _vmm = new JfrVirtualMemory(); + if (_vmm == NULL) { + return false; + } + + const BitMap::idx_t bits = _region_size >> LogMinObjAlignment; + const size_t words = bits / BitsPerWord; + const size_t raw_bytes = words * sizeof(BitMap::idx_t); + + // the virtual memory invocation will reserve and commit the entire space + BitMap::bm_word_t* map = (BitMap::bm_word_t*)_vmm->initialize(raw_bytes, raw_bytes); + if (map == NULL) { + return false; + } + _bits = BitMapView(map, bits); + return true; +} + diff --git a/src/share/vm/jfr/leakprofiler/chains/bitset.hpp b/src/share/vm/jfr/leakprofiler/chains/bitset.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6e2584ca13ab6783b2014265995ceb8ea66efa2f --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/bitset.hpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_BITSET_HPP +#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_BITSET_HPP + +#include "memory/allocation.hpp" +#include "oops/oopsHierarchy.hpp" +#include "utilities/bitMap.inline.hpp" + +class JfrVirtualMemory; +class MemRegion; + +class BitSet : public CHeapObj { + private: + JfrVirtualMemory* _vmm; + const HeapWord* const _region_start; + BitMapView _bits; + const size_t _region_size; + + public: + BitSet(const MemRegion& covered_region); + ~BitSet(); + + bool initialize(); + + BitMap::idx_t mark_obj(const HeapWord* addr) { + const BitMap::idx_t bit = addr_to_bit(addr); + _bits.par_set_bit(bit); + return bit; + } + + BitMap::idx_t mark_obj(oop obj) { + return mark_obj((HeapWord*)obj); + } + + bool is_marked(const HeapWord* addr) const { + return is_marked(addr_to_bit(addr)); + } + + bool is_marked(oop obj) const { + return is_marked((HeapWord*)obj); + } + + BitMap::idx_t size() const { + return _bits.size(); + } + + BitMap::idx_t addr_to_bit(const HeapWord* addr) const { + return pointer_delta(addr, _region_start) >> LogMinObjAlignment; + } + + bool is_marked(const BitMap::idx_t bit) const { + return _bits.at(bit); + } +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_BITSET_HPP diff --git a/src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bed8140f6077fabe7e1a3e3661f56ec0628cbccf --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/leakprofiler/chains/dfsClosure.hpp" +#include "jfr/leakprofiler/chains/edge.hpp" +#include "jfr/leakprofiler/chains/edgeStore.hpp" +#include "jfr/leakprofiler/utilities/granularTimer.hpp" +#include "jfr/leakprofiler/chains/bitset.hpp" +#include "jfr/leakprofiler/utilities/unifiedOop.hpp" +#include "jfr/leakprofiler/utilities/rootType.hpp" +#include "jfr/leakprofiler/chains/rootSetClosure.hpp" +#include "memory/resourceArea.hpp" +#include "jfr/utilities/align.hpp" + +// max dfs depth should not exceed size of stack +static const size_t max_dfs_depth = 5000; + +EdgeStore* DFSClosure::_edge_store = NULL; +BitSet* DFSClosure::_mark_bits = NULL; +const Edge* DFSClosure::_start_edge = NULL; +size_t DFSClosure::_max_depth = max_dfs_depth; +bool DFSClosure::_ignore_root_set = false; + +DFSClosure::DFSClosure() : + _parent(NULL), + _reference(NULL), + _depth(0) { +} + +DFSClosure::DFSClosure(DFSClosure* parent, size_t depth) : + _parent(parent), + _reference(NULL), + _depth(depth) { +} + +void DFSClosure::find_leaks_from_edge(EdgeStore* edge_store, + BitSet* mark_bits, + const Edge* start_edge) { + assert(edge_store != NULL, "invariant"); + assert(mark_bits != NULL, "invariant"); + assert(start_edge != NULL, "invariant"); + + _edge_store = edge_store; + _mark_bits = mark_bits; + _start_edge = start_edge; + _ignore_root_set = false; + assert(_max_depth == max_dfs_depth, "invariant"); + + // Depth-first search, starting from a BFS edge + DFSClosure dfs; + start_edge->pointee()->oop_iterate(&dfs); +} + +void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store, + BitSet* mark_bits) { + assert(edge_store != NULL, "invariant"); + assert(mark_bits != NULL, "invariant"); + + _edge_store = edge_store; + _mark_bits = mark_bits; + _start_edge = NULL; + + // Mark root set, to avoid going sideways + _max_depth = 1; + _ignore_root_set = false; + DFSClosure dfs1; + RootSetClosure::process_roots(&dfs1); + + // Depth-first search + _max_depth = max_dfs_depth; + _ignore_root_set = true; + assert(_start_edge == NULL, "invariant"); + DFSClosure dfs2; + RootSetClosure::process_roots(&dfs2); +} + +void DFSClosure::closure_impl(const oop* reference, const oop pointee) { + assert(pointee != NULL, "invariant"); + assert(reference != NULL, "invariant"); + + if (GranularTimer::is_finished()) { + return; + } + if (_depth == 0 && _ignore_root_set) { + // Root set is already marked, but we want + // to continue, so skip is_marked check. + assert(_mark_bits->is_marked(pointee), "invariant"); + } else { + if (_mark_bits->is_marked(pointee)) { + return; + } + } + + _reference = reference; + _mark_bits->mark_obj(pointee); + assert(_mark_bits->is_marked(pointee), "invariant"); + + // is the pointee a sample object? + if (NULL == pointee->mark()) { + add_chain(); + } + + assert(_max_depth >= 1, "invariant"); + if (_depth < _max_depth - 1) { + DFSClosure next_level(this, _depth + 1); + pointee->oop_iterate(&next_level); + } +} + +void DFSClosure::add_chain() { + const size_t length = _start_edge == NULL ? 
_depth + 1 : + _start_edge->distance_to_root() + 1 + _depth + 1; + + ResourceMark rm; + Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length); + size_t idx = 0; + + // aggregate from depth-first search + const DFSClosure* c = this; + while (c != NULL) { + chain[idx++] = Edge(NULL, c->reference()); + c = c->parent(); + } + + assert(idx == _depth + 1, "invariant"); + + // aggregate from breadth-first search + const Edge* current = _start_edge; + while (current != NULL) { + chain[idx++] = Edge(NULL, current->reference()); + current = current->parent(); + } + assert(idx == length, "invariant"); + _edge_store->add_chain(chain, length); +} + +void DFSClosure::do_oop(oop* ref) { + assert(ref != NULL, "invariant"); + assert(is_aligned(ref, HeapWordSize), "invariant"); + const oop pointee = *ref; + if (pointee != NULL) { + closure_impl(ref, pointee); + } +} + +void DFSClosure::do_oop(narrowOop* ref) { + assert(ref != NULL, "invariant"); + assert(is_aligned(ref, sizeof(narrowOop)), "invariant"); + const oop pointee = oopDesc::load_decode_heap_oop(ref); + if (pointee != NULL) { + closure_impl(UnifiedOop::encode(ref), pointee); + } +} diff --git a/src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cd66bc357a62ddbf1c9cb4db5b512af5f57e00d7 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP +#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP + +#include "memory/iterator.hpp" +#include "oops/oop.hpp" + +class BitSet; +class Edge; +class EdgeStore; +class EdgeQueue; + +// Class responsible for iterating the heap depth-first +class DFSClosure: public ExtendedOopClosure { + private: + static EdgeStore* _edge_store; + static BitSet* _mark_bits; + static const Edge*_start_edge; + static size_t _max_depth; + static bool _ignore_root_set; + DFSClosure* _parent; + const oop* _reference; + size_t _depth; + + void add_chain(); + void closure_impl(const oop* reference, const oop pointee); + + DFSClosure* parent() const { return _parent; } + const oop* reference() const { return _reference; } + + DFSClosure(DFSClosure* parent, size_t depth); + DFSClosure(); + + public: + static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge); + static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits); + + virtual void do_oop(oop* ref); + virtual void do_oop(narrowOop* ref); +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP diff --git a/src/share/vm/jfr/leakprofiler/chains/edge.cpp b/src/share/vm/jfr/leakprofiler/chains/edge.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6eb3cb60287f68289003aa6ac172f0dbfc0aaa9d --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/edge.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" +#include "jfr/leakprofiler/chains/edge.hpp" +#include "jfr/leakprofiler/utilities/unifiedOop.hpp" + +Edge::Edge() : _parent(NULL), _reference(NULL) {} + +Edge::Edge(const Edge* parent, const oop* reference) : _parent(parent), + _reference(reference) {} + +const oop Edge::pointee() const { + return UnifiedOop::dereference(_reference); +} + +const oop Edge::reference_owner() const { + return is_root() ? (oop)NULL : UnifiedOop::dereference(_parent->reference()); +} + +static const Klass* resolve_klass(const oop obj) { + assert(obj != NULL, "invariant"); + return java_lang_Class::is_instance(obj) ? + java_lang_Class::as_Klass(obj) : obj->klass(); +} + +const Klass* Edge::pointee_klass() const { + return resolve_klass(pointee()); +} + +const Klass* Edge::reference_owner_klass() const { + const oop ref_owner = reference_owner(); + return ref_owner != NULL ? 
resolve_klass(ref_owner) : NULL; +} + +size_t Edge::distance_to_root() const { + size_t depth = 0; + const Edge* current = _parent; + while (current != NULL) { + depth++; + current = current->parent(); + } + return depth; +} diff --git a/src/share/vm/jfr/leakprofiler/chains/edge.hpp b/src/share/vm/jfr/leakprofiler/chains/edge.hpp new file mode 100644 index 0000000000000000000000000000000000000000..bf23ac2a163620ce9e3a4c969ebf5cbd1fde662b --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/edge.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGE_HPP +#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGE_HPP + +#include "memory/allocation.hpp" +#include "oops/oopsHierarchy.hpp" + +class Edge { + private: + const Edge* _parent; + const oop* _reference; + public: + Edge(); + Edge(const Edge* parent, const oop* reference); + + const oop* reference() const { + return _reference; + } + const Edge* parent() const { + return _parent; + } + bool is_root() const { + return _parent == NULL; + } + const oop pointee() const; + const Klass* pointee_klass() const; + const oop reference_owner() const; + const Klass* reference_owner_klass() const; + size_t distance_to_root() const; + + void* operator new (size_t sz, void* here) { + return here; + } +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGE_HPP diff --git a/src/share/vm/jfr/leakprofiler/chains/edgeQueue.cpp b/src/share/vm/jfr/leakprofiler/chains/edgeQueue.cpp new file mode 100644 index 0000000000000000000000000000000000000000..83f2e79afac5f8645b9d097164c554fc66b66eaa --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/edgeQueue.cpp @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
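An Edge records only its parent edge and the slot the reference was found in, so distance_to_root() is a plain parent walk. A small self-contained sketch of that idea, with illustrative names (SimpleEdge) standing in for the Edge/oop types:

// Standalone sketch of the parent-chain walk used by Edge::distance_to_root().
#include <cstddef>
#include <cassert>

struct SimpleEdge {
  const SimpleEdge* parent;
  const void* slot;                  // stands in for 'const oop* reference'
  size_t distance_to_root() const {
    size_t depth = 0;
    for (const SimpleEdge* e = parent; e != NULL; e = e->parent) {
      ++depth;
    }
    return depth;
  }
};

int main() {
  int a = 0, b = 0;
  SimpleEdge root  = { NULL, &a };   // root edge: no parent
  SimpleEdge child = { &root, &b };  // reference found while scanning root's pointee
  assert(root.distance_to_root() == 0);
  assert(child.distance_to_root() == 1);
  return 0;
}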
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jfr/leakprofiler/chains/edgeQueue.hpp" +#include "jfr/recorder/storage/jfrVirtualMemory.hpp" + +EdgeQueue::EdgeQueue(size_t reservation_size_bytes, size_t commit_block_size_bytes) : + _vmm(NULL), + _reservation_size_bytes(reservation_size_bytes), + _commit_block_size_bytes(commit_block_size_bytes), + _top_index(0), + _bottom_index(0) { +} + +bool EdgeQueue::initialize() { + assert(_reservation_size_bytes >= _commit_block_size_bytes, "invariant"); + assert(_vmm == NULL, "invariant"); + _vmm = new JfrVirtualMemory(); + return _vmm != NULL && _vmm->initialize(_reservation_size_bytes, _commit_block_size_bytes, sizeof(Edge)); +} + +EdgeQueue::~EdgeQueue() { + delete _vmm; +} + +void EdgeQueue::add(const Edge* parent, const oop* ref) { + assert(ref != NULL, "Null objects not allowed in EdgeQueue"); + assert(!is_full(), "EdgeQueue is full. Check is_full before adding another Edge"); + assert(!_vmm->is_full(), "invariant"); + void* const allocation = _vmm->new_datum(); + assert(allocation != NULL, "invariant"); + new (allocation)Edge(parent, ref); + _top_index++; + assert(_vmm->count() == _top_index, "invariant"); +} + +size_t EdgeQueue::top() const { + return _top_index; +} + +size_t EdgeQueue::bottom() const { + return EdgeQueue::_bottom_index; +} + +bool EdgeQueue::is_empty() const { + return _top_index == _bottom_index; +} + +bool EdgeQueue::is_full() const { + return _vmm->is_full(); +} + +const Edge* EdgeQueue::remove() const { + assert(!is_empty(), "EdgeQueue is empty. Check if empty before removing Edge"); + assert(!_vmm->is_empty(), "invariant"); + return (const Edge*)_vmm->get(_bottom_index++); +} + +const Edge* EdgeQueue::element_at(size_t index) const { + assert(index >= _bottom_index, "invariant"); + assert(index <_top_index, "invariant"); + return (Edge*)_vmm->get(index); +} + +size_t EdgeQueue::reserved_size() const { + assert(_vmm != NULL, "invariant"); + return _vmm->reserved_size(); +} + +size_t EdgeQueue::live_set() const { + assert(_vmm != NULL, "invariant"); + return _vmm->live_set(); +} + +size_t EdgeQueue::sizeof_edge() const { + assert(_vmm != NULL, "invariant"); + return _vmm->aligned_datum_size_bytes(); +} diff --git a/src/share/vm/jfr/leakprofiler/chains/edgeQueue.hpp b/src/share/vm/jfr/leakprofiler/chains/edgeQueue.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3f9eab5412bf9e0ababc2049f96240c197223c27 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/edgeQueue.hpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGEQUEUE_HPP +#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGEQUEUE_HPP + +#include "memory/allocation.hpp" +#include "jfr/leakprofiler/chains/edge.hpp" + +class JfrVirtualMemory; + +class EdgeQueue : public CHeapObj { + private: + JfrVirtualMemory* _vmm; + const size_t _reservation_size_bytes; + const size_t _commit_block_size_bytes; + mutable size_t _top_index; + mutable size_t _bottom_index; + public: + EdgeQueue(size_t reservation_size_bytes, size_t commit_block_size_bytes); + ~EdgeQueue(); + + bool initialize(); + + void add(const Edge* parent, const oop* ref); + const Edge* remove() const; + const Edge* element_at(size_t index) const; + + size_t top() const; + size_t bottom() const; + bool is_empty() const; + bool is_full() const; + + size_t reserved_size() const; + size_t live_set() const; + size_t sizeof_edge() const; // with alignments +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_EDGEQUEUE_HPP diff --git a/src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp b/src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7c431d8edf5e62d986ec9d74db24409b34364f03 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/leakprofiler/chains/edgeStore.hpp" +#include "jfr/leakprofiler/chains/edgeUtils.hpp" +#include "oops/oop.hpp" + +RoutableEdge::RoutableEdge() : Edge() {} +RoutableEdge::RoutableEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), + _skip_edge(NULL), + _skip_length(0), + _processed(false) {} + +RoutableEdge::RoutableEdge(const Edge& edge) : Edge(edge), + _skip_edge(NULL), + _skip_length(0), + _processed(false) {} + +RoutableEdge::RoutableEdge(const RoutableEdge& edge) : Edge(edge), + _skip_edge(edge._skip_edge), + _skip_length(edge._skip_length), + _processed(edge._processed) {} + +void RoutableEdge::operator=(const RoutableEdge& edge) { + Edge::operator=(edge); + _skip_edge = edge._skip_edge; + _skip_length = edge._skip_length; + _processed = edge._processed; +} + +size_t RoutableEdge::logical_distance_to_root() const { + size_t depth = 0; + const RoutableEdge* current = logical_parent(); + while (current != NULL) { + depth++; + current = current->logical_parent(); + } + return depth; +} + +traceid EdgeStore::_edge_id_counter = 0; + +EdgeStore::EdgeStore() : _edges(NULL) { + _edges = new EdgeHashTable(this); +} + +EdgeStore::~EdgeStore() { + assert(_edges != NULL, "invariant"); + delete _edges; + _edges = NULL; +} + +const Edge* EdgeStore::get_edge(const Edge* edge) const { + assert(edge != NULL, "invariant"); + EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference()); + return entry != NULL ? entry->literal_addr() : NULL; +} + +const Edge* EdgeStore::put(const Edge* edge) { + assert(edge != NULL, "invariant"); + const RoutableEdge e = *edge; + assert(NULL == _edges->lookup_only(e, (uintptr_t)e.reference()), "invariant"); + EdgeEntry& entry = _edges->put(e, (uintptr_t)e.reference()); + return entry.literal_addr(); +} + +traceid EdgeStore::get_id(const Edge* edge) const { + assert(edge != NULL, "invariant"); + EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference()); + assert(entry != NULL, "invariant"); + return entry->id(); +} + +traceid EdgeStore::get_root_id(const Edge* edge) const { + assert(edge != NULL, "invariant"); + const Edge* root = EdgeUtils::root(*edge); + assert(root != NULL, "invariant"); + return get_id(root); +} + +void EdgeStore::add_chain(const Edge* chain, size_t length) { + assert(chain != NULL, "invariant"); + assert(length > 0, "invariant"); + + size_t bottom_index = length - 1; + const size_t top_index = 0; + + const Edge* stored_parent_edge = NULL; + + // determine level of shared ancestry + for (; bottom_index > top_index; --bottom_index) { + const Edge* stored_edge = get_edge(&chain[bottom_index]); + if (stored_edge != NULL) { + stored_parent_edge = stored_edge; + continue; + } + break; + } + + // insertion of new Edges + for (int i = (int)bottom_index; i >= (int)top_index; --i) { + Edge edge(stored_parent_edge, chain[i].reference()); + stored_parent_edge = put(&edge); + } + + const oop sample_object = stored_parent_edge->pointee(); + assert(sample_object != NULL, "invariant"); + assert(NULL == sample_object->mark(), "invariant"); + + // Install the "top" edge of the chain into the sample object mark oop. + // This associates the sample object with its navigable reference chain. 
+ sample_object->set_mark(markOop(stored_parent_edge)); +} + +bool EdgeStore::is_empty() const { + return !_edges->has_entries(); +} + +size_t EdgeStore::number_of_entries() const { + return _edges->cardinality(); +} + +void EdgeStore::assign_id(EdgeEntry* entry) { + assert(entry != NULL, "invariant"); + assert(entry->id() == 0, "invariant"); + entry->set_id(++_edge_id_counter); +} + +bool EdgeStore::equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry) { + assert(entry != NULL, "invariant"); + assert(entry->hash() == hash, "invariant"); + return true; +} diff --git a/src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp b/src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp new file mode 100644 index 0000000000000000000000000000000000000000..4fdaffec4acbac13c319be3d27c99347a549002c --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP +#define SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP + +#include "jfr/utilities/jfrHashtable.hpp" +#include "jfr/leakprofiler/chains/edge.hpp" +#include "memory/allocation.hpp" + +typedef u8 traceid; + +class RoutableEdge : public Edge { + private: + mutable const RoutableEdge* _skip_edge; + mutable size_t _skip_length; + mutable bool _processed; + + public: + RoutableEdge(); + RoutableEdge(const Edge* parent, const oop* reference); + RoutableEdge(const Edge& edge); + RoutableEdge(const RoutableEdge& edge); + void operator=(const RoutableEdge& edge); + + const RoutableEdge* skip_edge() const { return _skip_edge; } + size_t skip_length() const { return _skip_length; } + + bool is_skip_edge() const { return _skip_edge != NULL; } + bool processed() const { return _processed; } + bool is_sentinel() const { + return _skip_edge == NULL && _skip_length == 1; + } + + void set_skip_edge(const RoutableEdge* edge) const { + assert(!is_skip_edge(), "invariant"); + assert(edge != this, "invariant"); + _skip_edge = edge; + } + + void set_skip_length(size_t length) const { + _skip_length = length; + } + + void set_processed() const { + assert(!_processed, "invariant"); + _processed = true; + } + + // true navigation according to physical tree representation + const RoutableEdge* physical_parent() const { + return static_cast(parent()); + } + + // logical navigation taking skip levels into account + const RoutableEdge* logical_parent() const { + return is_skip_edge() ? 
skip_edge() : physical_parent(); + } + + size_t logical_distance_to_root() const; +}; + +class EdgeStore : public CHeapObj { + typedef HashTableHost EdgeHashTable; + typedef EdgeHashTable::HashEntry EdgeEntry; + template class, + typename, + size_t> + friend class HashTableHost; + private: + static traceid _edge_id_counter; + EdgeHashTable* _edges; + + // Hash table callbacks + void assign_id(EdgeEntry* entry); + bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry); + + const Edge* get_edge(const Edge* edge) const; + const Edge* put(const Edge* edge); + + public: + EdgeStore(); + ~EdgeStore(); + + void add_chain(const Edge* chain, size_t length); + bool is_empty() const; + size_t number_of_entries() const; + + traceid get_id(const Edge* edge) const; + traceid get_root_id(const Edge* edge) const; + + template + void iterate_edges(T& functor) const { _edges->iterate_value(functor); } +}; + +#endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP diff --git a/src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eb1501422be82e853350fed846a656b4da894634 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp @@ -0,0 +1,311 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
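EdgeStore::add_chain() above de-duplicates shared ancestry: it walks the new chain from its root end, reuses the longest already-stored prefix, and inserts only the missing suffix with parents re-linked into the shared part. A minimal sketch of that idea using a std::map keyed by the reference slot (the key choice and types are simplifications, not the JFR hashtable):

// Minimal sketch of the add_chain() shared-ancestry reuse (illustrative only).
#include <cstddef>
#include <cassert>
#include <map>
#include <utility>

struct StoredEdge { const StoredEdge* parent; const void* slot; };

class SimpleEdgeStore {
 private:
  std::map<const void*, StoredEdge> _edges;   // keyed by reference slot
 public:
  const StoredEdge* get(const void* slot) const {
    std::map<const void*, StoredEdge>::const_iterator it = _edges.find(slot);
    return it == _edges.end() ? NULL : &it->second;
  }
  // chain[0] is the leaf, chain[length - 1] is the root
  const StoredEdge* add_chain(const void* const* chain, size_t length) {
    size_t bottom = length - 1;
    const StoredEdge* stored_parent = NULL;
    for (; bottom > 0; --bottom) {              // determine shared ancestry, root end first
      const StoredEdge* existing = get(chain[bottom]);
      if (existing == NULL) break;
      stored_parent = existing;
    }
    for (size_t i = bottom + 1; i-- > 0; ) {    // insert the missing suffix
      StoredEdge e = { stored_parent, chain[i] };
      stored_parent = &_edges.insert(std::make_pair(chain[i], e)).first->second;
    }
    return stored_parent;                       // corresponds to the leaf edge
  }
};

int main() {
  int r = 0, m = 0, a = 0, b = 0;
  const void* chain1[] = { &a, &m, &r };
  const void* chain2[] = { &b, &m, &r };
  SimpleEdgeStore store;
  const StoredEdge* leaf1 = store.add_chain(chain1, 3);
  const StoredEdge* leaf2 = store.add_chain(chain2, 3);
  assert(leaf1->parent == leaf2->parent);       // the edge for &m is stored once and shared
  return 0;
}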
+ * + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.hpp" +#include "jfr/leakprofiler/chains/edge.hpp" +#include "jfr/leakprofiler/chains/edgeStore.hpp" +#include "jfr/leakprofiler/chains/edgeUtils.hpp" +#include "jfr/leakprofiler/utilities/unifiedOop.hpp" +#include "oops/fieldStreams.hpp" +#include "oops/instanceKlass.hpp" +#include "oops/oopsHierarchy.hpp" +#include "runtime/handles.inline.hpp" + +bool EdgeUtils::is_leak_edge(const Edge& edge) { + return (const Edge*)edge.pointee()->mark() == &edge; +} + +bool EdgeUtils::is_root(const Edge& edge) { + return edge.is_root(); +} + +static int field_offset(const Edge& edge) { + assert(!edge.is_root(), "invariant"); + const oop ref_owner = edge.reference_owner(); + assert(ref_owner != NULL, "invariant"); + const oop* reference = UnifiedOop::decode(edge.reference()); + assert(reference != NULL, "invariant"); + assert(!UnifiedOop::is_narrow(reference), "invariant"); + assert(!ref_owner->is_array(), "invariant"); + assert(ref_owner->is_instance(), "invariant"); + const int offset = (int)pointer_delta(reference, ref_owner, sizeof(char)); + assert(offset < (ref_owner->size() * HeapWordSize), "invariant"); + return offset; +} + +static const InstanceKlass* field_type(const Edge& edge) { + assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant"); + return (const InstanceKlass*)edge.reference_owner_klass(); +} + +const Symbol* EdgeUtils::field_name_symbol(const Edge& edge) { + assert(!edge.is_root(), "invariant"); + assert(!is_array_element(edge), "invariant"); + const int offset = field_offset(edge); + const InstanceKlass* ik = field_type(edge); + while (ik != NULL) { + JavaFieldStream jfs(ik); + while (!jfs.done()) { + if (offset == jfs.offset()) { + return jfs.name(); + } + jfs.next(); + } + ik = (InstanceKlass*)ik->super(); + } + return NULL; +} + +jshort EdgeUtils::field_modifiers(const Edge& edge) { + const int offset = field_offset(edge); + const InstanceKlass* ik = field_type(edge); + + while (ik != NULL) { + JavaFieldStream jfs(ik); + while (!jfs.done()) { + if (offset == jfs.offset()) { + return jfs.access_flags().as_short(); + } + jfs.next(); + } + ik = (InstanceKlass*)ik->super(); + } + return 0; +} + +bool EdgeUtils::is_array_element(const Edge& edge) { + assert(!edge.is_root(), "invariant"); + const oop ref_owner = edge.reference_owner(); + assert(ref_owner != NULL, "invariant"); + return ref_owner->is_objArray(); +} + +static int array_offset(const Edge& edge) { + assert(!edge.is_root(), "invariant"); + const oop ref_owner = edge.reference_owner(); + assert(ref_owner != NULL, "invariant"); + const oop* reference = UnifiedOop::decode(edge.reference()); + assert(reference != NULL, "invariant"); + assert(!UnifiedOop::is_narrow(reference), "invariant"); + assert(ref_owner->is_array(), "invariant"); + const objArrayOop ref_owner_array = static_cast(ref_owner); + const int offset = (int)pointer_delta(reference, ref_owner_array->base(), heapOopSize); + assert(offset >= 0 && offset < ref_owner_array->length(), "invariant"); + return offset; +} + +int EdgeUtils::array_index(const Edge& edge) { + return is_array_element(edge) ? 
array_offset(edge) : 0; +} + +int EdgeUtils::array_size(const Edge& edge) { + if (is_array_element(edge)) { + const oop ref_owner = edge.reference_owner(); + assert(ref_owner != NULL, "invariant"); + assert(ref_owner->is_objArray(), "invariant"); + return ((objArrayOop)(ref_owner))->length(); + } + return 0; +} + +const Edge* EdgeUtils::root(const Edge& edge) { + const Edge* current = &edge; + const Edge* parent = current->parent(); + while (parent != NULL) { + current = parent; + parent = current->parent(); + } + return current; +} + +// The number of references associated with the leak node; +// can be viewed as the leak node "context". +// Used to provide leak context for a "capped/skipped" reference chain. +static const size_t leak_context = 100; + +// The number of references associated with the root node; +// can be viewed as the root node "context". +// Used to provide root context for a "capped/skipped" reference chain. +static const size_t root_context = 100; + +// A limit on the reference chain depth to be serialized, +static const size_t max_ref_chain_depth = leak_context + root_context; + +const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) { + const RoutableEdge* current = &edge; + const RoutableEdge* parent = current->physical_parent(); + size_t seek = 0; + while (parent != NULL && seek != skip_length) { + seek++; + current = parent; + parent = parent->physical_parent(); + } + return current; +} + +#ifdef ASSERT +static void validate_skip_target(const RoutableEdge* skip_target) { + assert(skip_target != NULL, "invariant"); + assert(skip_target->distance_to_root() + 1 == root_context, "invariant"); + assert(skip_target->is_sentinel(), "invariant"); +} + +static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) { + assert(new_skip_edge != NULL, "invariant"); + assert(new_skip_edge->is_skip_edge(), "invariant"); + if (last_skip_edge != NULL) { + const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment); + validate_skip_target(target->logical_parent()); + return; + } + assert(last_skip_edge == NULL, "invariant"); + // only one level of logical indirection + validate_skip_target(new_skip_edge->logical_parent()); +} +#endif // ASSERT + +static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) { + assert(new_skip_edge != NULL, "invariant"); + assert(!new_skip_edge->is_skip_edge(), "invariant"); + assert(!new_skip_edge->processed(), "invariant"); + const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance); + assert(skip_target != NULL, "invariant"); + new_skip_edge->set_skip_edge(skip_target); + new_skip_edge->set_skip_length(skip_target_distance); + assert(new_skip_edge->is_skip_edge(), "invariant"); + assert(new_skip_edge->logical_parent() == skip_target, "invariant"); +} + +static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) { + assert(distance == 0, "invariant"); + const RoutableEdge* current = &edge; + while (current != NULL) { + if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) { + return current; + } + current = current->physical_parent(); + ++distance; + } + return current; +} + +static void collapse_overlapping_chain(const RoutableEdge& edge, + const RoutableEdge* first_processed_edge, + size_t first_processed_distance) { + assert(first_processed_edge != NULL, "invariant"); + // first_processed_edge is already processed / written + 
assert(first_processed_edge->processed(), "invariant");
+  assert(first_processed_distance + 1 <= leak_context, "invariant");
+
+  // from this first processed edge, attempt to fetch the last skip edge
+  size_t last_skip_edge_distance = 0;
+  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
+  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;
+
+  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
+    // complete chain can be accommodated without modification
+    return;
+  }
+
+  // backtrack one edge from existing processed edge
+  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
+  assert(new_skip_edge != NULL, "invariant");
+  assert(!new_skip_edge->processed(), "invariant");
+  assert(new_skip_edge->parent() == first_processed_edge, "invariant");
+
+  size_t adjustment = 0;
+  if (last_skip_edge != NULL) {
+    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
+    adjustment = leak_context - first_processed_distance - 1;
+    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
+    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
+  } else {
+    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
+    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
+  }
+
+  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
+}
+
+static void collapse_non_overlapping_chain(const RoutableEdge& edge,
+                                           const RoutableEdge* first_processed_edge,
+                                           size_t first_processed_distance) {
+  assert(first_processed_edge != NULL, "invariant");
+  assert(!first_processed_edge->processed(), "invariant");
+  // this implies that the first "processed" edge is the leak context relative "leaf"
+  assert(first_processed_distance + 1 == leak_context, "invariant");
+
+  const size_t distance_to_root = edge.distance_to_root();
+  if (distance_to_root + 1 <= max_ref_chain_depth) {
+    // complete chain can be accommodated without constructing a skip edge
+    return;
+  }
+
+  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
+  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel
+
+  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
+}
+
+static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
+  assert(distance == 0, "invariant");
+  const RoutableEdge* current = &edge;
+  while (current != NULL && distance < leak_context - 1) {
+    if (current->processed()) {
+      return current;
+    }
+    current = current->physical_parent();
+    ++distance;
+  }
+  assert(distance <= leak_context - 1, "invariant");
+  return current;
+}
+
+/*
+ * Some vocabulary:
+ * -----------
+ * "Context" is an interval in the chain; it is associated with an edge and signifies a number of connected edges.
+ * "Processed / written" means an edge that has already been serialized.
+ * "Skip edge" is an edge that contains additional information for logical routing purposes.
+ * "Skip target" is an edge used as a destination for a skip edge + */ +void EdgeUtils::collapse_chain(const RoutableEdge& edge) { + assert(is_leak_edge(edge), "invariant"); + + // attempt to locate an already processed edge inside current leak context (if any) + size_t first_processed_distance = 0; + const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance); + if (first_processed_edge == NULL) { + return; + } + + if (first_processed_edge->processed()) { + collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance); + } else { + collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance); + } + + assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant"); +} diff --git a/src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..acae0368517fb0268da3beb5c46d5e597de61009 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_LEAKPROFILER_CHAINS_EDGEUTILS_HPP +#define SHARE_VM_LEAKPROFILER_CHAINS_EDGEUTILS_HPP + +#include "memory/allocation.hpp" + +class Edge; +class RoutableEdge; +class Symbol; + +class EdgeUtils : public AllStatic { + public: + static bool is_leak_edge(const Edge& edge); + + static const Edge* root(const Edge& edge); + static bool is_root(const Edge& edge); + + static bool is_array_element(const Edge& edge); + static int array_index(const Edge& edge); + static int array_size(const Edge& edge); + + static const Symbol* field_name_symbol(const Edge& edge); + static jshort field_modifiers(const Edge& edge); + + static void collapse_chain(const RoutableEdge& edge); +}; + +#endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGEUTILS_HPP diff --git a/src/share/vm/jfr/leakprofiler/chains/objectSampleMarker.hpp b/src/share/vm/jfr/leakprofiler/chains/objectSampleMarker.hpp new file mode 100644 index 0000000000000000000000000000000000000000..801c8c92b8c263b30aa2ae6367c628680f6faf61 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/objectSampleMarker.hpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_OBJECTSAMPLEMARKER_HPP
+#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_OBJECTSAMPLEMARKER_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/markOop.hpp"
+#include "utilities/growableArray.hpp"
+//
+// This class will save the original mark oop of an object sample object.
+// It will then install an "identifier" mark oop to be used for
+// identification purposes in the search for reference chains.
+// The destructor will restore each modified oop with its original mark oop.
+//
+class ObjectSampleMarker : public StackObj {
+ private:
+  class ObjectSampleMarkOop : public ResourceObj {
+    friend class ObjectSampleMarker;
+   private:
+    oop _obj;
+    markOop _mark_oop;
+    ObjectSampleMarkOop(const oop obj,
+                        const markOop mark_oop) : _obj(obj),
+                                                  _mark_oop(mark_oop) {}
+   public:
+    ObjectSampleMarkOop() : _obj(NULL), _mark_oop(NULL) {}
+  };
+
+  GrowableArray<ObjectSampleMarkOop>* _store;
+
+ public:
+  ObjectSampleMarker() :
+       _store(new GrowableArray<ObjectSampleMarkOop>(16)) {}
+  ~ObjectSampleMarker() {
+    assert(_store != NULL, "invariant");
+    // restore the saved, original, markOop for sample objects
+    while (_store->is_nonempty()) {
+      ObjectSampleMarkOop sample_oop = _store->pop();
+      sample_oop._obj->set_mark(sample_oop._mark_oop);
+      assert(sample_oop._obj->mark() == sample_oop._mark_oop, "invariant");
+    }
+  }
+
+  void mark(oop obj) {
+    assert(obj != NULL, "invariant");
+    // save the original markOop
+    _store->push(ObjectSampleMarkOop(obj, obj->mark()));
+    // now we will "poison" the mark word of the sample object
+    // to the intermediate monitor INFLATING state.
+    // This is an "impossible" state during a safepoint,
+    // hence we will use it to quickly identify sample objects
+    // during the reachability search from gc roots.
+    assert(NULL == markOopDesc::INFLATING(), "invariant");
+    obj->set_mark(markOopDesc::INFLATING());
+    assert(NULL == obj->mark(), "invariant");
+  }
+};
+
+#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_OBJECTSAMPLEMARKER_HPP
diff --git a/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..83c02d5a9e0df71b851c7fbd63f395d21143c67b
--- /dev/null
+++ b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
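ObjectSampleMarker above is a save/poison/restore scope: each original mark word is stashed, overwritten with a sentinel that cannot otherwise occur at a safepoint, and restored in the destructor. A generic sketch of that pattern in plain C++ (int values and a -1 sentinel stand in for mark oops and INFLATING):

// Generic save/poison/restore sketch (illustrative, not the markOop code).
#include <cstddef>
#include <cassert>
#include <vector>

class ScopedSentinelMarker {
 private:
  struct Saved { int* location; int original; };
  std::vector<Saved> _store;
  const int _sentinel;
 public:
  explicit ScopedSentinelMarker(int sentinel) : _store(), _sentinel(sentinel) {}
  ~ScopedSentinelMarker() {                  // restore originals on scope exit
    while (!_store.empty()) {
      Saved s = _store.back();
      _store.pop_back();
      *s.location = s.original;
    }
  }
  void mark(int* location) {
    Saved s = { location, *location };       // save the original value
    _store.push_back(s);
    *location = _sentinel;                   // install the identifying sentinel
  }
  bool is_marked(const int* location) const { return *location == _sentinel; }
};

int main() {
  int word = 42;
  {
    ScopedSentinelMarker marker(-1);         // -1 plays the role of INFLATING
    marker.mark(&word);
    assert(marker.is_marked(&word));
  }
  assert(word == 42);                        // original value restored by the destructor
  return 0;
}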
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" +#include "classfile/systemDictionary.hpp" +#include "jfr/leakprofiler/chains/edgeQueue.hpp" +#include "jfr/leakprofiler/chains/rootSetClosure.hpp" +#include "jfr/leakprofiler/utilities/saveRestore.hpp" +#include "jfr/leakprofiler/utilities/unifiedOop.hpp" +#include "memory/universe.hpp" +#include "prims/jvmtiExport.hpp" +#include "runtime/synchronizer.hpp" +#include "runtime/thread.hpp" +#include "services/management.hpp" +#include "jfr/utilities/align.hpp" + +RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) : + _edge_queue(edge_queue) { +} + +void RootSetClosure::do_oop(oop* ref) { + assert(ref != NULL, "invariant"); + // We discard unaligned root references because + // our reference tagging scheme will use + // the lowest bit in a represented reference + // to indicate the reference is narrow. + // It is mainly roots delivered via nmethods::do_oops() + // that come in unaligned. It should be ok to duck these + // since they are supposedly weak. 
+ if (!is_aligned(ref, HeapWordSize)) { + return; + } + + assert(is_aligned(ref, HeapWordSize), "invariant"); + const oop pointee = *ref; + if (pointee != NULL) { + closure_impl(ref, pointee); + } +} + +void RootSetClosure::do_oop(narrowOop* ref) { + assert(ref != NULL, "invariant"); + assert(is_aligned(ref, sizeof(narrowOop)), "invariant"); + const oop pointee = oopDesc::load_decode_heap_oop(ref); + if (pointee != NULL) { + closure_impl(UnifiedOop::encode(ref), pointee); + } +} + +void RootSetClosure::closure_impl(const oop* reference, const oop pointee) { + if (!_edge_queue->is_full()) { + _edge_queue->add(NULL, reference); + } +} + +void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) { + RootSetClosure rs(edge_queue); + process_roots(&rs); +} + +class RootSetClosureMarkScope : public MarkingCodeBlobClosure::MarkScope { +}; + +void RootSetClosure::process_roots(OopClosure* closure) { + SaveRestoreCLDClaimBits save_restore_cld_claim_bits; + RootSetClosureMarkScope mark_scope; + + CLDToOopClosure cldt_closure(closure); + ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure); + CodeBlobToOopClosure blobs(closure, false); + Threads::oops_do(closure, &cldt_closure, &blobs); + ObjectSynchronizer::oops_do(closure); + Universe::oops_do(closure); + JNIHandles::oops_do(closure); + JvmtiExport::oops_do(closure); + SystemDictionary::always_strong_oops_do(closure); + Management::oops_do(closure); + StringTable::oops_do(closure); +} diff --git a/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a3ad9d7e00843a08396ef1ad86b6b636ada10521 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
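The alignment filter in do_oop() above exists because the reference tagging scheme borrows the lowest pointer bit to mark narrow references; that only works if untagged pointers are known to have a zero low bit. A standalone sketch of that low-bit tagging idea (illustrative, not the UnifiedOop implementation):

// Low-bit pointer tagging sketch: aligned pointers leave bit 0 free for a flag.
#include <cstdint>
#include <cassert>

static const uintptr_t NARROW_TAG = 1;

static const void* encode_narrow(const uint32_t* narrow_ref) {
  return reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(narrow_ref) | NARROW_TAG);
}
static bool is_narrow(const void* ref) {
  return (reinterpret_cast<uintptr_t>(ref) & NARROW_TAG) != 0;
}
static const uint32_t* decode_narrow(const void* ref) {
  return reinterpret_cast<const uint32_t*>(reinterpret_cast<uintptr_t>(ref) & ~NARROW_TAG);
}

int main() {
  uint32_t slot = 0;                         // 4-byte aligned, so bit 0 is guaranteed free
  const void* tagged = encode_narrow(&slot);
  assert(is_narrow(tagged));
  assert(decode_narrow(tagged) == &slot);    // round-trips only for aligned slots,
                                             // which is why unaligned roots are dropped
  return 0;
}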
+ * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP +#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP + +#include "memory/iterator.hpp" +#include "oops/oop.hpp" + +class EdgeQueue; + +class RootSetClosure: public ExtendedOopClosure { + private: + RootSetClosure(EdgeQueue* edge_queue); + EdgeQueue* _edge_queue; + void closure_impl(const oop* reference, const oop pointee); + public: + static void add_to_queue(EdgeQueue* edge_queue); + static void process_roots(OopClosure* closure); + + virtual void do_oop(oop* reference); + virtual void do_oop(narrowOop* reference); +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP diff --git a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp new file mode 100644 index 0000000000000000000000000000000000000000..707b2aa2ac9af66008cd6be8c9c20daddf3ecb0e --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/recorder/jfrRecorder.hpp" +#include "jfr/recorder/checkpoint/constant/traceid/jfrTraceId.inline.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" +#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" +#include "jfr/leakprofiler/chains/edgeStore.hpp" +#include "jfr/leakprofiler/chains/objectSampleMarker.hpp" +#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp" +#include "jfr/leakprofiler/checkpoint/objectSampleWriter.hpp" +#include "jfr/leakprofiler/leakProfiler.hpp" +#include "jfr/leakprofiler/sampling/objectSample.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" +#include "jfr/leakprofiler/utilities/rootType.hpp" +#include "jfr/metadata/jfrConstantSerializer.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/thread.inline.hpp" +// to get CONTENT_TYPE defines +#include "tracefiles/traceTypes.hpp" + +template +static void do_samples(ObjectSample* sample, const ObjectSample* const end, SampleProcessor& processor) { + assert(sample != NULL, "invariant"); + while (sample != end) { + processor.sample_do(sample); + sample = sample->next(); + } +} + +class ConstantTypeRootSystem : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer) { + const u4 nof_root_systems = OldObjectRoot::_number_of_systems; + writer.write_number_of_constants(nof_root_systems); + for (u4 i = 0; i < nof_root_systems; ++i) { + writer.write_key(i); + writer.write(OldObjectRoot::system_description((OldObjectRoot::System)i)); + } + } +}; + +class ConstantRootType : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer) { + const u4 nof_root_types = OldObjectRoot::_number_of_types; + writer.write_number_of_constants(nof_root_types); + for (u4 i = 0; i < nof_root_types; ++i) { + writer.write_key(i); + writer.write(OldObjectRoot::type_description((OldObjectRoot::Type)i)); + } + } +}; + +class CheckpointInstall { + private: + const JfrCheckpointBlobHandle& _cp; + public: + CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {} + void sample_do(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + if (!sample->is_dead()) { + sample->set_klass_checkpoint(_cp); + } + } +}; + +class CheckpointWrite { + private: + JfrCheckpointWriter& _writer; + const jlong _last_sweep; + public: + CheckpointWrite(JfrCheckpointWriter& writer, jlong last_sweep) : _writer(writer), _last_sweep(last_sweep) {} + void sample_do(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + if (sample->is_alive_and_older_than(_last_sweep)) { + if (sample->has_thread_checkpoint()) { + const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint(); + thread_cp->exclusive_write(_writer); + } + if (sample->has_klass_checkpoint()) { + const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint(); + klass_cp->exclusive_write(_writer); + } + } + } +}; + +class CheckpointStateReset { + private: + const jlong _last_sweep; + public: + CheckpointStateReset(jlong last_sweep) : _last_sweep(last_sweep) {} + void sample_do(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + if (sample->is_alive_and_older_than(_last_sweep)) { + if (sample->has_thread_checkpoint()) { + const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint(); + thread_cp->reset_write_state(); + } + if (sample->has_klass_checkpoint()) { + const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint(); + klass_cp->reset_write_state(); + } + } + } +}; + 
+class StackTraceWrite { + private: + JfrStackTraceRepository& _stack_trace_repo; + JfrCheckpointWriter& _writer; + int _count; + public: + StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer) : + _stack_trace_repo(stack_trace_repo), _writer(writer), _count(0) { + JfrStacktrace_lock->lock(); + } + ~StackTraceWrite() { + assert(JfrStacktrace_lock->owned_by_self(), "invariant"); + JfrStacktrace_lock->unlock(); + } + + void sample_do(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + if (!sample->is_dead()) { + if (sample->has_stack_trace()) { + JfrTraceId::use(sample->klass(), true); + _stack_trace_repo.write(_writer, sample->stack_trace_id(), sample->stack_trace_hash()); + ++_count; + } + } + } + + int count() const { + return _count; + } +}; + +class SampleMark { + private: + ObjectSampleMarker& _marker; + jlong _last_sweep; + int _count; + public: + SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), + _last_sweep(last_sweep), + _count(0) {} + void sample_do(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + if (sample->is_alive_and_older_than(_last_sweep)) { + _marker.mark(sample->object()); + ++_count; + } + } + + int count() const { + return _count; + } +}; + +void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) { + assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant"); + + if (!writer.has_data()) { + if (!class_unload) { + LeakProfiler::resume(); + } + assert(LeakProfiler::is_running(), "invariant"); + return; + } + + assert(writer.has_data(), "invariant"); + const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob(); + + const ObjectSampler* const object_sampler = LeakProfiler::object_sampler(); + assert(object_sampler != NULL, "invariant"); + + ObjectSample* const last = const_cast(object_sampler->last()); + const ObjectSample* const last_resolved = object_sampler->last_resolved(); + CheckpointInstall install(h_cp); + + if (class_unload) { + if (last != NULL) { + // all samples need the class unload information + do_samples(last, NULL, install); + } + assert(LeakProfiler::is_running(), "invariant"); + return; + } + + // only new samples since last resolved checkpoint + if (last != last_resolved) { + do_samples(last, last_resolved, install); + if (resume) { + const_cast(object_sampler)->set_last_resolved(last); + } + } + assert(LeakProfiler::is_suspended(), "invariant"); + if (resume) { + LeakProfiler::resume(); + assert(LeakProfiler::is_running(), "invariant"); + } +} + +void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool emit_all, Thread* thread) { + assert(edge_store != NULL, "invariant"); + assert(thread != NULL, "invariant"); + static bool constant_types_registered = false; + if (!constant_types_registered) { + JfrConstantSerializer::register_serializer(CONSTANT_TYPE_OLDOBJECTROOTSYSTEM, false, true, new ConstantTypeRootSystem()); + JfrConstantSerializer::register_serializer(CONSTANT_TYPE_OLDOBJECTROOTTYPE, false, true, new ConstantRootType()); + constant_types_registered = true; + } + const ObjectSampler* const object_sampler = LeakProfiler::object_sampler(); + assert(object_sampler != NULL, "invariant"); + const jlong last_sweep = emit_all ? 
max_jlong : object_sampler->last_sweep().value(); + ObjectSample* const last = const_cast(object_sampler->last()); + { + JfrCheckpointWriter writer(false, false, thread); + CheckpointWrite checkpoint_write(writer, last_sweep); + do_samples(last, NULL, checkpoint_write); + } + CheckpointStateReset state_reset(last_sweep); + do_samples(last, NULL, state_reset); + if (!edge_store->is_empty()) { + // java object and chain representations + JfrCheckpointWriter writer(false, true, thread); + ObjectSampleWriter osw(writer, edge_store); + edge_store->iterate_edges(osw); + } +} + +WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) : + _stack_trace_repo(repo) { +} + +bool WriteObjectSampleStacktrace::process() { + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + if (!LeakProfiler::is_running()) { + return true; + } + // Suspend the LeakProfiler subsystem + // to ensure stable samples even + // after we return from the safepoint. + LeakProfiler::suspend(); + assert(!LeakProfiler::is_running(), "invariant"); + assert(LeakProfiler::is_suspended(), "invariant"); + + const ObjectSampler* object_sampler = LeakProfiler::object_sampler(); + assert(object_sampler != NULL, "invariant"); + assert(LeakProfiler::is_suspended(), "invariant"); + + ObjectSample* const last = const_cast(object_sampler->last()); + const ObjectSample* const last_resolved = object_sampler->last_resolved(); + if (last == last_resolved) { + assert(LeakProfiler::is_suspended(), "invariant"); + return true; + } + + JfrCheckpointWriter writer(false, true, Thread::current()); + const JfrCheckpointContext ctx = writer.context(); + + writer.write_constant_type(CONSTANT_TYPE_STACKTRACE); + const jlong count_offset = writer.reserve(sizeof(u4)); + + int count = 0; + { + StackTraceWrite stack_trace_write(_stack_trace_repo, writer); // JfrStacktrace_lock + do_samples(last, last_resolved, stack_trace_write); + count = stack_trace_write.count(); + } + if (count == 0) { + writer.set_context(ctx); + assert(LeakProfiler::is_suspended(), "invariant"); + return true; + } + assert(count > 0, "invariant"); + writer.write_number_of_constants((u4)count, count_offset); + JfrStackTraceRepository::write_metadata(writer); + + ObjectSampleCheckpoint::install(writer, false, false); + assert(LeakProfiler::is_suspended(), "invariant"); + return true; +} + +int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) { + const ObjectSampler* object_sampler = LeakProfiler::object_sampler(); + assert(object_sampler != NULL, "invariant"); + ObjectSample* const last = const_cast(object_sampler->last()); + if (last == NULL) { + return 0; + } + const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value(); + SampleMark mark(marker, last_sweep); + do_samples(last, NULL, mark); + return mark.count(); +} diff --git a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp new file mode 100644 index 0000000000000000000000000000000000000000..964a75a23864797dc8e693a73ad40379805d86da --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
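WriteObjectSampleStacktrace::process() above uses a reserve-then-backpatch pattern: it reserves room for a count, emits the stack trace constants, and then either patches the real count into the reserved slot or rolls the writer back to the saved context when nothing was written. A simplified sketch of that pattern over a plain byte buffer (TinyWriter and its methods are illustrative, not the JfrCheckpointWriter API):

// Reserve/backpatch writer sketch (illustrative only).
#include <cstddef>
#include <cstdint>
#include <cassert>
#include <vector>

class TinyWriter {
 private:
  std::vector<uint8_t> _buf;
 public:
  size_t position() const { return _buf.size(); }
  void set_position(size_t pos) { _buf.resize(pos); }          // "rollback"
  size_t reserve(size_t bytes) {                               // placeholder slot
    size_t offset = _buf.size();
    _buf.resize(_buf.size() + bytes, 0);
    return offset;
  }
  void write_u4(uint32_t value) {
    for (int i = 0; i < 4; ++i) _buf.push_back(uint8_t(value >> (8 * i)));
  }
  void patch_u4(size_t offset, uint32_t value) {
    for (int i = 0; i < 4; ++i) _buf[offset + i] = uint8_t(value >> (8 * i));
  }
};

int main() {
  TinyWriter writer;
  const size_t saved = writer.position();          // like capturing writer.context()
  const size_t count_offset = writer.reserve(4);   // like reserve(sizeof(u4))
  uint32_t count = 0;
  for (uint32_t id = 1; id <= 3; ++id) { writer.write_u4(id); ++count; }
  if (count == 0) {
    writer.set_position(saved);                    // like writer.set_context(ctx)
  } else {
    writer.patch_u4(count_offset, count);          // like write_number_of_constants(count, offset)
  }
  assert(writer.position() == 4 + 3 * 4);
  return 0;
}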
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP +#define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP + +#include "memory/allocation.hpp" +#include "utilities/exceptions.hpp" + +class EdgeStore; +class JfrStackTraceRepository; +class JfrCheckpointWriter; +class ObjectSampleMarker; + +class ObjectSampleCheckpoint : AllStatic { + public: + static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume); + static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread); + static int mark(ObjectSampleMarker& marker, bool emit_all); +}; + +class WriteObjectSampleStacktrace : public StackObj { + private: + JfrStackTraceRepository& _stack_trace_repo; + public: + WriteObjectSampleStacktrace(JfrStackTraceRepository& repo); + bool process(); +}; + +#endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP diff --git a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e73ccfaa7ef0f56057811cce5afdd3fea52a2301 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/systemDictionary.hpp" +#include "jfr/leakprofiler/checkpoint/objectSampleDescription.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/thread.hpp" +#include "utilities/ostream.hpp" + +static Symbol* symbol_size = NULL; + +ObjectDescriptionBuilder::ObjectDescriptionBuilder() { + reset(); +} + +void ObjectDescriptionBuilder::write_int(jint value) { + char buf[20]; + jio_snprintf(buf, sizeof(buf), "%d", value); + write_text(buf); +} + +void ObjectDescriptionBuilder::write_text(const char* text) { + if (_index == sizeof(_buffer) - 2) { + return; + } + while (*text != '\0' && _index < sizeof(_buffer) - 2) { + _buffer[_index] = *text; + _index++; + text++; + } + assert(_index < sizeof(_buffer) - 1, "index should not exceed buffer size"); + // add ellipsis if we reached end + if (_index == sizeof(_buffer) - 2) { + _buffer[_index-3] = '.'; + _buffer[_index-2] = '.'; + _buffer[_index-1] = '.'; + } + // terminate string + _buffer[_index] = '\0'; +} + +void ObjectDescriptionBuilder::reset() { + _index = 0; + _buffer[0] = '\0'; +} + +void ObjectDescriptionBuilder::print_description(outputStream* out) { + out->print("%s", (const char*)_buffer); +} + +const char* ObjectDescriptionBuilder::description() { + if (_buffer[0] == '\0') { + return NULL; + } + const size_t len = strlen(_buffer); + char* copy = NEW_RESOURCE_ARRAY(char, len + 1); + assert(copy != NULL, "invariant"); + strncpy(copy, _buffer, len + 1); + return copy; +} + +ObjectSampleDescription::ObjectSampleDescription(oop object) : + _object(object) { +} + +void ObjectSampleDescription::ensure_initialized() { + if (symbol_size == NULL) { + symbol_size = SymbolTable::new_permanent_symbol("size", Thread::current()); + } +} + +void ObjectSampleDescription::print_description(outputStream* out) { + write_object_to_buffer(); + _description.print_description(out); +} + +const char* ObjectSampleDescription::description() { + write_object_to_buffer(); + return _description.description(); +} + +void ObjectSampleDescription::write_text(const char* text) { + _description.write_text(text); +} + +void ObjectSampleDescription::write_int(jint value) { + _description.write_int(value); +} + +void ObjectSampleDescription::write_object_to_buffer() { + ensure_initialized(); + _description.reset(); + write_object_details(); +} + +void ObjectSampleDescription::write_object_details() { + Klass* klass = _object->klass(); + Symbol* class_name = klass->name(); + jint size; + + if (_object->is_a(SystemDictionary::Class_klass())) { + write_class_name(); + return; + } + + if (_object->is_a(SystemDictionary::Thread_klass())) { + write_thread_name(); + return; + } + + if (_object->is_a(SystemDictionary::ThreadGroup_klass())) { + write_thread_group_name(); + return; + } + + if (read_int_size(&size)) { + write_size(size); + return; + } +} + +void ObjectSampleDescription::write_class_name() { + assert(_object->is_a(SystemDictionary::Class_klass()), "invariant"); + const Klass* const k = java_lang_Class::as_Klass(_object); + if (k == NULL) { + // might represent a primitive + const Klass* const ak = java_lang_Class::array_klass(_object); + // If ak is NULL, this is most likely a mirror associated with a + // jvmti redefine/retransform scratch klass. We can't get any additional + // information from it. 
+ if (ak != NULL) { + write_text(type2name(java_lang_Class::primitive_type(_object))); + } + return; + } + + if (k->oop_is_instance()) { + const InstanceKlass* ik = InstanceKlass::cast((Klass*)k); + if (ik->is_anonymous()) { + return; + } + assert(!ik->is_anonymous(), "invariant"); + const Symbol* name = ik->name(); + if (name != NULL) { + write_text("Class Name: "); + write_text(name->as_klass_external_name()); + } + } +} + +void ObjectSampleDescription::write_thread_group_name() { + assert(_object->is_a(SystemDictionary::ThreadGroup_klass()), "invariant"); + const char* tg_name = NULL; + typeArrayOop name = java_lang_ThreadGroup::name(_object); + if (name != NULL) { + tg_name = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length()); + } + if (tg_name != NULL) { + write_text("Thread Group: "); + write_text(tg_name); + } +} + +void ObjectSampleDescription::write_thread_name() { + assert(_object->is_a(SystemDictionary::Thread_klass()), "invariant"); + oop name = java_lang_Thread::name(_object); + if (name != NULL) { + char* p = java_lang_String::as_utf8_string(name); + if (p != NULL) { + write_text("Thread Name: "); + write_text(p); + } + } +} + +void ObjectSampleDescription::write_size(jint size) { + if (size >= 0) { + write_text("Size: "); + write_int(size); + } +} + +bool ObjectSampleDescription::read_int_size(jint* result_size) { + fieldDescriptor fd; + Klass* klass = _object->klass(); + if (klass->oop_is_instance()) { + InstanceKlass* ik = InstanceKlass::cast(klass); + if (ik->find_field(symbol_size, vmSymbols::int_signature(), false, &fd) != NULL) { + jint size = _object->int_field(fd.offset()); + *result_size = size; + return true; + } + } + return false; +} diff --git a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.hpp b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.hpp new file mode 100644 index 0000000000000000000000000000000000000000..0a19d47d61f0fede1b5f3ef25e8d6177b69388af --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleDescription.hpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEDESCRIPTION_HPP +#define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEDESCRIPTION_HPP + +#define OBJECT_SAMPLE_DESCRIPTION_BUFFER_SIZE 100 + +#include "memory/allocation.hpp" + +class outputStream; + +class ObjectDescriptionBuilder : public StackObj { +private: + char _buffer[OBJECT_SAMPLE_DESCRIPTION_BUFFER_SIZE]; + size_t _index; + +public: + ObjectDescriptionBuilder(); + + void write_text(const char* text); + void write_int(jint value); + void reset(); + + void print_description(outputStream* out); + const char* description(); +}; + +class ObjectSampleDescription : public StackObj { +private: + ObjectDescriptionBuilder _description; + oop _object; + + void write_text(const char* text); + void write_int(jint value); + + void write_object_details(); + void write_size(jint size); + void write_thread_name(); + void write_thread_group_name(); + void write_class_name(); + void write_object_to_buffer(); + bool is_class(Symbol* s1, const char* s2); + void ensure_initialized(); + bool read_int_size(jint* result); + +public: + ObjectSampleDescription(oop object); + void print_description(outputStream* out); + const char* description(); +}; + +#endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEDESCRIPTION_HPP diff --git a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..89bd58ea1da9f046b9a6d43f9e75d47b5409de15 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp @@ -0,0 +1,615 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/recorder/checkpoint/constant/jfrTagSet.hpp" +#include "jfr/recorder/checkpoint/constant/jfrTagSetWriter.hpp" +#include "jfr/leakprofiler/chains/edge.hpp" +#include "jfr/leakprofiler/chains/edgeStore.hpp" +#include "jfr/leakprofiler/chains/edgeUtils.hpp" +#include "jfr/leakprofiler/checkpoint/objectSampleDescription.hpp" +#include "jfr/leakprofiler/checkpoint/objectSampleWriter.hpp" +#include "jfr/leakprofiler/checkpoint/rootResolver.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" +#include "jfr/leakprofiler/utilities/rootType.hpp" +#include "jfr/leakprofiler/utilities/unifiedOop.hpp" +#include "oops/oop.inline.hpp" +#include "oops/symbol.hpp" +#include "tracefiles/traceTypes.hpp" +#include "utilities/growableArray.hpp" + +template +class ObjectSampleAuxInfo : public ResourceObj { + public: + Data _data; + traceid _id; + ObjectSampleAuxInfo() : _data(), _id(0) {} +}; + +class ObjectSampleArrayData { + public: + int _array_size; + int _array_index; + ObjectSampleArrayData() : _array_size(0), _array_index(0) {} +}; + +class ObjectSampleFieldInfo : public ResourceObj { + public: + const Symbol* _field_name_symbol; + jshort _field_modifiers; + ObjectSampleFieldInfo() : _field_name_symbol(NULL), _field_modifiers(0) {} +}; + +class ObjectSampleRootDescriptionData { + public: + const Edge* _root_edge; + const char* _description; + OldObjectRoot::System _system; + OldObjectRoot::Type _type; + ObjectSampleRootDescriptionData() : _root_edge(NULL), + _description(NULL), + _system(OldObjectRoot::_system_undetermined), + _type(OldObjectRoot::_type_undetermined) {} +}; + +class OldObjectSampleData { + public: + oop _object; + traceid _reference_id; +}; + +class ReferenceData { + public: + traceid _field_info_id; + traceid _array_info_id; + traceid _old_object_sample_id; + size_t _skip; +}; + +static int initial_storage_size = 16; + +template +class SampleSet : public ResourceObj { + private: + GrowableArray* _storage; + public: + SampleSet() : _storage(NULL) {} + + traceid store(Data data) { + assert(data != NULL, "invariant"); + if (_storage == NULL) { + _storage = new GrowableArray(initial_storage_size); + } + assert(_storage != NULL, "invariant"); + assert(_storage->find(data) == -1, "invariant"); + _storage->append(data); + return data->_id; + } + + size_t size() const { + return _storage != NULL ? 
(size_t)_storage->length() : 0; + } + + template + void iterate(Functor& functor) { + if (_storage != NULL) { + for (int i = 0; i < _storage->length(); ++i) { + functor(_storage->at(i)); + } + } + } + + const GrowableArray& storage() const { + return *_storage; + } +}; + +typedef ObjectSampleAuxInfo ObjectSampleArrayInfo; +typedef ObjectSampleAuxInfo ObjectSampleRootDescriptionInfo; +typedef ObjectSampleAuxInfo OldObjectSampleInfo; +typedef ObjectSampleAuxInfo ReferenceInfo; + +class FieldTable : public ResourceObj { + template class, + typename, + size_t> + friend class HashTableHost; + typedef HashTableHost FieldInfoTable; + public: + typedef FieldInfoTable::HashEntry FieldInfoEntry; + + private: + static traceid _field_id_counter; + FieldInfoTable* _table; + + void assign_id(FieldInfoEntry* entry) { + assert(entry != NULL, "invariant"); + entry->set_id(++_field_id_counter); + } + + bool equals(const ObjectSampleFieldInfo* query, uintptr_t hash, const FieldInfoEntry* entry) { + assert(hash == entry->hash(), "invariant"); + assert(query != NULL, "invariant"); + const ObjectSampleFieldInfo* stored = entry->literal(); + assert(stored != NULL, "invariant"); + assert(const_cast(stored->_field_name_symbol)->identity_hash() == const_cast(query->_field_name_symbol)->identity_hash(), "invariant"); + return stored->_field_modifiers == query->_field_modifiers; + } + + public: + FieldTable() : _table(new FieldInfoTable(this)) {} + ~FieldTable() { + assert(_table != NULL, "invariant"); + delete _table; + } + + traceid store(const ObjectSampleFieldInfo* field_info) { + assert(field_info != NULL, "invariant"); + const FieldInfoEntry& entry =_table->lookup_put(field_info, + ((Symbol*)(field_info->_field_name_symbol))->identity_hash()); + return entry.id(); + } + + size_t size() const { + return _table->cardinality(); + } + + template + void iterate(T& functor) const { + _table->iterate_entry(functor); + } +}; + +traceid FieldTable::_field_id_counter = 0; + +typedef SampleSet SampleInfo; +typedef SampleSet RefInfo; +typedef SampleSet ArrayInfo; +typedef SampleSet RootDescriptionInfo; + +static SampleInfo* sample_infos = NULL; +static RefInfo* ref_infos = NULL; +static ArrayInfo* array_infos = NULL; +static FieldTable* field_infos = NULL; +static RootDescriptionInfo* root_infos = NULL; + +int __write_sample_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* si) { + assert(writer != NULL, "invariant"); + assert(si != NULL, "invariant"); + const OldObjectSampleInfo* const oosi = (const OldObjectSampleInfo*)si; + oop object = oosi->_data._object; + assert(object != NULL, "invariant"); + writer->write(oosi->_id); + writer->write((u8)(const HeapWord*)object); + writer->write(const_cast(object->klass())); + ObjectSampleDescription od(object); + writer->write(od.description()); + writer->write(oosi->_data._reference_id); + return 1; +} + +typedef JfrArtifactWriterImplHost SampleWriterImpl; +typedef JfrArtifactWriterHost SampleWriter; + +static void write_sample_infos(JfrCheckpointWriter& writer) { + if (sample_infos != NULL) { + SampleWriter sw(&writer, NULL, false); + sample_infos->iterate(sw); + } +} + +int __write_reference_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ri) { + assert(writer != NULL, "invariant"); + assert(ri != NULL, "invariant"); + const ReferenceInfo* const ref_info = (const ReferenceInfo*)ri; + writer->write(ref_info->_id); + writer->write(ref_info->_data._array_info_id); + writer->write(ref_info->_data._field_info_id); + 
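The writer callbacks in this file flatten every reference chain into id-linked records: __write_sample_info__ emits one record per sampled object (id, address, class, description, and the id of the reference leading to it, 0 for a root), while __write_reference_info__ emits one record per edge (optional array/field detail, the parent sample id, and a skip count for collapsed links). A standalone sketch of that record shape, using hypothetical struct names and toy values rather than the real JFR types:

    // Illustrative only: hypothetical plain structs mirroring the id-linked
    // records serialized by the callbacks above (not the real JFR types).
    #include <stdint.h>
    #include <string>
    #include <vector>

    typedef uint64_t traceid;

    struct SampleRecord {        // one per sampled object
      traceid     id;
      uintptr_t   object_address;
      std::string klass_name;
      std::string description;
      traceid     reference_id;  // 0 when the sample is itself a root
    };

    struct ReferenceRecord {     // one per edge in a chain
      traceid id;
      traceid array_info_id;     // 0 unless the edge is an array element
      traceid field_info_id;     // 0 unless the edge is an instance field
      traceid parent_sample_id;  // next sample towards the GC root
      int32_t skip;              // collapsed intermediate edges (written as s4)
    };

    int main() {
      // A two-object chain "root -> leaked" becomes two sample records and one
      // reference record, linked purely by ids, so everything can be written flat.
      std::vector<SampleRecord>    samples;
      std::vector<ReferenceRecord> references;

      SampleRecord    root = { 1, 0x1000, "java.lang.Object[]", "Size: 64", 0 };
      ReferenceRecord edge = { 2, 0, 7, 1, 0 };   // field edge (field-info 7) back to sample 1
      SampleRecord    leak = { 2, 0x2000, "LeakedThing", "", 2 };

      samples.push_back(root);
      samples.push_back(leak);
      references.push_back(edge);
      return 0;
    }

Because every link is expressed as an id, the checkpoint can serialize chains in any order and without recursion.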
writer->write(ref_info->_data._old_object_sample_id); + writer->write((s4)ref_info->_data._skip); + return 1; +} + +typedef JfrArtifactWriterImplHost ReferenceWriterImpl; +typedef JfrArtifactWriterHost ReferenceWriter; + +static void write_reference_infos(JfrCheckpointWriter& writer) { + if (ref_infos != NULL) { + ReferenceWriter rw(&writer, NULL, false); + ref_infos->iterate(rw); + } +} + +int __write_array_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ai) { + assert(writer != NULL, "invariant"); + assert(ai != NULL, "invariant"); + const ObjectSampleArrayInfo* const osai = (const ObjectSampleArrayInfo*)ai; + writer->write(osai->_id); + writer->write(osai->_data._array_size); + writer->write(osai->_data._array_index); + return 1; +} + +static traceid get_array_info_id(const Edge& edge, traceid id) { + if (edge.is_root() || !EdgeUtils::is_array_element(edge)) { + return 0; + } + if (array_infos == NULL) { + array_infos = new ArrayInfo(); + } + assert(array_infos != NULL, "invariant"); + + ObjectSampleArrayInfo* const osai = new ObjectSampleArrayInfo(); + assert(osai != NULL, "invariant"); + osai->_id = id; + osai->_data._array_size = EdgeUtils::array_size(edge); + osai->_data._array_index = EdgeUtils::array_index(edge); + return array_infos->store(osai); +} + +typedef JfrArtifactWriterImplHost ArrayWriterImpl; +typedef JfrArtifactWriterHost ArrayWriter; + +static void write_array_infos(JfrCheckpointWriter& writer) { + if (array_infos != NULL) { + ArrayWriter aw(&writer, NULL, false); + array_infos->iterate(aw); + } +} + +int __write_field_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* fi) { + assert(writer != NULL, "invariant"); + assert(fi != NULL, "invariant"); + const FieldTable::FieldInfoEntry* field_info_entry = (const FieldTable::FieldInfoEntry*)fi; + writer->write(field_info_entry->id()); + const ObjectSampleFieldInfo* const osfi = field_info_entry->literal(); + writer->write(osfi->_field_name_symbol->as_C_string()); + writer->write(osfi->_field_modifiers); + return 1; +} + +static traceid get_field_info_id(const Edge& edge) { + if (edge.is_root()) { + return 0; + } + + assert(!EdgeUtils::is_array_element(edge), "invariant"); + const Symbol* const field_name_symbol = EdgeUtils::field_name_symbol(edge); + if (field_name_symbol == NULL) { + return 0; + } + + if (field_infos == NULL) { + field_infos = new FieldTable(); + } + assert(field_infos != NULL, "invariant"); + + ObjectSampleFieldInfo* const osfi = new ObjectSampleFieldInfo(); + assert(osfi != NULL, "invariant"); + osfi->_field_name_symbol = field_name_symbol; + osfi->_field_modifiers = EdgeUtils::field_modifiers(edge); + return field_infos->store(osfi); +} + +typedef JfrArtifactWriterImplHost FieldWriterImpl; +typedef JfrArtifactWriterHost FieldWriter; + +static void write_field_infos(JfrCheckpointWriter& writer) { + if (field_infos != NULL) { + FieldWriter fw(&writer, NULL, false); + field_infos->iterate(fw); + } +} + +static const char* description(const ObjectSampleRootDescriptionInfo* osdi) { + assert(osdi != NULL, "invariant"); + + if (osdi->_data._description == NULL) { + return NULL; + } + + ObjectDescriptionBuilder description; + if (osdi->_data._system == OldObjectRoot::_threads) { + description.write_text("Thread Name: "); + } + description.write_text(osdi->_data._description); + return description.description(); +} + +int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* di) { + assert(writer != NULL, "invariant"); + 
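FieldTable above interns one entry per distinct (field name, modifiers) pair and hands out a stable id from a monotonically increasing counter, so repeated edges through the same field share a single field-info record. A minimal sketch of that interning idea; std::map stands in for the JFR hashtable host, and all names here are hypothetical.

    // Illustrative only: field-info interning with stable ids; std::map stands in
    // for the JFR hashtable host, and all names here are hypothetical.
    #include <stdint.h>
    #include <cstddef>
    #include <map>
    #include <string>
    #include <utility>

    typedef uint64_t traceid;

    class FieldInterner {
      typedef std::pair<std::string, short> Key;   // (field name, field modifiers)
      std::map<Key, traceid> _ids;
      traceid _next_id;
     public:
      FieldInterner() : _next_id(0) {}

      // Returns the existing id for (name, modifiers) or assigns the next one,
      // similar in spirit to FieldTable::store() backed by assign_id().
      traceid intern(const std::string& name, short modifiers) {
        Key key(name, modifiers);
        std::map<Key, traceid>::iterator it = _ids.find(key);
        if (it != _ids.end()) {
          return it->second;
        }
        const traceid id = ++_next_id;
        _ids[key] = id;
        return id;
      }

      size_t size() const { return _ids.size(); }
    };

    int main() {
      FieldInterner interner;
      traceid a = interner.intern("next", 0x0002);   // first use of the field
      traceid b = interner.intern("next", 0x0002);   // same field, same id
      traceid c = interner.intern("prev", 0x0002);   // different field, new id
      return (a == b && a != c && interner.size() == 2) ? 0 : 1;
    }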
assert(di != NULL, "invariant"); + const ObjectSampleRootDescriptionInfo* const osdi = (const ObjectSampleRootDescriptionInfo*)di; + writer->write(osdi->_id); + writer->write(description(osdi)); + writer->write(osdi->_data._system); + writer->write(osdi->_data._type); + return 1; +} + +static traceid get_root_description_info_id(const Edge& edge, traceid id) { + assert(edge.is_root(), "invariant"); + if (EdgeUtils::is_leak_edge(edge)) { + return 0; + } + + if (root_infos == NULL) { + root_infos = new RootDescriptionInfo(); + } + assert(root_infos != NULL, "invariant"); + ObjectSampleRootDescriptionInfo* const oodi = new ObjectSampleRootDescriptionInfo(); + oodi->_id = id; + oodi->_data._root_edge = &edge; + return root_infos->store(oodi); +} + +typedef JfrArtifactWriterImplHost RootDescriptionWriterImpl; +typedef JfrArtifactWriterHost RootDescriptionWriter; + + +int _edge_reference_compare_(uintptr_t lhs, uintptr_t rhs) { + return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; +} + +int _root_desc_compare_(const ObjectSampleRootDescriptionInfo*const & lhs, const ObjectSampleRootDescriptionInfo* const& rhs) { + const uintptr_t lhs_ref = (uintptr_t)lhs->_data._root_edge->reference(); + const uintptr_t rhs_ref = (uintptr_t)rhs->_data._root_edge->reference(); + return _edge_reference_compare_(lhs_ref, rhs_ref); +} + +static int find_sorted(const RootCallbackInfo& callback_info, + const GrowableArray* arr, + int length, + bool& found) { + assert(arr != NULL, "invariant"); + assert(length >= 0, "invariant"); + assert(length <= arr->length(), "invariant"); + + found = false; + int min = 0; + int max = length; + while (max >= min) { + const int mid = (int)(((uint)max + min) / 2); + int diff = _edge_reference_compare_((uintptr_t)callback_info._high, + (uintptr_t)arr->at(mid)->_data._root_edge->reference()); + if (diff > 0) { + min = mid + 1; + } else if (diff < 0) { + max = mid - 1; + } else { + found = true; + return mid; + } + } + return min; +} + +class RootResolutionSet : public ResourceObj, public RootCallback { + private: + GrowableArray* _unresolved_roots; + + const uintptr_t high() const { + return (uintptr_t)_unresolved_roots->last()->_data._root_edge->reference(); + } + + const uintptr_t low() const { + return (uintptr_t)_unresolved_roots->first()->_data._root_edge->reference(); + } + + bool in_set_address_range(const RootCallbackInfo& callback_info) const { + assert(callback_info._low == NULL, "invariant"); + const uintptr_t addr = (uintptr_t)callback_info._high; + return low() <= addr && high() >= addr; + } + + int compare_to_range(const RootCallbackInfo& callback_info) const { + assert(callback_info._high != NULL, "invariant"); + assert(callback_info._low != NULL, "invariant"); + + for (int i = 0; i < _unresolved_roots->length(); ++i) { + const uintptr_t ref_addr = (uintptr_t)_unresolved_roots->at(i)->_data._root_edge->reference(); + if ((uintptr_t)callback_info._low <= ref_addr && (uintptr_t)callback_info._high >= ref_addr) { + return i; + } + } + return -1; + } + + int exact(const RootCallbackInfo& callback_info) const { + assert(callback_info._high != NULL, "invariant"); + assert(in_set_address_range(callback_info), "invariant"); + + bool found; + const int idx = find_sorted(callback_info, _unresolved_roots, _unresolved_roots->length(), found); + return found ? 
idx : -1; + } + + bool resolve_root(const RootCallbackInfo& callback_info, int idx) const { + assert(idx >= 0, "invariant"); + assert(idx < _unresolved_roots->length(), "invariant"); + + ObjectSampleRootDescriptionInfo* const desc = + const_cast(_unresolved_roots->at(idx)); + assert(desc != NULL, "invariant"); + assert((uintptr_t)callback_info._high == (uintptr_t)desc->_data._root_edge->reference(), "invariant"); + + desc->_data._system = callback_info._system; + desc->_data._type = callback_info._type; + + if (callback_info._system == OldObjectRoot::_threads) { + const JavaThread* jt = (const JavaThread*)callback_info._context; + assert(jt != NULL, "invariant"); + desc->_data._description = jt->name(); + } + + _unresolved_roots->remove_at(idx); + return _unresolved_roots->is_empty(); + } + + public: + RootResolutionSet(RootDescriptionInfo* info) : _unresolved_roots(NULL) { + assert(info != NULL, "invariant"); + // construct a sorted copy + const GrowableArray& info_storage = info->storage(); + const int length = info_storage.length(); + _unresolved_roots = new GrowableArray(length); + assert(_unresolved_roots != NULL, "invariant"); + + for (int i = 0; i < length; ++i) { + _unresolved_roots->insert_sorted<_root_desc_compare_>(info_storage.at(i)); + } + } + + bool process(const RootCallbackInfo& callback_info) { + if (NULL == callback_info._low) { + if (in_set_address_range(callback_info)) { + const int idx = exact(callback_info); + return idx == -1 ? false : resolve_root(callback_info, idx); + } + return false; + } + assert(callback_info._low != NULL, "invariant"); + const int idx = compare_to_range(callback_info); + return idx == -1 ? false : resolve_root(callback_info, idx); + } + + int entries() const { + return _unresolved_roots->length(); + } + + const void* at(int idx) const { + assert(idx >= 0, "invariant"); + assert(idx < _unresolved_roots->length(), "invariant"); + return _unresolved_roots->at(idx)->_data._root_edge->reference(); + } +}; + +static void write_root_descriptors(JfrCheckpointWriter& writer) { + if (root_infos != NULL) { + // resolve roots + RootResolutionSet rrs(root_infos); + RootResolver::resolve(rrs); + // write roots + RootDescriptionWriter rw(&writer, NULL, false); + root_infos->iterate(rw); + } +} + +static void add_old_object_sample_info(const Edge* current, traceid id) { + assert(current != NULL, "invariant"); + if (sample_infos == NULL) { + sample_infos = new SampleInfo(); + } + assert(sample_infos != NULL, "invariant"); + OldObjectSampleInfo* const oosi = new OldObjectSampleInfo(); + assert(oosi != NULL, "invariant"); + oosi->_id = id; + oosi->_data._object = current->pointee(); + oosi->_data._reference_id = current->is_root() ? (traceid)0 : id; + sample_infos->store(oosi); +} + +static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) { + assert(current != NULL, "invariant"); + if (ref_infos == NULL) { + ref_infos = new RefInfo(); + } + + assert(ref_infos != NULL, "invariant"); + ReferenceInfo* const ri = new ReferenceInfo(); + assert(ri != NULL, "invariant"); + + ri->_id = id; + ri->_data._array_info_id = !current->is_skip_edge() ? get_array_info_id(*current, id) : 0; + ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? 
+ get_field_info_id(*current) : (traceid)0; + ri->_data._old_object_sample_id = parent_id; + ri->_data._skip = current->skip_length(); + ref_infos->store(ri); +} + +static traceid add_root_info(const Edge* root, traceid id) { + assert(root != NULL, "invariant"); + assert(root->is_root(), "invariant"); + return get_root_description_info_id(*root, id); +} + +void ObjectSampleWriter::write(const RoutableEdge* edge) { + assert(edge != NULL, "invariant"); + const traceid id = _store->get_id(edge); + add_old_object_sample_info(edge, id); + const RoutableEdge* parent = edge->logical_parent(); + if (parent != NULL) { + add_reference_info(edge, id, _store->get_id(parent)); + } else { + assert(edge->is_root(), "invariant"); + add_root_info(edge, id); + } +} + +ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) : + _writer(writer), + _store(store) { + assert(store != NULL, "invariant"); + assert(store->number_of_entries() > 0, "invariant"); + sample_infos = NULL; + ref_infos = NULL; + array_infos = NULL; + field_infos = NULL; + root_infos = NULL; +} + +ObjectSampleWriter::~ObjectSampleWriter() { + write_sample_infos(_writer); + write_reference_infos(_writer); + write_array_infos(_writer); + write_field_infos(_writer); + write_root_descriptors(_writer); +} + +void ObjectSampleWriter::write_chain(const RoutableEdge& edge) { + assert(EdgeUtils::is_leak_edge(edge), "invariant"); + if (edge.processed()) { + return; + } + EdgeUtils::collapse_chain(edge); + const RoutableEdge* current = &edge; + while (current != NULL) { + if (current->processed()) { + return; + } + write(current); + current->set_processed(); + current = current->logical_parent(); + } +} + +bool ObjectSampleWriter::operator()(const RoutableEdge& edge) { + if (EdgeUtils::is_leak_edge(edge)) { + write_chain(edge); + } + return true; +} diff --git a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b382390bce2111ba7f552adf4d46beb626b65948 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP +#define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP + +#include "memory/allocation.hpp" + +class Edge; +class EdgeStore; +class JfrCheckpointWriter; +class RoutableEdge; + +class ObjectSampleWriter : public StackObj { + private: + JfrCheckpointWriter& _writer; + const EdgeStore* const _store; + + void write(const RoutableEdge* edge); + void write_chain(const RoutableEdge& edge); + + public: + ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store); + ~ObjectSampleWriter(); + + bool operator()(const RoutableEdge& edge); +}; + +#endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP diff --git a/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.cpp b/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.cpp new file mode 100644 index 0000000000000000000000000000000000000000..93b33ea2b653691ee91d1b4826164f6a753b478f --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.cpp @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/leakprofiler/utilities/unifiedOop.hpp" +#include "jfr/leakprofiler/checkpoint/rootResolver.hpp" +#include "memory/iterator.hpp" +#include "oops/klass.hpp" +#include "oops/markOop.hpp" +#include "oops/oop.hpp" +#include "prims/jvmtiThreadState.hpp" +#include "prims/privilegedStack.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/vframe_hp.hpp" +#include "services/management.hpp" +#include "utilities/growableArray.hpp" + +class ReferenceLocateClosure : public OopClosure { + protected: + RootCallback& _callback; + RootCallbackInfo _info; + bool _complete; + + void do_oop_shared(const void* ref); + + public: + ReferenceLocateClosure(RootCallback& callback, + OldObjectRoot::System system, + OldObjectRoot::Type type, + const void* context) : _callback(callback), + _info(), + _complete(false) { + _info._high = NULL; + _info._low = NULL; + _info._system = system; + _info._type = type; + _info._context = context; + } + + virtual void do_oop(oop* ref); + virtual void do_oop(narrowOop* ref); + + bool complete() const { + return _complete; + } +}; + +void ReferenceLocateClosure::do_oop_shared(const void* ref) { + assert(ref != NULL, "invariant"); + if (!_complete) { + _info._high = ref; + _complete = _callback.process(_info); + } +} + +void ReferenceLocateClosure::do_oop(oop* ref) { + do_oop_shared(ref); +} + +void ReferenceLocateClosure::do_oop(narrowOop* ref) { + do_oop_shared(ref); +} + +class ReferenceToRootClosure : public StackObj { + private: + RootCallback& _callback; + RootCallbackInfo _info; + bool _complete; + + bool do_cldg_roots(); + bool do_object_synchronizer_roots(); + bool do_universe_roots(); + bool do_jni_handle_roots(); + bool do_jvmti_roots(); + bool do_system_dictionary_roots(); + bool do_management_roots(); + bool do_string_table_roots(); + + bool do_roots(); + + public: + ReferenceToRootClosure(RootCallback& callback) : _callback(callback), + _info(), + _complete(false) { + _info._high = NULL; + _info._low = NULL; + _info._context = NULL; + _info._system = OldObjectRoot::_system_undetermined; + _info._type = OldObjectRoot::_type_undetermined; + + assert_locked_or_safepoint(Threads_lock); + do_roots(); + } + + bool complete() const { + return _complete; + } +}; + +bool ReferenceToRootClosure::do_cldg_roots() { + assert(!complete(), "invariant"); + ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, NULL); + CLDToOopClosure cldt_closure(&rlc); + ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure); + return rlc.complete(); +} + +bool ReferenceToRootClosure::do_object_synchronizer_roots() { + assert(!complete(), "invariant"); + ReferenceLocateClosure rlc(_callback, OldObjectRoot::_object_synchronizer, OldObjectRoot::_type_undetermined, NULL); + ObjectSynchronizer::oops_do(&rlc); + return rlc.complete(); +} + +bool ReferenceToRootClosure::do_universe_roots() { + assert(!complete(), "invariant"); + ReferenceLocateClosure rlc(_callback, OldObjectRoot::_universe, OldObjectRoot::_type_undetermined, NULL); + Universe::oops_do(&rlc); + return rlc.complete(); +} + +bool ReferenceToRootClosure::do_jni_handle_roots() { + assert(!complete(), "invariant"); + ReferenceLocateClosure rlc(_callback, OldObjectRoot::_global_jni_handles, OldObjectRoot::_global_jni_handle, NULL); + JNIHandles::oops_do(&rlc); + return rlc.complete(); +} + +bool ReferenceToRootClosure::do_jvmti_roots() { + assert(!complete(), "invariant"); + 
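Each do_*_roots() helper in this closure follows the same pattern: wrap the callback in a ReferenceLocateClosure, hand it to one subsystem's oops_do(), and let do_roots() stop as soon as any subsystem reports that every unresolved sample has been matched. A self-contained sketch of that early-exit scan; the visitor and subsystem names are made up for illustration.

    // Illustrative only: early-exit scan over root "subsystems", mirroring the
    // do_*_roots() / do_roots() pattern with made-up names.
    #include <cstddef>

    struct RootVisitor {
      // Returns true once every address of interest has been resolved.
      virtual bool visit(const void* candidate) = 0;
      virtual ~RootVisitor() {}
    };

    typedef bool (*SubsystemScan)(RootVisitor& visitor);

    // Walk the subsystems in order; the first one whose scan reports completion
    // stops the whole traversal, so later subsystems are never visited.
    static bool scan_all_roots(SubsystemScan* scans, size_t count, RootVisitor& visitor) {
      for (size_t i = 0; i < count; i++) {
        if (scans[i](visitor)) {
          return true;
        }
      }
      return false;
    }

    // Toy usage: the first subsystem resolves nothing, the second resolves everything.
    struct OneShotVisitor : RootVisitor {
      bool done;
      OneShotVisitor() : done(false) {}
      virtual bool visit(const void* candidate) { done = (candidate != NULL); return done; }
    };

    static bool scan_nothing(RootVisitor&)      { return false; }
    static bool scan_everything(RootVisitor& v) { static int root = 0; return v.visit(&root); }

    int main() {
      OneShotVisitor visitor;
      SubsystemScan scans[] = { scan_nothing, scan_everything };
      return scan_all_roots(scans, 2, visitor) ? 0 : 1;   // completes on the second subsystem
    }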
ReferenceLocateClosure rlc(_callback, OldObjectRoot::_jvmti, OldObjectRoot::_global_jni_handle, NULL); + JvmtiExport::oops_do(&rlc); + return rlc.complete(); +} + +bool ReferenceToRootClosure::do_system_dictionary_roots() { + assert(!complete(), "invariant"); + ReferenceLocateClosure rlc(_callback, OldObjectRoot::_system_dictionary, OldObjectRoot::_type_undetermined, NULL); + SystemDictionary::oops_do(&rlc); + return rlc.complete(); +} + +bool ReferenceToRootClosure::do_management_roots() { + assert(!complete(), "invariant"); + ReferenceLocateClosure rlc(_callback, OldObjectRoot::_management, OldObjectRoot::_type_undetermined, NULL); + Management::oops_do(&rlc); + return rlc.complete(); +} + +bool ReferenceToRootClosure::do_string_table_roots() { + assert(!complete(), "invariant"); + ReferenceLocateClosure rlc(_callback, OldObjectRoot::_string_table, OldObjectRoot::_type_undetermined, NULL); + StringTable::oops_do(&rlc); + return rlc.complete(); +} + + +bool ReferenceToRootClosure::do_roots() { + assert(!complete(), "invariant"); + assert(OldObjectRoot::_system_undetermined == _info._system, "invariant"); + assert(OldObjectRoot::_type_undetermined == _info._type, "invariant"); + + if (do_cldg_roots()) { + _complete = true; + return true; + } + + if (do_object_synchronizer_roots()) { + _complete = true; + return true; + } + + if (do_universe_roots()) { + _complete = true; + return true; + } + + if (do_jni_handle_roots()) { + _complete = true; + return true; + } + + if (do_jvmti_roots()) { + _complete = true; + return true; + } + + if (do_system_dictionary_roots()) { + _complete = true; + return true; + } + + if (do_management_roots()) { + _complete = true; + return true; + } + + if (do_string_table_roots()) { + _complete = true; + return true; + } + + + return false; +} + +class ReferenceToThreadRootClosure : public StackObj { + private: + RootCallback& _callback; + bool _complete; + + bool do_java_threads_oops(JavaThread* jt); + bool do_thread_roots(JavaThread* jt); + bool do_thread_stack_fast(JavaThread* jt); + bool do_thread_stack_detailed(JavaThread* jt); + bool do_thread_jni_handles(JavaThread* jt); + bool do_thread_handle_area(JavaThread* jt); + + public: + ReferenceToThreadRootClosure(RootCallback& callback) :_callback(callback), _complete(false) { + assert_locked_or_safepoint(Threads_lock); + JavaThread *jt = Threads::first(); + while (jt) { + if (do_thread_roots(jt)) { + return; + } + + jt = jt->next(); + } + } + + bool complete() const { + return _complete; + } +}; + +bool ReferenceToThreadRootClosure::do_thread_handle_area(JavaThread* jt) { + assert(jt != NULL, "invariant"); + assert(!complete(), "invariant"); + ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_handle_area, jt); + jt->handle_area()->oops_do(&rcl); + return rcl.complete(); +} + +bool ReferenceToThreadRootClosure::do_thread_jni_handles(JavaThread* jt) { + assert(jt != NULL, "invariant"); + assert(!complete(), "invariant"); + + ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_local_jni_handle, jt); + jt->active_handles()->oops_do(&rcl); + return rcl.complete(); +} + +bool ReferenceToThreadRootClosure::do_thread_stack_fast(JavaThread* jt) { + assert(jt != NULL, "invariant"); + assert(!complete(), "invariant"); + + if (_callback.entries() == 0) { + _complete = true; + return true; + } + + RootCallbackInfo info; + info._high = NULL; + info._low = NULL; + info._context = jt; + info._system = OldObjectRoot::_threads; + info._type = OldObjectRoot::_stack_variable; 
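The loop that follows is the cheap pre-filter: each unresolved sample address is tested against the thread's usable stack range, and only a hit is handed to the callback, deferring the expensive frame walk to do_thread_stack_detailed(). A standalone sketch of that range filter, with made-up bounds standing in for a real thread stack:

    // Illustrative only: an address-range pre-filter over candidate root addresses,
    // standing in for JavaThread::is_in_usable_stack(); the bounds are made up.
    #include <stdint.h>
    #include <vector>

    struct StackRange {
      uintptr_t low;    // lowest usable stack address
      uintptr_t high;   // highest usable stack address
      bool contains(uintptr_t addr) const { return addr >= low && addr <= high; }
    };

    // Count the candidates that land inside the range and therefore justify the
    // expensive frame-by-frame walk; everything else is rejected with one compare.
    static size_t prefilter(const StackRange& range, const std::vector<uintptr_t>& candidates) {
      size_t hits = 0;
      for (size_t i = 0; i < candidates.size(); i++) {
        if (range.contains(candidates[i])) {
          ++hits;
        }
      }
      return hits;
    }

    int main() {
      StackRange range = { 0x10000, 0x20000 };   // pretend 64 KB stack window
      std::vector<uintptr_t> candidates;
      candidates.push_back(0x10100);             // inside: worth a detailed walk
      candidates.push_back(0x90000);             // outside: skipped cheaply
      return prefilter(range, candidates) == 1 ? 0 : 1;
    }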
+ + for (int i = 0; i < _callback.entries(); ++i) { + const address adr = (address)_callback.at(i); + if (jt->is_in_usable_stack(adr)) { + info._high = adr; + _complete = _callback.process(info); + if (_complete) { + return true; + } + } + } + assert(!complete(), "invariant"); + return false; +} + +bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) { + assert(jt != NULL, "invariant"); + assert(!complete(), "invariant"); + + ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_stack_variable, jt); + + if (jt->has_last_Java_frame()) { + PrivilegedElement* const pelem = jt->privileged_stack_top(); + if (pelem != NULL) { + pelem->oops_do(&rcl); + if (rcl.complete()) { + return true; + } + } + + // traverse the registered growable array gc_array + // can't do this as it is not reachable from outside + + // Traverse the monitor chunks + MonitorChunk* chunk = jt->monitor_chunks(); + for (; chunk != NULL; chunk = chunk->next()) { + chunk->oops_do(&rcl); + } + + if (rcl.complete()) { + return true; + } + + // Traverse the execution stack + for (StackFrameStream fst(jt); !fst.is_done(); fst.next()) { + fst.current()->oops_do(&rcl, NULL, NULL, fst.register_map()); + } + + } // last java frame + + if (rcl.complete()) { + return true; + } + + GrowableArray* const list = jt->deferred_locals(); + if (list != NULL) { + for (int i = 0; i < list->length(); i++) { + list->at(i)->oops_do(&rcl); + } + } + + if (rcl.complete()) { + return true; + } + + // Traverse instance variables at the end since the GC may be moving things + // around using this function + /* + * // can't reach these oop* from the outside + f->do_oop((oop*) &_threadObj); + f->do_oop((oop*) &_vm_result); + f->do_oop((oop*) &_exception_oop); + f->do_oop((oop*) &_pending_async_exception); + */ + + JvmtiThreadState* const jvmti_thread_state = jt->jvmti_thread_state(); + if (jvmti_thread_state != NULL) { + jvmti_thread_state->oops_do(&rcl); + } + + return rcl.complete(); +} + +bool ReferenceToThreadRootClosure::do_java_threads_oops(JavaThread* jt) { + assert(jt != NULL, "invariant"); + assert(!complete(), "invariant"); + + ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_global_jni_handle, jt); + jt->oops_do(&rcl, NULL, NULL); + return rcl.complete(); +} + +bool ReferenceToThreadRootClosure::do_thread_roots(JavaThread* jt) { + assert(jt != NULL, "invariant"); + + if (do_thread_stack_fast(jt)) { + _complete = true; + return true; + } + + if (do_thread_jni_handles(jt)) { + _complete = true; + return true; + } + + if (do_thread_handle_area(jt)) { + _complete = true; + return true; + } + + if (do_thread_stack_detailed(jt)) { + _complete = true; + return true; + } + + return false; +} + +class RootResolverMarkScope : public MarkingCodeBlobClosure::MarkScope { +}; + +void RootResolver::resolve(RootCallback& callback) { + + // Need to clear cld claim bit before starting + ClassLoaderDataGraph::clear_claimed_marks(); + RootResolverMarkScope mark_scope; + + // thread local roots + ReferenceToThreadRootClosure rtrc(callback); + if (rtrc.complete()) { + return; + } + // system global roots + ReferenceToRootClosure rrc(callback); +} diff --git a/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp b/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f5af570181a741918df40908fead26f15241cb3f --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp @@ -0,0 +1,52 @@ +/* + * 
Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP +#define SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP + +#include "memory/allocation.hpp" +#include "jfr/leakprofiler/utilities/rootType.hpp" +#include "oops/oopsHierarchy.hpp" + +struct RootCallbackInfo { + const void* _high; + const void* _low; + const void* _context; + OldObjectRoot::System _system; + OldObjectRoot::Type _type; +}; + +class RootCallback { + public: + virtual bool process(const RootCallbackInfo& info) = 0; + virtual int entries() const = 0; + virtual const void* at(int idx) const = 0; +}; + +class RootResolver : public AllStatic { + public: + static void resolve(RootCallback& callback); +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP diff --git a/src/share/vm/jfr/leakprofiler/emitEventOperation.cpp b/src/share/vm/jfr/leakprofiler/emitEventOperation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3f99921acbcb81076c6cc13b90762ded9d620091 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/emitEventOperation.cpp @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#include "precompiled.hpp" +#include "gc_interface/collectedHeap.hpp" +#include "jfr/leakprofiler/utilities/granularTimer.hpp" +#include "jfr/leakprofiler/chains/rootSetClosure.hpp" +#include "jfr/leakprofiler/chains/edge.hpp" +#include "jfr/leakprofiler/chains/edgeQueue.hpp" +#include "jfr/leakprofiler/chains/edgeStore.hpp" +#include "jfr/leakprofiler/chains/bitset.hpp" +#include "jfr/leakprofiler/sampling/objectSample.hpp" +#include "jfr/leakprofiler/leakProfiler.hpp" +#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" +#include "jfr/leakprofiler/emitEventOperation.hpp" +#include "jfr/leakprofiler/chains/bfsClosure.hpp" +#include "jfr/leakprofiler/chains/dfsClosure.hpp" +#include "jfr/leakprofiler/chains/objectSampleMarker.hpp" +#include "jfr/recorder/access/jfrbackend.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" +//#include "logging/log.hpp" +#include "memory/resourceArea.hpp" +#include "memory/universe.hpp" +#include "oops/markOop.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/vmThread.hpp" +#include "trace/tracing.hpp" +#include "utilities/globalDefinitions.hpp" + +/* The EdgeQueue is backed by directly managed virtual memory. + * We will attempt to dimension an initial reservation + * in proportion to the size of the heap (represented by heap_region). + * Initial memory reservation: 5% of the heap OR at least 32 Mb + * Commit ratio: 1 : 10 (subject to allocation granularties) + */ +static size_t edge_queue_memory_reservation(const MemRegion& heap_region) { + const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M); + assert(memory_reservation_bytes >= (size_t)32*M, "invariant"); + return memory_reservation_bytes; +} + +static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) { + const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10; + assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant"); + return memory_commit_block_size_bytes; +} + +static void log_edge_queue_summary(const EdgeQueue& edge_queue) { + log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K); + log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top()); + log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K); + if (edge_queue.reserved_size() > 0) { + log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n", + ((double)edge_queue.live_set() / (double)edge_queue.reserved_size())); + } +} + +void EmitEventOperation::doit() { + assert(LeakProfiler::is_running(), "invariant"); + _object_sampler = LeakProfiler::object_sampler(); + assert(_object_sampler != NULL, "invariant"); + + _vm_thread = VMThread::vm_thread(); + assert(_vm_thread == Thread::current(), "invariant"); + _vm_thread_data = _vm_thread->trace_data(); + assert(_vm_thread_data != NULL, "invariant"); + assert(_vm_thread->trace_data()->thread_id() == THREAD_TRACE_ID(_vm_thread), "invariant"); + + // The VM_Operation::evaluate() which invoked doit() + // contains a top level ResourceMark + + // save the original markWord for the potential leak objects + // to be restored on function exit + ObjectSampleMarker marker; + if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) { + return; + } + + EdgeStore edge_store; + + GranularTimer::start(_cutoff_ticks, 1000000); + if (_cutoff_ticks <= 0) { + // no chains + 
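For scale, edge_queue_memory_reservation() and edge_queue_memory_commit_size() above reserve max(heap size / 20, 32 MB) and commit it in blocks of one tenth of the reservation. A quick worked example of that arithmetic, assuming a 4 GB heap (the heap size is an assumption chosen only for illustration):

    // Illustrative only: the edge-queue sizing arithmetic from the helpers above,
    // evaluated for an assumed 4 GB heap.
    #include <cstdio>

    int main() {
      const unsigned long long M = 1024ULL * 1024ULL;
      const unsigned long long heap_bytes = 4096ULL * M;            // assumed heap size
      unsigned long long reservation = heap_bytes / 20;             // 5% of the heap...
      if (reservation < 32 * M) {
        reservation = 32 * M;                                       // ...but never less than 32 MB
      }
      const unsigned long long commit_block = reservation / 10;     // 1 : 10 commit ratio
      std::printf("reservation: %llu MB, commit block: %llu MB\n",
                  reservation / M, commit_block / M);               // 204 MB and 20 MB
      return 0;
    }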
write_events(&edge_store); + return; + } + + assert(_cutoff_ticks > 0, "invariant"); + + // The bitset used for marking is dimensioned as a function of the heap size + const MemRegion heap_region = Universe::heap()->reserved_region(); + BitSet mark_bits(heap_region); + + // The edge queue is dimensioned as a fraction of the heap size + const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region); + EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size)); + + // The initialize() routines will attempt to reserve and allocate backing storage memory. + // Failure to accommodate will render root chain processing impossible. + // As a fallback on failure, just write out the existing samples, flat, without chains. + if (!(mark_bits.initialize() && edge_queue.initialize())) { + log_warning(jfr)("Unable to allocate memory for root chain processing"); + write_events(&edge_store); + return; + } + + // necessary condition for attempting a root set iteration + Universe::heap()->ensure_parsability(false); + + RootSetClosure::add_to_queue(&edge_queue); + if (edge_queue.is_full()) { + // Pathological case where roots don't fit in queue + // Do a depth-first search, but mark roots first + // to avoid walking sideways over roots + DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits); + } else { + BFSClosure bfs(&edge_queue, &edge_store, &mark_bits); + bfs.process(); + } + GranularTimer::stop(); + write_events(&edge_store); + log_edge_queue_summary(edge_queue); +} + +int EmitEventOperation::write_events(EdgeStore* edge_store) { + assert(_object_sampler != NULL, "invariant"); + assert(edge_store != NULL, "invariant"); + assert(_vm_thread != NULL, "invariant"); + assert(_vm_thread_data != NULL, "invariant"); + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + + // save thread id in preparation for thread local trace data manipulations + const traceid vmthread_id = _vm_thread_data->thread_id(); + assert(_vm_thread_data->thread_id() == THREAD_TRACE_ID(_vm_thread), "invariant"); + + const jlong last_sweep = _emit_all ? 
max_jlong : _object_sampler->last_sweep().value(); + int count = 0; + + for (int i = 0; i < _object_sampler->item_count(); ++i) { + const ObjectSample* sample = _object_sampler->item_at(i); + if (sample->is_alive_and_older_than(last_sweep)) { + write_event(sample, edge_store); + ++count; + } + } + + // restore thread local stack trace and thread id + _vm_thread_data->set_thread_id(vmthread_id); + _vm_thread_data->clear_cached_stack_trace(); + assert(_vm_thread_data->thread_id() == THREAD_TRACE_ID(_vm_thread), "invariant"); + + if (count > 0) { + // serialize assoicated checkpoints + ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread); + } + return count; +} + +static int array_size(const oop object) { + assert(object != NULL, "invariant"); + if (object->is_array()) { + return arrayOop(object)->length(); + } + return -1; +} + +void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) { + assert(sample != NULL, "invariant"); + assert(!sample->is_dead(), "invariant"); + assert(edge_store != NULL, "invariant"); + assert(_vm_thread_data != NULL, "invariant"); + const oop* object_addr = sample->object_addr(); + assert(*object_addr != NULL, "invariant"); + + const Edge* edge = (const Edge*)(*object_addr)->mark(); + traceid gc_root_id = 0; + if (edge == NULL) { + // In order to dump out a representation of the event + // even though it was not reachable / too long to reach, + // we need to register a top level edge for this object + Edge e(NULL, object_addr); + edge_store->add_chain(&e, 1); + edge = (const Edge*)(*object_addr)->mark(); + } else { + gc_root_id = edge_store->get_root_id(edge); + } + + assert(edge != NULL, "invariant"); + assert(edge->pointee() == *object_addr, "invariant"); + const traceid object_id = edge_store->get_id(edge); + assert(object_id != 0, "invariant"); + + EventOldObjectSample e(UNTIMED); + e.set_starttime(GranularTimer::start_time()); + e.set_endtime(GranularTimer::end_time()); + e.set_allocationTime(sample->allocation_time()); + e.set_object(object_id); + e.set_arrayElements(array_size(*object_addr)); + e.set_root(gc_root_id); + + // Temporarily assigning both the stack trace id and thread id + // onto the thread local data structure of the VMThread (for the duration + // of the commit() call). This trick provides a means to override + // the event generation mechanism by injecting externally provided id's. + // Here, in particular, this allows us to emit an old object event + // supplying information from where the actual sampling occurred. + _vm_thread_data->set_cached_stack_trace_id(sample->stack_trace_id()); + assert(sample->has_thread(), "invariant"); + _vm_thread_data->set_thread_id(sample->thread_id()); + e.commit(); +} diff --git a/src/share/vm/jfr/leakprofiler/emitEventOperation.hpp b/src/share/vm/jfr/leakprofiler/emitEventOperation.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b1c144e5b359c96ab3f353d8440d8460153c0f4e --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/emitEventOperation.hpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
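write_event() above explains the attribution trick: the stored sample's stack trace id and thread id are temporarily planted in the VM thread's trace data so that the committed event appears to come from the original sampling site, and write_events() restores the originals afterwards. A self-contained sketch of that save-override-restore idea, expressed here as an RAII helper for brevity (the patch does the save and restore by hand), with hypothetical types in place of the JFR thread-local data:

    // Illustrative only: the save-override-restore attribution pattern, with
    // hypothetical types in place of the JFR thread-local trace data.
    #include <stdint.h>
    #include <cstdio>

    struct EventContext {
      uint64_t thread_id;
      uint64_t stack_trace_id;
    };

    static EventContext g_vm_thread_context = { 0, 0 };   // stands in for the VM thread's trace data

    struct ScopedEventAttribution {
      EventContext saved;
      ScopedEventAttribution(uint64_t thread_id, uint64_t stack_trace_id)
        : saved(g_vm_thread_context) {
        g_vm_thread_context.thread_id = thread_id;             // inject the sampling thread
        g_vm_thread_context.stack_trace_id = stack_trace_id;   // inject the sampling site
      }
      ~ScopedEventAttribution() {
        g_vm_thread_context = saved;                           // restore the original context
      }
    };

    static void commit_event() {
      // The "event" reads whatever context is current at commit time.
      std::printf("committed by thread %llu at stack trace %llu\n",
                  (unsigned long long)g_vm_thread_context.thread_id,
                  (unsigned long long)g_vm_thread_context.stack_trace_id);
    }

    int main() {
      {
        ScopedEventAttribution attribution(42, 1001);   // values taken from a stored sample
        commit_event();                                 // attributed to thread 42, trace 1001
      }
      commit_event();                                   // back to the VM thread's own context
      return 0;
    }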
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP +#define SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP + +#include "runtime/vm_operations.hpp" + +class BFSClosure; +class EdgeStore; +class EdgeQueue; +class JfrThreadData; +class ObjectSample; +class ObjectSampler; + +// Safepoint operation for emitting object sample events +class EmitEventOperation : public VM_Operation { + private: + jlong _cutoff_ticks; + bool _emit_all; + VMThread* _vm_thread; + JfrThreadData* _vm_thread_data; + ObjectSampler* _object_sampler; + + void write_event(const ObjectSample* sample, EdgeStore* edge_store); + int write_events(EdgeStore* edge_store); + + public: + EmitEventOperation(jlong cutoff_ticks, bool emit_all) : + _cutoff_ticks(cutoff_ticks), + _emit_all(emit_all), + _vm_thread(NULL), + _vm_thread_data(NULL), + _object_sampler(NULL) { + } + + VMOp_Type type() const { + return VMOp_GC_HeapInspection; + } + + Mode evaluation_mode() const { + return _safepoint; + } + + virtual void doit(); +}; + +#endif // SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP diff --git a/src/share/vm/jfr/leakprofiler/leakProfiler.cpp b/src/share/vm/jfr/leakprofiler/leakProfiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a8ad04e9326389e9481704622119f19841f055e4 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/leakProfiler.cpp @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/leakprofiler/emitEventOperation.hpp" +#include "jfr/leakprofiler/leakProfiler.hpp" +#include "jfr/leakprofiler/startOperation.hpp" +#include "jfr/leakprofiler/stopOperation.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" +#include "jfr/recorder/access/jfrOptionSet.hpp" +#include "memory/iterator.hpp" +#include "oops/oop.hpp" +#include "runtime/atomic.hpp" +#include "runtime/orderAccess.inline.hpp" +#include "runtime/thread.hpp" +#include "runtime/vmThread.hpp" +#include "utilities/ostream.hpp" + +// Only to be updated during safepoint +ObjectSampler* LeakProfiler::_object_sampler = NULL; + +static volatile jbyte suspended = 0; +bool LeakProfiler::start(jint sample_count) { + if (_object_sampler != NULL) { + // already started + return true; + } + // Allows user to disable leak profiler on command line by setting queue size to zero. + if (sample_count > 0) { + StartOperation op(sample_count); + VMThread::execute(&op); + return _object_sampler != NULL; + } + return false; +} + +bool LeakProfiler::stop() { + if (_object_sampler == NULL) { + // already stopped/not started + return true; + } + StopOperation op; + VMThread::execute(&op); + return _object_sampler == NULL; +} + +void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) { + if (!is_running()) { + return; + } + EmitEventOperation op(cutoff_ticks, emit_all); + VMThread::execute(&op); +} + +void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) { + assert(SafepointSynchronize::is_at_safepoint(), + "Leak Profiler::oops_do(...) may only be called during safepoint"); + + if (_object_sampler != NULL) { + _object_sampler->oops_do(is_alive, f); + } +} + +void LeakProfiler::sample(HeapWord* object, + size_t size, + JavaThread* thread) { + assert(is_running(), "invariant"); + assert(thread != NULL, "invariant"); + assert(thread->thread_state() == _thread_in_vm, "invariant"); + + // exclude compiler threads and code sweeper thread + if (thread->is_hidden_from_external_view()) { + return; + } + + _object_sampler->add(object, size, thread); +} + +ObjectSampler* LeakProfiler::object_sampler() { + assert(is_suspended() || SafepointSynchronize::is_at_safepoint(), + "Leak Profiler::object_sampler() may only be called during safepoint"); + return _object_sampler; +} + +void LeakProfiler::set_object_sampler(ObjectSampler* object_sampler) { + assert(SafepointSynchronize::is_at_safepoint(), + "Leak Profiler::set_object_sampler() may only be called during safepoint"); + _object_sampler = object_sampler; +} + +bool LeakProfiler::is_running() { + return _object_sampler != NULL && !suspended; +} + +bool LeakProfiler::is_suspended() { + return _object_sampler != NULL && suspended; +} + +void LeakProfiler::resume() { + assert(is_suspended(), "invariant"); + OrderAccess::storestore(); + Atomic::store((jbyte)0, &suspended); + assert(is_running(), "invariant"); +} + +void LeakProfiler::suspend() { + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + assert(_object_sampler != NULL, "invariant"); + assert(!is_suspended(), "invariant"); + suspended = (jbyte)1; // safepoint visible +} diff --git a/src/share/vm/jfr/leakprofiler/leakProfiler.hpp b/src/share/vm/jfr/leakprofiler/leakProfiler.hpp new file mode 100644 index 0000000000000000000000000000000000000000..173849a1611d731f978a682d8c9d021faedab4fb --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/leakProfiler.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_LEAKPROFILER_HPP +#define SHARE_VM_JFR_LEAKPROFILER_LEAKPROFILER_HPP + +#include "jfr/utilities/jfrTraceTime.hpp" +#include "memory/allocation.hpp" + +class BoolObjectClosure; +class ObjectSampler; +class OopClosure; +class Thread; + +class LeakProfiler : public AllStatic { + friend class ClassUnloadConstantSet; + friend class ConstantSet; + friend class ObjectSampleCheckpoint; + friend class StartOperation; + friend class StopOperation; + friend class EmitEventOperation; + friend class WriteObjectSampleStacktrace; + + private: + static ObjectSampler* _object_sampler; + static JfrTraceTime _last_sweep; + + static void set_object_sampler(ObjectSampler* object_sampler); + static ObjectSampler* object_sampler(); + + static void suspend(); + static void resume(); + static bool is_suspended(); + + public: + static bool start(jint sample_count); + static bool stop(); + static void emit_events(jlong cutoff_ticks, bool emit_all); + static bool is_running(); + + static void sample(HeapWord* object, size_t size, JavaThread* thread); + + // Called by GC + static void oops_do(BoolObjectClosure* is_alive, OopClosure* f); +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_LEAKPROFILER_HPP diff --git a/src/share/vm/jfr/leakprofiler/sampling/objectSample.hpp b/src/share/vm/jfr/leakprofiler/sampling/objectSample.hpp new file mode 100644 index 0000000000000000000000000000000000000000..117983c414844e2a1fe787977e8a58e653814b17 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/sampling/objectSample.hpp @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
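The LeakProfiler facade above derives its running state from two pieces of data: whether an ObjectSampler instance is installed and whether a volatile suspended byte is set, so is_running() means "sampler present and not suspended" and is_suspended() means "sampler present and suspended". A minimal standalone model of that state machine, using std::atomic in place of the VM's Atomic/OrderAccess primitives and a placeholder Sampler type:

#include <atomic>
#include <cassert>

struct Sampler {};  // placeholder for the real ObjectSampler

class Profiler {
 private:
  static Sampler* _sampler;             // installed/removed at a safepoint in the real code
  static std::atomic<bool> _suspended;  // stands in for the volatile jbyte flag

 public:
  static bool start(int sample_count) {
    if (_sampler != nullptr) return true;  // already started
    if (sample_count <= 0)   return false; // a zero queue size disables the profiler
    _sampler = new Sampler();
    return true;
  }

  static bool stop() {
    delete _sampler;
    _sampler = nullptr;
    return true;
  }

  static void suspend() { assert(_sampler != nullptr); _suspended.store(true); }
  static void resume()  { assert(is_suspended());      _suspended.store(false); }

  static bool is_running()   { return _sampler != nullptr && !_suspended.load(); }
  static bool is_suspended() { return _sampler != nullptr &&  _suspended.load(); }
};

Sampler*          Profiler::_sampler = nullptr;
std::atomic<bool> Profiler::_suspended(false);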
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP +#define SHARE_VM_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP + +#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp" +#include "jfr/utilities/jfrAllocation.hpp" +#include "jfr/utilities/jfrTraceTime.hpp" +#include "memory/allocation.hpp" +#include "oops/oop.hpp" +#include "trace/tracing.hpp" +#include "utilities/ticks.hpp" +/* + * Handle for diagnosing Java memory leaks. + * + * The class tracks the time the object was + * allocated, the thread and the stack trace. + */ +class ObjectSample : public JfrCHeapObj { + friend class ObjectSampler; + friend class SampleList; + private: + ObjectSample* _next; + ObjectSample* _previous; + JfrCheckpointBlobHandle _thread_cp; + JfrCheckpointBlobHandle _klass_cp; + oop _object; + Ticks _allocation_time; + traceid _stack_trace_id; + traceid _thread_id; + int _index; + size_t _span; + size_t _allocated; + unsigned int _stack_trace_hash; + bool _dead; + + void set_dead() { + _dead = true; + } + + void release_references() { + if (_thread_cp.valid()) { + _thread_cp.~JfrCheckpointBlobHandle(); + } + if (_klass_cp.valid()) { + _klass_cp.~JfrCheckpointBlobHandle(); + } + } + + void reset() { + set_stack_trace_id(0); + set_stack_trace_hash(0), + release_references(); + _dead = false; + } + + public: + ObjectSample() : _next(NULL), + _previous(NULL), + _thread_cp(), + _klass_cp(), + _object(NULL), + _allocation_time(0), + _stack_trace_id(0), + _thread_id(0), + _index(0), + _span(0), + _allocated(0), + _stack_trace_hash(0), + _dead(false) {} + + ObjectSample* next() const { + return _next; + } + + void set_next(ObjectSample* next) { + _next = next; + } + + ObjectSample* prev() const { + return _previous; + } + + void set_prev(ObjectSample* prev) { + _previous = prev; + } + + bool is_dead() const { + return _dead; + } + + const oop object() const { + return _object; + } + + const oop* object_addr() const { + return &_object; + } + + void set_object(oop object) { + _object = object; + } + + const Klass* klass() const { + assert(_object != NULL, "invariant"); + return _object->klass(); + } + + int index() const { + return _index; + } + + void set_index(int index) { + _index = index; + } + + size_t span() const { + return _span; + } + + void set_span(size_t span) { + _span = span; + } + + void add_span(size_t span) { + _span += span; + } + + size_t allocated() const { + return _allocated; + } + + void set_allocated(size_t size) { + _allocated = size; + } + + const Ticks& allocation_time() const { + return _allocation_time; + } + + const void set_allocation_time(const JfrTraceTime& time) { + _allocation_time = Ticks(time.value()); + } + + bool has_stack_trace() const { + return stack_trace_id() != 0; + } + + traceid stack_trace_id() const { + return _stack_trace_id; + } + + void set_stack_trace_id(traceid id) { + _stack_trace_id = id; + } + + unsigned int stack_trace_hash() const { + return _stack_trace_hash; + } + + void set_stack_trace_hash(unsigned int hash) { + _stack_trace_hash = hash; + } + + bool has_thread() const { + return _thread_id != 0; + } + + traceid thread_id() const { + return _thread_id; + } + + void set_thread_id(traceid id) { + _thread_id = id; + } + + bool is_alive_and_older_than(jlong time_stamp) const { + return !is_dead() && (JfrTraceTime::is_ft_enabled() ? 
+ _allocation_time.ft_value() : _allocation_time.value()) < time_stamp; + } + + const JfrCheckpointBlobHandle& thread_checkpoint() const { + return _thread_cp; + } + + bool has_thread_checkpoint() const { + return _thread_cp.valid(); + } + + // JfrCheckpointBlobHandle assignment operator + // maintains proper reference counting + void set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) { + if (_thread_cp != ref) { + _thread_cp = ref; + } + } + + const JfrCheckpointBlobHandle& klass_checkpoint() const { + return _klass_cp; + } + + bool has_klass_checkpoint() const { + return _klass_cp.valid(); + } + + void set_klass_checkpoint(const JfrCheckpointBlobHandle& ref) { + if (_klass_cp != ref) { + if (_klass_cp.valid()) { + _klass_cp->set_next(ref); + return; + } + _klass_cp = ref; + } + } +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP diff --git a/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..71e875183a35ce7687eef235eca65334b726d955 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" +#include "jfr/recorder/jfrEventSetting.hpp" +#include "jfr/recorder/access/jfrThreadData.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp" +#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" +#include "jfr/leakprofiler/sampling/objectSample.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" +#include "jfr/leakprofiler/sampling/sampleList.hpp" +#include "jfr/leakprofiler/sampling/samplePriorityQueue.hpp" +#include "jfr/utilities/jfrTryLock.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/thread.hpp" +#include "trace/tracing.hpp" + +ObjectSampler::ObjectSampler(size_t size) : + _priority_queue(new SamplePriorityQueue(size)), + _list(new SampleList(size)), + _last_sweep(Tracing::time()), + _total_allocated(0), + _threshold(0), + _size(size), + _tryLock(0), + _dead_samples(false) {} + +ObjectSampler::~ObjectSampler() { + delete _priority_queue; + _priority_queue = NULL; + delete _list; + _list = NULL; +} + +void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) { + assert(thread != NULL, "invariant"); + const traceid thread_id = thread->threadObj() != NULL ? 
thread->trace_data()->thread_id() : 0; + if (thread_id == 0) { + return; + } + assert(thread_id != 0, "invariant"); + + if (!thread->trace_data()->has_thread_checkpoint()) { + JfrCheckpointManager::create_thread_checkpoint(thread); + assert(thread->trace_data()->has_thread_checkpoint(), "invariant"); + } + + traceid stack_trace_id = 0; + unsigned int stack_trace_hash = 0; + if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) { + stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash); + thread->trace_data()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash); + } + + const JfrTraceTime allocation_time = Tracing::time(); + + JfrTryLock tryLock(&_tryLock); + if (!tryLock.has_lock()) { + log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention"); + return; + } + + if (_dead_samples) { + scavenge(); + assert(!_dead_samples, "invariant"); + } + + _total_allocated += allocated; + const size_t span = _total_allocated - _priority_queue->total(); + ObjectSample* sample; + if ((size_t)_priority_queue->count() == _size) { + assert(_list->count() == _size, "invariant"); + const ObjectSample* peek = _priority_queue->peek(); + if (peek->span() > span) { + // quick reject, will not fit + return; + } + sample = _list->reuse(_priority_queue->pop()); + } else { + sample = _list->get(); + } + + assert(sample != NULL, "invariant"); + assert(thread_id != 0, "invariant"); + sample->set_thread_id(thread_id); + sample->set_thread_checkpoint(thread->trace_data()->thread_checkpoint()); + + if (stack_trace_id != 0) { + sample->set_stack_trace_id(stack_trace_id); + sample->set_stack_trace_hash(stack_trace_hash); + } + + sample->set_span(allocated); + sample->set_object((oop)obj); + sample->set_allocated(allocated); + sample->set_allocation_time(allocation_time); + _priority_queue->push(sample); +} + +const ObjectSample* ObjectSampler::last() const { + return _list->last(); +} + +const ObjectSample* ObjectSampler::last_resolved() const { + return _list->last_resolved(); +} + +void ObjectSampler::set_last_resolved(const ObjectSample* sample) { + _list->set_last_resolved(sample); +} + +void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) { + ObjectSample* current = _list->last(); + while (current != NULL) { + ObjectSample* next = current->next(); + if (!current->is_dead()) { + if (is_alive->do_object_b(current->object())) { + // The weakly referenced object is alive, update pointer + f->do_oop(const_cast(current->object_addr())); + } else { + current->set_dead(); + _dead_samples = true; + } + } + current = next; + } + _last_sweep = Tracing::time(); +} + +void ObjectSampler::remove_dead(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + assert(sample->is_dead(), "invariant"); + ObjectSample* const previous = sample->prev(); + // push span on to previous + if (previous != NULL) { + _priority_queue->remove(previous); + previous->add_span(sample->span()); + _priority_queue->push(previous); + } + _priority_queue->remove(sample); + _list->release(sample); +} + +void ObjectSampler::scavenge() { + ObjectSample* current = _list->last(); + while (current != NULL) { + ObjectSample* next = current->next(); + if (current->is_dead()) { + remove_dead(current); + } + current = next; + } + _dead_samples = false; +} + +int ObjectSampler::item_count() const { + return _priority_queue->count(); +} + +const ObjectSample* ObjectSampler::item_at(int index) const { + return _priority_queue->item_at(index); +} + +ObjectSample* 
ObjectSampler::item_at(int index) { + return const_cast( + const_cast(this)->item_at(index) + ); +} + +const JfrTraceTime& ObjectSampler::last_sweep() const { + return _last_sweep; +} diff --git a/src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp new file mode 100644 index 0000000000000000000000000000000000000000..478816dc239c249f56282d95ca59dcc3719e3daf --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_LEAKPROFILER_SAMPLING_OBJECTSAMPLER_HPP +#define SHARE_VM_LEAKPROFILER_SAMPLING_OBJECTSAMPLER_HPP + +#include "memory/allocation.hpp" +#include "jfr/utilities/jfrTraceTime.hpp" + +class BoolObjectClosure; +class OopClosure; +class ObjectSample; +class ObjectSampler; +class SampleList; +class SamplePriorityQueue; +class Thread; + +// Class reponsible for holding samples and +// making sure the samples are evenly distributed as +// new entries are added and removed. +class ObjectSampler : public CHeapObj { + friend class LeakProfiler; + friend class ObjectSampleCheckpoint; + friend class StartOperation; + friend class StopOperation; + friend class EmitEventOperation; + private: + SamplePriorityQueue* _priority_queue; + SampleList* _list; + JfrTraceTime _last_sweep; + size_t _total_allocated; + size_t _threshold; + size_t _size; + volatile int _tryLock; + bool _dead_samples; + + explicit ObjectSampler(size_t size); + ~ObjectSampler(); + + void add(HeapWord* object, size_t size, JavaThread* thread); + void remove_dead(ObjectSample* sample); + void scavenge(); + + // Called by GC + void oops_do(BoolObjectClosure* is_alive, OopClosure* f); + + public: + const ObjectSample* item_at(int index) const; + ObjectSample* item_at(int index); + int item_count() const; + const ObjectSample* last() const; + const ObjectSample* last_resolved() const; + void set_last_resolved(const ObjectSample* sample); + const JfrTraceTime& last_sweep() const; +}; + +#endif // SHARE_VM_LEAKPROFILER_SAMPLING_OBJECTSAMPLER_HPP diff --git a/src/share/vm/jfr/leakprofiler/sampling/sampleList.cpp b/src/share/vm/jfr/leakprofiler/sampling/sampleList.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0b5467b3aa4c67240e13b530c6071a3fc8695d33 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/sampling/sampleList.cpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. 
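ObjectSampler::add() above implements size-bounded sampling keyed by "span": every allocation grows a running _total_allocated, the candidate's span is the allocation volume accumulated since the queued samples were taken, and when the queue is full the smallest-span sample is evicted unless the newcomer's span is even smaller (the peek() quick reject). A self-contained sketch of that policy follows; it deliberately omits the try-lock, dead-sample scavenging and checkpoint bookkeeping, and all names are illustrative.

#include <cassert>
#include <cstddef>
#include <queue>
#include <vector>

// Illustrative payload; the real sampler also stores the oop, thread id and stack trace.
struct Sample {
  size_t span;
  size_t allocated;
};

struct BySpanGreater {
  bool operator()(const Sample& a, const Sample& b) const { return a.span > b.span; }
};

class BoundedSampler {
 private:
  const size_t _capacity;
  size_t _total_allocated;  // running total of every allocation offered to add()
  size_t _queued_span;      // sum of the spans currently queued (the queue's "total()")
  std::priority_queue<Sample, std::vector<Sample>, BySpanGreater> _queue;  // min-heap by span

 public:
  explicit BoundedSampler(size_t capacity)
    : _capacity(capacity), _total_allocated(0), _queued_span(0) {
    assert(capacity > 0);
  }

  void add(size_t allocated) {
    _total_allocated += allocated;
    const size_t span = _total_allocated - _queued_span;
    if (_queue.size() == _capacity) {
      if (_queue.top().span > span) {
        return;  // quick reject: would immediately be the cheapest sample to evict
      }
      _queued_span -= _queue.top().span;
      _queue.pop();  // evict the smallest-span sample to make room
    }
    Sample s;
    s.span = allocated;  // mirrors sample->set_span(allocated) in the patch
    s.allocated = allocated;
    _queued_span += s.span;
    _queue.push(s);
  }
};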
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jfr/leakprofiler/sampling/objectSample.hpp" +#include "jfr/leakprofiler/sampling/sampleList.hpp" +#include "oops/oop.inline.hpp" + +SampleList::SampleList(size_t limit, size_t cache_size) : + _free_list(), + _in_use_list(), + _last_resolved(NULL), + _limit(limit), + _cache_size(cache_size), + _allocated(0) { +} + +SampleList::~SampleList() { + deallocate_samples(_free_list); + deallocate_samples(_in_use_list); +} + +ObjectSample* SampleList::last() const { + return _in_use_list.head(); +} + +const ObjectSample* SampleList::last_resolved() const { + return _last_resolved; +} + +void SampleList::set_last_resolved(const ObjectSample* sample) { + assert(last() == sample, "invariant"); + _last_resolved = sample; +} + +void SampleList::link(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + _in_use_list.prepend(sample); +} + +void SampleList::unlink(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + if (_last_resolved == sample) { + _last_resolved = sample->next(); + } + reset(_in_use_list.remove(sample)); +} + +ObjectSample* SampleList::reuse(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + unlink(sample); + link(sample); + return sample; +} + +void SampleList::populate_cache() { + if (_free_list.count() < _cache_size) { + const size_t cache_delta = _cache_size - _free_list.count(); + for (size_t i = 0; i < cache_delta; ++i) { + ObjectSample* sample = newSample(); + if (sample != NULL) { + _free_list.append(sample); + } + } + } +} + +ObjectSample* SampleList::newSample() const { + if (_limit == _allocated) { + return NULL; + } + ++_allocated; + return new ObjectSample(); +} + +ObjectSample* SampleList::get() { + ObjectSample* sample = _free_list.head(); + if (sample != NULL) { + link(_free_list.remove(sample)); + } else { + sample = newSample(); + if (sample != NULL) { + _in_use_list.prepend(sample); + } + } + if (_cache_size > 0 && sample != NULL) { + populate_cache(); + } + return sample; +} + +void SampleList::release(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + unlink(sample); + _free_list.append(sample); +} + +void SampleList::deallocate_samples(List& list) { + if (list.count() > 0) { + ObjectSample* sample = list.head(); + while (sample != NULL) { + list.remove(sample); + delete sample; + sample = list.head(); + } + } + assert(list.count() == 0, "invariant"); +} + +void SampleList::reset(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + sample->reset(); +} + +bool SampleList::is_full() const { 
+ return _in_use_list.count() == _limit; +} + +size_t SampleList::count() const { + return _in_use_list.count(); +} diff --git a/src/share/vm/jfr/leakprofiler/sampling/sampleList.hpp b/src/share/vm/jfr/leakprofiler/sampling/sampleList.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6264ad6d56d696f767dbbc99ed83e6aaa5baccc8 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/sampling/sampleList.hpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLELIST_HPP +#define SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLELIST_HPP +#include "jfr/utilities/jfrAllocation.hpp" +#include "jfr/utilities/jfrDoublyLinkedList.hpp" + +class ObjectSample; + +class SampleList : public JfrCHeapObj { + typedef JfrDoublyLinkedList List; + private: + List _free_list; + List _in_use_list; + const ObjectSample* _last_resolved; + mutable size_t _allocated; + const size_t _limit; + const size_t _cache_size; + + void populate_cache(); + ObjectSample* newSample() const; + void link(ObjectSample* sample); + void unlink(ObjectSample* sample); + void deallocate_samples(List& list); + void reset(ObjectSample* sample); + + public: + SampleList(size_t limit, size_t cache_size = 0); + ~SampleList(); + + void set_last_resolved(const ObjectSample* sample); + ObjectSample* get(); + ObjectSample* last() const; + void release(ObjectSample* sample); + const ObjectSample* last_resolved() const; + ObjectSample* reuse(ObjectSample* sample); + bool is_full() const; + size_t count() const; +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLELIST_HPP diff --git a/src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.cpp b/src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6c6020b079b6464c12047722bf67950d488c2856 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.cpp @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
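SampleList above recycles ObjectSample nodes between an in-use list and a free list, allocating new nodes lazily up to _limit and optionally pre-populating a small cache. It prepends on get(), so last() is the most recently handed-out sample. A standalone sketch of the same pooling idea using std::list in place of the intrusive JfrDoublyLinkedList (Node and all names are placeholders, and the cache pre-population is left out):

#include <cstddef>
#include <list>

struct Node { int payload = 0; };

class SamplePool {
 private:
  std::list<Node*> _free;
  std::list<Node*> _in_use;  // most recently handed-out node sits at the front
  const size_t _limit;       // hard cap on nodes ever allocated
  size_t _allocated;

  Node* new_node() {
    if (_allocated == _limit) return nullptr;
    ++_allocated;
    return new Node();
  }

 public:
  explicit SamplePool(size_t limit) : _limit(limit), _allocated(0) {}

  ~SamplePool() {
    for (Node* n : _free)   delete n;
    for (Node* n : _in_use) delete n;
  }

  // Hand out a node: prefer the free list, otherwise allocate until the cap is hit.
  Node* get() {
    Node* n = nullptr;
    if (!_free.empty()) {
      n = _free.front();
      _free.pop_front();
    } else {
      n = new_node();
    }
    if (n != nullptr) _in_use.push_front(n);
    return n;
  }

  // Recycle an evicted node: move it back to the front of the in-use list.
  Node* reuse(Node* n) {
    _in_use.remove(n);
    _in_use.push_front(n);
    return n;
  }

  // Return a node to the free list once its sample is dead.
  void release(Node* n) {
    _in_use.remove(n);
    _free.push_back(n);
  }

  bool   is_full() const { return _in_use.size() == _limit; }
  size_t count()   const { return _in_use.size(); }
};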
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jfr/leakprofiler/sampling/objectSample.hpp" +#include "jfr/leakprofiler/sampling/samplePriorityQueue.hpp" +#include "memory/allocation.inline.hpp" +#include "memory/allocation.hpp" +#include "oops/oop.inline.hpp" + +SamplePriorityQueue::SamplePriorityQueue(size_t size) : + _allocated_size(size), + _count(0), + _total(0) { + _items = NEW_C_HEAP_ARRAY(ObjectSample*, size, mtTracing); + memset(_items, 0, sizeof(ObjectSample*) * size); +} + +SamplePriorityQueue::~SamplePriorityQueue() { + FREE_C_HEAP_ARRAY(ObjectSample*, _items, mtTracing); + _items = NULL; +} + +void SamplePriorityQueue::push(ObjectSample* item) { + assert(item != NULL, "invariant"); + assert(_items[_count] == NULL, "invariant"); + + _items[_count] = item; + _items[_count]->set_index(_count); + _count++; + moveUp(_count - 1); + _total += item->span(); +} + +size_t SamplePriorityQueue::total() const { + return _total; +} + +ObjectSample* SamplePriorityQueue::pop() { + if (_count == 0) { + return NULL; + } + + ObjectSample* const s = _items[0]; + assert(s != NULL, "invariant"); + swap(0, _count - 1); + _count--; + assert(s == _items[_count], "invariant"); + // clear from heap + _items[_count] = NULL; + moveDown(0); + _total -= s->span(); + return s; +} + +void SamplePriorityQueue::swap(int i, int j) { + ObjectSample* tmp = _items[i]; + _items[i] = _items[j]; + _items[j] = tmp; + _items[i]->set_index(i); + _items[j]->set_index(j); +} + +static int left(int i) { + return 2 * i + 1; +} + +static int right(int i) { + return 2 * i + 2; +} + +static int parent(int i) { + return (i - 1) / 2; +} + +void SamplePriorityQueue::moveDown(int i) { + do { + int j = -1; + int r = right(i); + if (r < _count && _items[r]->span() < _items[i]->span()) { + int l = left(i); + if (_items[l]->span() < _items[r]->span()) { + j = l; + } else { + j = r; + } + } else { + int l = left(i); + if (l < _count && _items[l]->span() < _items[i]->span()) { + j = l; + } + } + if (j >= 0) { + swap(i, j); + } + i = j; + } while (i >= 0); + +} + +void SamplePriorityQueue::moveUp(int i) { + int p = parent(i); + while (i > 0 && _items[i]->span() < _items[p]->span()) { + swap(i,p); + i = p; + p = parent(i); + } +} + +void SamplePriorityQueue::remove(ObjectSample* s) { + assert(s != NULL, "invariant"); + const size_t realSpan = s->span(); + s->set_span(0); + moveUp(s->index()); + s->set_span(realSpan); + pop(); +} + +int SamplePriorityQueue::count() const { + return _count; +} + +const ObjectSample* SamplePriorityQueue::peek() const { + return _count == 0 ? 
NULL : _items[0]; +} + +ObjectSample* SamplePriorityQueue::item_at(int index) { + assert(index >= 0 && index < _count, "out of range"); + return _items[index]; +} diff --git a/src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.hpp b/src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5ca2753cccc304c4de98ceff894358a1343b5cdc --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/sampling/samplePriorityQueue.hpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLEPRIORITYQUEUE_HPP +#define SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLEPRIORITYQUEUE_HPP + +#include "memory/allocation.hpp" + +class ObjectSample; + +// Priority queue that keeps object samples ordered +// by the amount of allocation they span. +class SamplePriorityQueue : public CHeapObj { + private: + ObjectSample** _items; + size_t _allocated_size; + int _count; + size_t _total; + + void swap(int i, int j); + void moveDown(int index); + void moveUp(int index); + + public: + SamplePriorityQueue(size_t size); + ~SamplePriorityQueue(); + + void push(ObjectSample* sample); + ObjectSample* pop(); + const ObjectSample* peek() const; + void remove(ObjectSample* sample); + ObjectSample* item_at(int index); + size_t total() const; + int count() const; +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_SAMPLING_SAMPLEPRIORITYQUEUE_HPP diff --git a/src/share/vm/jfr/leakprofiler/startOperation.hpp b/src/share/vm/jfr/leakprofiler/startOperation.hpp new file mode 100644 index 0000000000000000000000000000000000000000..c44d7f3a8482adc8cc14ae14fb046cf3880a01b0 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/startOperation.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
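SamplePriorityQueue above is an array-backed binary min-heap ordered by span, and each sample caches its own heap index so remove() needs no search: the element's span is temporarily zeroed so moveUp() floats it to the root, the span is restored (keeping the queue's running total correct), and pop() discards it. The self-contained sketch below reproduces that idea idiomatically rather than line for line; the Item payload and names are illustrative.

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

struct Item {
  size_t span;
  int    index;  // position inside the heap array, kept up to date on every swap
};

class SpanMinHeap {
 private:
  std::vector<Item*> _items;
  size_t _total;

  static int left(int i)   { return 2 * i + 1; }
  static int right(int i)  { return 2 * i + 2; }
  static int parent(int i) { return (i - 1) / 2; }

  void swap_at(int i, int j) {
    std::swap(_items[i], _items[j]);
    _items[i]->index = i;
    _items[j]->index = j;
  }

  void move_up(int i) {
    while (i > 0 && _items[i]->span < _items[parent(i)]->span) {
      swap_at(i, parent(i));
      i = parent(i);
    }
  }

  void move_down(int i) {
    const int n = static_cast<int>(_items.size());
    for (;;) {
      int smallest = i;
      if (left(i)  < n && _items[left(i)]->span  < _items[smallest]->span) smallest = left(i);
      if (right(i) < n && _items[right(i)]->span < _items[smallest]->span) smallest = right(i);
      if (smallest == i) return;
      swap_at(i, smallest);
      i = smallest;
    }
  }

 public:
  SpanMinHeap() : _total(0) {}

  void push(Item* item) {
    item->index = static_cast<int>(_items.size());
    _items.push_back(item);
    move_up(item->index);
    _total += item->span;
  }

  Item* pop() {
    if (_items.empty()) return nullptr;
    Item* top = _items.front();
    swap_at(0, static_cast<int>(_items.size()) - 1);
    _items.pop_back();
    if (!_items.empty()) move_down(0);
    _total -= top->span;
    return top;
  }

  // Remove an arbitrary element without searching: zero its key, float it to the
  // root, restore the key (so _total stays correct), then pop the root.
  void remove(Item* item) {
    const size_t real_span = item->span;
    item->span = 0;
    move_up(item->index);
    item->span = real_span;
    Item* popped = pop();
    assert(popped == item);
    (void)popped;
  }

  size_t total() const { return _total; }
  size_t count() const { return _items.size(); }
};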
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP +#define SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP + +#include "jfr/recorder/jfrRecorder.hpp" +#include "jfr/leakprofiler/leakProfiler.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" +#include "jfr/recorder/access/jfrOptionSet.hpp" +#include "runtime/vm_operations.hpp" + +// Safepoint operation for starting leak profiler object sampler +class StartOperation : public VM_Operation { + private: + jlong _sample_count; + public: + StartOperation(jlong sample_count) : + _sample_count(sample_count) { + } + + Mode evaluation_mode() const { + return _safepoint; + } + + VMOp_Type type() const { + return VMOp_GC_HeapInspection; + } + + virtual void doit() { + assert(!LeakProfiler::is_running(), "invariant"); + jint queue_size = JfrOptionSet::old_object_queue_size(); + LeakProfiler::set_object_sampler(new ObjectSampler(queue_size)); + log_trace(jfr, system)( "Object sampling started"); + } +}; + +#endif // SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP diff --git a/src/share/vm/jfr/leakprofiler/stopOperation.hpp b/src/share/vm/jfr/leakprofiler/stopOperation.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b8e832d6ea4fe92cdad409a8abe7d57f5f878583 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/stopOperation.hpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP +#define SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP + +#include "jfr/leakprofiler/leakProfiler.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" +#include "jfr/recorder/access/jfrOptionSet.hpp" +#include "runtime/vm_operations.hpp" + +// Safepoint operation for stopping leak profiler object sampler +class StopOperation : public VM_Operation { + public: + StopOperation() {} + + Mode evaluation_mode() const { + return _safepoint; + } + + VMOp_Type type() const { + return VMOp_GC_HeapInspection; + } + + virtual void doit() { + assert(LeakProfiler::is_running(), "invariant"); + ObjectSampler* object_sampler = LeakProfiler::object_sampler(); + delete object_sampler; + LeakProfiler::set_object_sampler(NULL); + log_trace(jfr, system)( "Object sampling stopped"); + } +}; + +#endif // SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP diff --git a/src/share/vm/jfr/leakprofiler/utilities/granularTimer.cpp b/src/share/vm/jfr/leakprofiler/utilities/granularTimer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b0f1c96e3814a6c7970e32cc93cc2ac7a72c98ae --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/utilities/granularTimer.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jfr/leakprofiler/utilities/granularTimer.hpp" +#include "trace/traceBackend.hpp" + +long GranularTimer::_granularity = 0; +long GranularTimer::_counter = 0; +JfrTraceTime GranularTimer::_finish_time_ticks = 0; +JfrTraceTime GranularTimer::_start_time_ticks = 0; +bool GranularTimer::_finished = false; + +void GranularTimer::start(jlong duration_ticks, long granularity) { + assert(granularity > 0, "granularity must be at least 1"); + _granularity = granularity; + _counter = granularity; + _start_time_ticks = JfrTraceTime::now(); + const jlong end_time_ticks = _start_time_ticks.value() + duration_ticks; + _finish_time_ticks = end_time_ticks < 0 ? 
JfrTraceTime(max_jlong) : JfrTraceTime(end_time_ticks); + _finished = _finish_time_ticks == _start_time_ticks; + assert(_finish_time_ticks >= 0, "invariant"); + assert(_finish_time_ticks >= _start_time_ticks, "invariant"); +} +void GranularTimer::stop() { + if (!_finished) { + _finish_time_ticks = JfrTraceTime::now(); + } +} +const JfrTraceTime& GranularTimer::start_time() { + return _start_time_ticks; +} + +const JfrTraceTime& GranularTimer::end_time() { + return _finish_time_ticks; +} + +bool GranularTimer::is_finished() { + assert(_granularity != 0, "GranularTimer::is_finished must be called after GranularTimer::start"); + if (--_counter == 0) { + if (_finished) { + // reset so we decrease to zero at next iteration + _counter = 1; + return true; + } + if (JfrTraceTime::now() > _finish_time_ticks) { + _finished = true; + _counter = 1; + return true; + } + assert(_counter == 0, "invariant"); + _counter = _granularity; // restore next batch + } + return false; +} diff --git a/src/share/vm/jfr/leakprofiler/utilities/granularTimer.hpp b/src/share/vm/jfr/leakprofiler/utilities/granularTimer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..0cc761b549b186e8523550e2477d61ca48fcc1ba --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/utilities/granularTimer.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_LEAKPROFILER_UTILITIES_GRANULARTIMER_HPP +#define SHARE_VM_LEAKPROFILER_UTILITIES_GRANULARTIMER_HPP + +#include "jfr/utilities/jfrTraceTime.hpp" +#include "memory/allocation.hpp" + +class GranularTimer : public AllStatic { + private: + static JfrTraceTime _finish_time_ticks; + static JfrTraceTime _start_time_ticks; + static long _counter; + static long _granularity; + static bool _finished; + public: + static void start(jlong duration_ticks, long granularity); + static void stop(); + static const JfrTraceTime& start_time(); + static const JfrTraceTime& end_time(); + static bool is_finished(); +}; + +#endif // SHARE_VM_LEAKPROFILER_UTILITIES_GRANULARTIMER_HPP diff --git a/src/share/vm/jfr/leakprofiler/utilities/objectSampleAssistance.cpp b/src/share/vm/jfr/leakprofiler/utilities/objectSampleAssistance.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8da050bb3223417b3b0d31fda8f6c71c63842c1c --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/utilities/objectSampleAssistance.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. 
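GranularTimer above bounds the cost of deadline checking during the reference-chain search: is_finished() only reads the clock once every granularity calls by counting down a batch counter, and once the deadline has passed it keeps answering true cheaply on every subsequent call. A standalone sketch of the same batched-deadline idea, using std::chrono instead of JfrTraceTime (the class and member names are illustrative):

#include <cassert>
#include <chrono>

class BatchedDeadline {
 private:
  using Clock = std::chrono::steady_clock;
  Clock::time_point _deadline;
  long _granularity;  // number of calls between real clock reads
  long _counter;
  bool _expired;

 public:
  BatchedDeadline(Clock::duration budget, long granularity)
    : _deadline(Clock::now() + budget),
      _granularity(granularity),
      _counter(granularity),
      _expired(false) {
    assert(granularity > 0);
  }

  // Cheap in the common case: only every _granularity-th call touches the clock.
  bool is_finished() {
    if (--_counter == 0) {
      if (_expired || Clock::now() > _deadline) {
        _expired = true;
        _counter = 1;           // keep answering true on every subsequent call
        return true;
      }
      _counter = _granularity;  // start the next batch
    }
    return false;
  }
};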
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jfr/leakprofiler/leakProfiler.hpp" +#include "jfr/leakprofiler/utilities/objectSampleAssistance.hpp" +#include "jfr/recorder/access/jfrThreadData.hpp" +#include "runtime/thread.hpp" + +ObjectSampleAssistance::ObjectSampleAssistance(HeapWord* obj, size_t alloc_size, Thread* thread) : _trace_data(NULL) { + if (LeakProfiler::is_running()) { + assert(thread->is_Java_thread(), "invariant"); + _trace_data = thread->trace_data(); + LeakProfiler::sample(obj, alloc_size, (JavaThread*)thread); + } +} + +ObjectSampleAssistance::~ObjectSampleAssistance() { + if (_trace_data != NULL) { + _trace_data->clear_cached_stack_trace(); + } +} diff --git a/src/share/vm/jfr/leakprofiler/utilities/objectSampleAssistance.hpp b/src/share/vm/jfr/leakprofiler/utilities/objectSampleAssistance.hpp new file mode 100644 index 0000000000000000000000000000000000000000..c33362bd2320594e4b2ca2e45585eaa892f26fd8 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/utilities/objectSampleAssistance.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
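ObjectSampleAssistance above is a stack-allocated guard placed around an allocation: if the profiler is running, the constructor records the sample and remembers the thread's trace data, and the destructor clears the stack trace the sampler cached on the thread so it cannot leak into later events. A minimal standalone model of that constructor/destructor pairing; ThreadState, profiler_is_running() and the cache calls are placeholders for the patch's JfrThreadData and LeakProfiler APIs.

#include <cstddef>

struct ThreadState {
  bool has_cached_trace = false;
  void cache_trace()        { has_cached_trace = true; }
  void clear_cached_trace() { has_cached_trace = false; }
};

inline bool profiler_is_running() { return true; }  // stand-in for LeakProfiler::is_running()

class SampleGuard {
 private:
  ThreadState* _state;  // non-null only if we actually sampled in the constructor

 public:
  SampleGuard(void* obj, size_t alloc_size, ThreadState* thread_state) : _state(nullptr) {
    (void)obj; (void)alloc_size;
    if (profiler_is_running()) {
      _state = thread_state;
      _state->cache_trace();  // the real code samples the allocation here
    }
  }

  ~SampleGuard() {
    if (_state != nullptr) {
      _state->clear_cached_trace();  // undo only what the constructor set up
    }
  }
};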
+ * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_UTILITIES_OBJECTSAMPLEASSISTANCE_HPP +#define SHARE_VM_JFR_LEAKPROFILER_UTILITIES_OBJECTSAMPLEASSISTANCE_HPP + +#include "memory/allocation.hpp" + +class JfrThreadData; + +class ObjectSampleAssistance : public StackObj { + private: + JfrThreadData* _trace_data; + public: + ObjectSampleAssistance(HeapWord* obj, size_t alloc_size, Thread* thread); + ~ObjectSampleAssistance(); +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_UTILITIES_OBJECTSAMPLEASSISTANCE_HPP diff --git a/src/share/vm/jfr/leakprofiler/utilities/rootType.hpp b/src/share/vm/jfr/leakprofiler/utilities/rootType.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b8ccff3710af1f572abfd2f86fb1232f3f4597e3 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/utilities/rootType.hpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_LEAKPROFILER_UTILITIES_ROOTTYPE_HPP +#define SHARE_VM_LEAKPROFILER_UTILITIES_ROOTTYPE_HPP + +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" + +class OldObjectRoot : public AllStatic { + public: + enum System { + _system_undetermined, + _universe, + _global_jni_handles, + _threads, + _object_synchronizer, + _system_dictionary, + _class_loader_data, + _management, + _jvmti, + _code_cache, + _string_table, + _aot, + _number_of_systems + }; + + enum Type { + _type_undetermined, + _stack_variable, + _local_jni_handle, + _global_jni_handle, + _handle_area, + _number_of_types + }; + + static const char* system_description(System system) { + switch (system) { + case _system_undetermined: + return ""; + case _universe: + return "Universe"; + case _global_jni_handles: + return "Global JNI Handles"; + case _threads: + return "Threads"; + case _object_synchronizer: + return "Object Monitor"; + case _system_dictionary: + return "System Dictionary"; + case _class_loader_data: + return "Class Loader Data"; + case _management: + return "Management"; + case _jvmti: + return "JVMTI"; + case _code_cache: + return "Code Cache"; + case _string_table: + return "String Table"; + case _aot: + return "AOT"; + default: + ShouldNotReachHere(); + } + return NULL; + } + + static const char* type_description(Type type) { + switch (type) { + case _type_undetermined: + return ""; + case _stack_variable: + return "Stack Variable"; + case _local_jni_handle: + return "Local JNI Handle"; + case _global_jni_handle: + return "Global JNI Handle"; + case _handle_area: + return "Handle Area"; + default: + ShouldNotReachHere(); + } + return NULL; + } +}; + +#endif // SHARE_VM_LEAKPROFILER_UTILITIES_ROOTTYPE_HPP diff --git a/src/share/vm/jfr/leakprofiler/utilities/saveRestore.cpp b/src/share/vm/jfr/leakprofiler/utilities/saveRestore.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9db8860460c706b0a893bc1ebe6929f5ca3c5d90 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/utilities/saveRestore.cpp @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" +#include "jfr/leakprofiler/utilities/saveRestore.hpp" + +MarkOopContext::MarkOopContext() : _obj(NULL), _mark_oop(NULL) {} + +MarkOopContext::MarkOopContext(const oop obj) : _obj(obj), _mark_oop(obj->mark()) { + assert(_obj->mark() == _mark_oop, "invariant"); + // now we will "poison" the mark word of the object + // to the intermediate monitor INFLATING state. + // This is an "impossible" state during a safepoint, + // hence we will use it to quickly identify objects + // during the reachability search from gc roots. + assert(NULL == markOopDesc::INFLATING(), "invariant"); + _obj->set_mark(markOopDesc::INFLATING()); + assert(NULL == obj->mark(), "invariant"); +} + +MarkOopContext::~MarkOopContext() { + if (_obj != NULL) { + _obj->set_mark(_mark_oop); + assert(_obj->mark() == _mark_oop, "invariant"); + } +} + +MarkOopContext::MarkOopContext(const MarkOopContext& rhs) : _obj(NULL), _mark_oop(NULL) { + swap(const_cast(rhs)); +} + +void MarkOopContext::operator=(MarkOopContext rhs) { + swap(rhs); +} + +void MarkOopContext::swap(MarkOopContext& rhs) { + oop temp_obj = rhs._obj; + markOop temp_mark_oop = rhs._mark_oop; + rhs._obj = _obj; + rhs._mark_oop = _mark_oop; + _obj = temp_obj; + _mark_oop = temp_mark_oop; +} + +CLDClaimContext::CLDClaimContext() : _cld(NULL) {} + +CLDClaimContext::CLDClaimContext(ClassLoaderData* cld) : _cld(cld) { + assert(_cld->claimed(), "invariant"); + _cld->clear_claimed(); +} + +CLDClaimContext::~CLDClaimContext() { + if (_cld != NULL) { + assert(!_cld->claimed(), "invariant"); + _cld->claim(); + assert(_cld->claimed(), "invariant"); + } +} + +CLDClaimContext::CLDClaimContext(const CLDClaimContext& rhs) : _cld(NULL) { + swap(const_cast(rhs)); +} + +void CLDClaimContext::operator=(CLDClaimContext rhs) { + swap(rhs); +} + +void CLDClaimContext::swap(CLDClaimContext& rhs) { + ClassLoaderData* temp_cld = rhs._cld; + rhs._cld = _cld; + _cld = temp_cld; +} + +CLDClaimStateClosure::CLDClaimStateClosure() : CLDClosure(), _state() {} + +void CLDClaimStateClosure::do_cld(ClassLoaderData* cld) { + assert(cld != NULL, "invariant"); + if (cld->claimed()) { + _state.save(cld); + } +} + +SaveRestoreCLDClaimBits::SaveRestoreCLDClaimBits() : _claim_state_closure() { + ClassLoaderDataGraph::cld_do(&_claim_state_closure); +} + +SaveRestoreCLDClaimBits::~SaveRestoreCLDClaimBits() { + ClassLoaderDataGraph::clear_claimed_marks(); +} diff --git a/src/share/vm/jfr/leakprofiler/utilities/saveRestore.hpp b/src/share/vm/jfr/leakprofiler/utilities/saveRestore.hpp new file mode 100644 index 0000000000000000000000000000000000000000..98076eba932b0bc18fcd1efb04ac0cc279ef4a04 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/utilities/saveRestore.hpp @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
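saveRestore.cpp above captures per-object state (a mark word, a CLD claim bit), overwrites it with a sentinel for the duration of the root search, and restores it when the context object is destroyed; copy construction and operator= go through swap so the pending restore is transferred rather than duplicated. A standalone sketch of that save/poison/restore context with the same transfer-on-copy behaviour; Object, POISON and the field names are simplified stand-ins for the real mark word machinery.

#include <cassert>
#include <utility>

struct Object {
  unsigned long mark;  // stands in for the real mark word
};

static const unsigned long POISON = 0;  // sentinel used to recognize objects during the search

class MarkContext {
 private:
  Object*       _obj;
  unsigned long _saved;

  void swap(MarkContext& rhs) {
    std::swap(_obj,   rhs._obj);
    std::swap(_saved, rhs._saved);
  }

 public:
  MarkContext() : _obj(nullptr), _saved(0) {}

  explicit MarkContext(Object* obj) : _obj(obj), _saved(obj->mark) {
    assert(obj->mark != POISON);
    _obj->mark = POISON;  // poison so the object is cheap to recognize during the search
  }

  // Copying transfers the pending restore instead of duplicating it,
  // mirroring the swap-based copy constructor and operator= in the patch.
  MarkContext(const MarkContext& rhs) : _obj(nullptr), _saved(0) {
    swap(const_cast<MarkContext&>(rhs));
  }
  void operator=(MarkContext rhs) { swap(rhs); }

  ~MarkContext() {
    if (_obj != nullptr) {
      _obj->mark = _saved;  // restore the original value exactly once
    }
  }
};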
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_UTILITIES_SAVERESTORE_HPP +#define SHARE_VM_JFR_LEAKPROFILER_UTILITIES_SAVERESTORE_HPP + +#include "memory/allocation.hpp" +#include "memory/iterator.hpp" +#include "oops/markOop.hpp" +#include "utilities/growableArray.hpp" + +template +class SaveRestore { + private: + Impl _impl; + public: + SaveRestore() : _impl() { + _impl.setup(); + } + + void save(T const& value) { + _impl.save(value); + } + + ~SaveRestore() { + _impl.restore(); + } +}; + +template +class ContextStore { +private: + GrowableArray* _storage; +public: + ContextStore() : _storage(NULL) {} + + void setup() { + assert(_storage == NULL, "invariant"); + _storage = new GrowableArray(16); + } + + void save(T const& value) { + _storage->push(Context(value)); + } + + void restore() { + for (int i = 0; i < _storage->length(); ++i) { + _storage->at(i).~Context(); + } + } +}; + +/* +* This class will save the original mark oop of an object sample object. +* It will then install an "identifier" mark oop to be used for +* identification purposes in the search for reference chains. +* The destructor will restore the original mark oop. +*/ + +class MarkOopContext { + private: + oop _obj; + markOop _mark_oop; + void swap(MarkOopContext& rhs); + public: + MarkOopContext(); + MarkOopContext(const oop obj); + MarkOopContext(const MarkOopContext& rhs); + void operator=(MarkOopContext rhs); + ~MarkOopContext(); +}; + +typedef SaveRestore > SaveRestoreMarkOops; + +class ClassLoaderData; + +class CLDClaimContext { + private: + ClassLoaderData* _cld; + void swap(CLDClaimContext& rhs); + public: + CLDClaimContext(); + CLDClaimContext(ClassLoaderData* cld); + CLDClaimContext(const CLDClaimContext& rhs); + void operator=(CLDClaimContext rhs); + ~CLDClaimContext(); +}; + +typedef SaveRestore > SaveRestoreCLDClaimState; + +class CLDClaimStateClosure : public CLDClosure { + private: + SaveRestoreCLDClaimState _state; + public: + CLDClaimStateClosure(); + void do_cld(ClassLoaderData* cld); +}; + +class SaveRestoreCLDClaimBits : public StackObj { + private: + CLDClaimStateClosure _claim_state_closure; + public: + SaveRestoreCLDClaimBits(); + ~SaveRestoreCLDClaimBits(); +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_UTILITIES_SAVERESTORE_HPP diff --git a/src/share/vm/jfr/leakprofiler/utilities/unifiedOop.hpp b/src/share/vm/jfr/leakprofiler/utilities/unifiedOop.hpp new file mode 100644 index 0000000000000000000000000000000000000000..64176d3ba96cc34ce09f345550db2ad08d2281e5 --- /dev/null +++ b/src/share/vm/jfr/leakprofiler/utilities/unifiedOop.hpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
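The SaveRestore and ContextStore templates in saveRestore.hpp above are written without their parameter lists (as are the SaveRestoreMarkOops and SaveRestoreCLDClaimState typedefs). Judging from how they are used in saveRestore.cpp, they presumably take (T, Impl) and (T, Context) respectively; the sketch below restores those parameters and swaps std::vector in for GrowableArray so it compiles on its own. The parameter names and the reconstructed typedefs in the trailing comment are educated guesses, not the patch's exact text.

#include <vector>

template <typename T, typename Context>
class ContextStore {
 private:
  std::vector<Context>* _storage;  // GrowableArray<Context>* in the patch

 public:
  ContextStore() : _storage(nullptr) {}

  void setup() { _storage = new std::vector<Context>(); }

  void save(T const& value) { _storage->push_back(Context(value)); }

  void restore() {
    delete _storage;      // Context destructors run here, undoing each save
    _storage = nullptr;
  }
};

template <typename T, typename Impl>
class SaveRestore {
 private:
  Impl _impl;

 public:
  SaveRestore() : _impl() { _impl.setup(); }
  void save(T const& value) { _impl.save(value); }
  ~SaveRestore() { _impl.restore(); }
};

// The two typedefs then presumably read, with their arguments restored:
//   typedef SaveRestore<oop, ContextStore<oop, MarkOopContext> > SaveRestoreMarkOops;
//   typedef SaveRestore<ClassLoaderData*,
//                       ContextStore<ClassLoaderData*, CLDClaimContext> > SaveRestoreCLDClaimState;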
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOP_HPP +#define SHARE_VM_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOP_HPP + +#include "oops/oop.inline.hpp" + +class UnifiedOop : public AllStatic { + public: + static const bool is_narrow(const oop* ref) { + assert(ref != NULL, "invariant"); + return 1 == (((u8)ref) & 1); + } + + static const oop* decode(const oop* ref) { + assert(ref != NULL, "invariant"); + return is_narrow(ref) ? (const oop*)(((u8)ref) & ~1) : ref; + } + + static const oop* encode(narrowOop* ref) { + assert(ref != NULL, "invariant"); + return (const oop*)((u8)ref | 1); + } + + static oop dereference(const oop* ref) { + assert(ref != NULL, "invariant"); + return is_narrow(ref) ? + oopDesc::load_decode_heap_oop((narrowOop*)decode(ref)) : + oopDesc::load_heap_oop(const_cast(ref)); + + } +}; + +#endif // SHARE_VM_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOP_HPP diff --git a/src/share/vm/jfr/metadata/jfrConstantSerializer.hpp b/src/share/vm/jfr/metadata/jfrConstantSerializer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a91f85fdcc5679a22e688fa9fc80ccb8beadb974 --- /dev/null +++ b/src/share/vm/jfr/metadata/jfrConstantSerializer.hpp @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_METADATA_JFRCONSTANTSERIALIZER_HPP +#define SHARE_VM_JFR_METADATA_JFRCONSTANTSERIALIZER_HPP + +#include "memory/allocation.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" +#include "tracefiles/traceTypes.hpp" + +/* + * A JfrConstant (type) is a relation defined by enumerating a set of ordered pairs: + * + * { <1, "my_first_constant">, <2, "my_second_constant">, ... } + * + * Write the key into event fields and the framework will maintain the mapping (if you register as below). + * + * In order to support mapping of constants, we use an interface called JfrConstantSerializer. 
+ * Inherit JfrConstantSerializer, create a CHeapObj instance and use JfrConstantSerializer::register_serializer(...) to register.
+ * Once registered, the ownership of the serializer instance is transferred to Jfr.
+ *
+ * How to register:
+ *
+ * bool register_serializer(JfrConstantTypeId id, bool require_safepoint, bool permit_cache, JfrConstantSerializer* serializer)
+ *
+ * The constant types are machine generated into an enum located in tracefiles/traceTypes.hpp (included).
+ *
+ * enum JfrConstantTypeId {
+ *   ...
+ *   CONSTANT_TYPE_THREADGROUP,
+ *   CONSTANT_TYPE_CLASSLOADER,
+ *   CONSTANT_TYPE_METHOD,
+ *   CONSTANT_TYPE_SYMBOL,
+ *   CONSTANT_TYPE_THREADSTATE,
+ *   CONSTANT_TYPE_INFLATECAUSE,
+ *   ...
+ *
+ * id                the id of the constant type you are defining (see the enum above).
+ * require_safepoint indicates whether your constants need to be evaluated and written under a safepoint.
+ * permit_cache      indicates whether your constants are stable enough to be cached
+ *                   (implies the callback is invoked only once and the contents will be cached. Set this to true for static information).
+ * serializer        the serializer instance you define.
+ *
+ * See below for guidance about how to implement write_constants().
+ *
+ */
+class JfrConstantSerializer : public CHeapObj<mtTracing> {
+ public:
+  virtual void write_constants(JfrCheckpointWriter& writer) = 0;
+  virtual ~JfrConstantSerializer() {}
+  static bool register_serializer(JfrConstantTypeId id, bool require_safepoint, bool permit_cache, JfrConstantSerializer* serializer);
+};
+
+/*
+ * Invoke writer.write_number_of_constants(num) to define the total number of constant mappings.
+ *
+ * You then write the individual constants as ordered pairs, ...
+ *
+ * Here is an example:
+ *
+ * void MyConstant::write_constants(JfrCheckpointWriter& writer) {
+ *   const int nof_causes = ObjectSynchronizer::inflate_cause_nof;
+ *   writer.write_number_of_constants(nof_causes); // write number of constants (mappings) to follow
+ *   for (int i = 0; i < nof_causes; i++) {
+ *     writer.write_key(i); // write key
+ *     writer.write(ObjectSynchronizer::inflate_cause_name((ObjectSynchronizer::InflateCause)i)); // write value
+ *   }
+ * }
+ *
+ * Please see jfr/recorder/checkpoint/constant/jfrConstant.cpp for reference.
+ */
+
+#endif // SHARE_VM_JFR_METADATA_JFRCONSTANTSERIALIZER_HPP
diff --git a/src/share/vm/jfr/periodic/jfrOSInterface.cpp b/src/share/vm/jfr/periodic/jfrOSInterface.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8f0b2c4921be44bc5694cea09c9a3b0a22e8c9a4
--- /dev/null
+++ b/src/share/vm/jfr/periodic/jfrOSInterface.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/periodic/jfrOSInterface.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/os.hpp"
+#include "runtime/os_perf.hpp"
+#include "utilities/ostream.hpp"
+#include "trace/tracing.hpp"
+
+#include <stdlib.h> // for environment variables
+#ifdef __APPLE__
+#include <crt_externs.h>
+#define environ (*_NSGetEnviron())
+#endif
+
+#ifndef environ
+extern char** environ;
+#endif
+
+static JfrOSInterface* _instance = NULL;
+
+JfrOSInterface& JfrOSInterface::instance() {
+  return *_instance;
+}
+
+JfrOSInterface* JfrOSInterface::create() {
+  assert(_instance == NULL, "invariant");
+  _instance = new JfrOSInterface();
+  return _instance;
+}
+
+void JfrOSInterface::destroy() {
+  if (_instance != NULL) {
+    delete _instance;
+    _instance = NULL;
+  }
+}
+
+class JfrOSInterface::JfrOSInterfaceImpl : public JfrCHeapObj {
+  friend class JfrOSInterface;
+ private:
+  CPUInformationInterface* _cpu_info_interface;
+  CPUPerformanceInterface* _cpu_perf_interface;
+  SystemProcessInterface* _system_process_interface;
+
+  // stub helper
+  void functionality_not_implemented(char** str) const;
+
+  JfrOSInterfaceImpl();
+  bool initialize();
+  ~JfrOSInterfaceImpl();
+
+  // cpu info
+  int cpu_information(CPUInformation& cpu_info);
+  int cpu_load(int which_logical_cpu, double* cpu_load);
+  int context_switch_rate(double* rate);
+  int cpu_load_total_process(double* cpu_load);
+  int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotal);
+
+  // os information
+  int os_version(char** os_version) const;
+
+  // environment information
+  void generate_environment_variables_events();
+
+  // system processes information
+  int system_processes(SystemProcess** system_processes, int* no_of_sys_processes);
+};
+
+JfrOSInterface::JfrOSInterfaceImpl::JfrOSInterfaceImpl() : _cpu_info_interface(NULL),
+                                                           _cpu_perf_interface(NULL),
+                                                           _system_process_interface(NULL) {}
+
+bool JfrOSInterface::JfrOSInterfaceImpl::initialize() {
+  _cpu_info_interface = new CPUInformationInterface();
+  bool success = _cpu_info_interface != NULL && _cpu_info_interface->initialize();
+  if (!success) {
+    return false;
+  }
+  _cpu_perf_interface = new CPUPerformanceInterface();
+  success = _cpu_perf_interface != NULL && _cpu_perf_interface->initialize();
+  if (!success) {
+    return false;
+  }
+  _system_process_interface = new SystemProcessInterface();
+  success = _system_process_interface != NULL && _system_process_interface->initialize();
+  return success;
+}
+
+JfrOSInterface::JfrOSInterfaceImpl::~JfrOSInterfaceImpl(void) {
+  if (_cpu_info_interface != NULL) {
+    delete _cpu_info_interface;
+    _cpu_info_interface = NULL;
+  }
+  if (_cpu_perf_interface != NULL) {
+    delete _cpu_perf_interface;
+    _cpu_perf_interface = NULL;
+  }
+  if (_system_process_interface != NULL) {
+    delete _system_process_interface;
+    _system_process_interface = NULL;
+  }
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::cpu_load(int which_logical_cpu, double* cpu_load) {
+  return _cpu_perf_interface->cpu_load(which_logical_cpu, cpu_load);
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::context_switch_rate(double* rate) {
+  return _cpu_perf_interface->context_switch_rate(rate);
+}
+
+int JfrOSInterface::JfrOSInterfaceImpl::cpu_load_total_process(double* cpu_load) {
+  return _cpu_perf_interface->cpu_load_total_process(cpu_load);
+}
+
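+// Note (descriptive comment): the accessors below delegate to the os_perf
+// interfaces created in initialize(); callers interpret the returned status
+// codes (see the OS_OK / OS_ERR / FUNCTIONALITY_NOT_IMPLEMENTED checks in
+// jfr/periodic/jfrPeriodic.cpp).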
+int JfrOSInterface::JfrOSInterfaceImpl::cpu_loads_process(double* pjvmUserLoad, + double* pjvmKernelLoad, + double* psystemTotal) { + return _cpu_perf_interface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal); +} + +int JfrOSInterface::JfrOSInterfaceImpl::cpu_information(CPUInformation& cpu_info) { + return _cpu_info_interface->cpu_information(cpu_info); +} + +int JfrOSInterface::JfrOSInterfaceImpl::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) { + assert(system_processes != NULL, "system_processes pointer is NULL!"); + assert(no_of_sys_processes != NULL, "no_of_sys_processes pointer is NULL!"); + return _system_process_interface->system_processes(system_processes, no_of_sys_processes); +} + +// assigned char* is RESOURCE_HEAP_ALLOCATED +// caller need to ensure proper ResourceMark placement. +int JfrOSInterface::JfrOSInterfaceImpl::os_version(char** os_version) const { + assert(os_version != NULL, "os_version pointer is NULL!"); + stringStream os_ver_info; + os::print_os_info_brief(&os_ver_info); + *os_version = os_ver_info.as_string(); + return OS_OK; +} + +void JfrOSInterface::JfrOSInterfaceImpl::functionality_not_implemented(char** str) const { + assert(str != NULL, "address to string is NULL!"); + const char* not_impl = "Functionality_not_implemented"; + const size_t not_impl_len = strlen(not_impl); + *str = NEW_C_HEAP_ARRAY(char, not_impl_len+1, mtTracing); + strncpy(*str, not_impl, not_impl_len); + (*str)[not_impl_len] = '\0'; +} + +JfrOSInterface::JfrOSInterface() { + _impl = NULL; +} + +bool JfrOSInterface::initialize() { + _impl = new JfrOSInterface::JfrOSInterfaceImpl(); + return _impl != NULL && _impl->initialize(); +} + +JfrOSInterface::~JfrOSInterface() { + if (_impl != NULL) { + delete _impl; + _impl = NULL; + } +} + +int JfrOSInterface::cpu_information(CPUInformation& cpu_info) { + return instance()._impl->cpu_information(cpu_info); +} + +int JfrOSInterface::cpu_load(int which_logical_cpu, double* cpu_load) { + return instance()._impl->cpu_load(which_logical_cpu, cpu_load); +} + +int JfrOSInterface::context_switch_rate(double* rate) { + return instance()._impl->context_switch_rate(rate); +} + +int JfrOSInterface::cpu_load_total_process(double* cpu_load) { + return instance()._impl->cpu_load_total_process(cpu_load); +} + +int JfrOSInterface::cpu_loads_process(double* jvm_user_load, double* jvm_kernel_load, double* system_total_load){ + return instance()._impl->cpu_loads_process(jvm_user_load, jvm_kernel_load, system_total_load); +} + +int JfrOSInterface::os_version(char** os_version) { + return instance()._impl->os_version(os_version); +} + +int JfrOSInterface::generate_initial_environment_variable_events() { + if (environ == NULL) { + return OS_ERR; + } + + if (EventInitialEnvironmentVariable::is_enabled()) { + // One time stamp for all events, so they can be grouped together + JfrTraceTime time_stamp(Tracing::time()); + for (char** p = environ; *p != NULL; p++) { + char* variable = *p; + char* equal_sign = strchr(variable, '='); + if (equal_sign != NULL) { + // Extract key/value + ResourceMark rm; + ptrdiff_t key_length = equal_sign - variable; + char* key = NEW_RESOURCE_ARRAY(char, key_length + 1); + char* value = equal_sign + 1; + strncpy(key, variable, key_length); + key[key_length] = '\0'; + EventInitialEnvironmentVariable event(UNTIMED); + event.set_endtime(time_stamp); + event.set_key(key); + event.set_value(value); + event.commit(); + } + } + } + return OS_OK; +} + +int JfrOSInterface::system_processes(SystemProcess** 
sys_processes, int* no_of_sys_processes) { + return instance()._impl->system_processes(sys_processes, no_of_sys_processes); +} diff --git a/src/share/vm/jfr/periodic/jfrOSInterface.hpp b/src/share/vm/jfr/periodic/jfrOSInterface.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cf3569ccca030916ba4fe370d41b4aa144bc0f2f --- /dev/null +++ b/src/share/vm/jfr/periodic/jfrOSInterface.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_PERIODIC_JFROSINTERFACE_HPP +#define SHARE_VM_JFR_PERIODIC_JFROSINTERFACE_HPP + +#include "jfr/utilities/jfrAllocation.hpp" +#include "utilities/globalDefinitions.hpp" + +class CPUInformation; +class EnvironmentVariable; +class SystemProcess; + +class JfrOSInterface: public JfrCHeapObj { + friend class JfrRecorder; + private: + class JfrOSInterfaceImpl; + JfrOSInterfaceImpl* _impl; + + JfrOSInterface(); + ~JfrOSInterface(); + bool initialize(); + static JfrOSInterface& instance(); + static JfrOSInterface* create(); + static void destroy(); + + public: + static int cpu_information(CPUInformation& cpu_info); + static int cpu_load(int which_logical_cpu, double* cpu_load); + static int context_switch_rate(double* rate); + static int cpu_load_total_process(double* cpu_load); + static int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad); + static int os_version(char** os_version); + static int generate_initial_environment_variable_events(); + static int system_processes(SystemProcess** system_processes, int* no_of_sys_processes); +}; + +#endif // SHARE_VM_JFR_PERIODIC_JFROSINTERFACE_HPP diff --git a/src/share/vm/jfr/periodic/jfrPeriodic.cpp b/src/share/vm/jfr/periodic/jfrPeriodic.cpp new file mode 100644 index 0000000000000000000000000000000000000000..312aa1f5376f5e27ee423227ce8e9590e4de0eeb --- /dev/null +++ b/src/share/vm/jfr/periodic/jfrPeriodic.cpp @@ -0,0 +1,531 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jvm.h" +#include "classfile/classLoaderStats.hpp" +#include "classfile/javaClasses.hpp" +#include "code/codeCache.hpp" +#include "compiler/compileBroker.hpp" +#include "gc_implementation/shared/gcConfiguration.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/objectCountEventSender.hpp" +#include "gc_implementation/shared/vmGCOperations.hpp" +#include "jfr/periodic/jfrOSInterface.hpp" +#include "jfr/periodic/jfrThreadCPULoadEvent.hpp" +#include "jfr/periodic/jfrThreadDumpEvent.hpp" +#include "jfr/recorder/jfrRecorder.hpp" +#include "jfr/utilities/jfrTraceTime.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "memory/heapInspection.hpp" +#include "memory/resourceArea.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/arguments.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "runtime/os_perf.hpp" +#include "runtime/thread.inline.hpp" +#include "runtime/sweeper.hpp" +#include "runtime/vmThread.hpp" +#include "services/classLoadingService.hpp" +#include "services/management.hpp" +#include "services/threadService.hpp" +#include "trace/tracing.hpp" +#include "tracefiles/tracePeriodic.hpp" +#include "utilities/exceptions.hpp" +#include "utilities/globalDefinitions.hpp" + +/** + * JfrPeriodic class + * Implementation of declarations in + * xsl generated traceRequestables.hpp + */ +#define TRACE_REQUEST_FUNC(id) void JfrPeriodicEventSet::request##id(void) + +TRACE_REQUEST_FUNC(JVMInformation) { + ResourceMark rm; + EventJVMInformation event; + event.set_jvmName(VM_Version::vm_name()); + event.set_jvmVersion(VM_Version::internal_vm_info_string()); + event.set_javaArguments(Arguments::java_command()); + event.set_jvmArguments(Arguments::jvm_args()); + event.set_jvmFlags(Arguments::jvm_flags()); + event.set_jvmStartTime(Management::vm_init_done_time()); + event.commit(); + } + +TRACE_REQUEST_FUNC(OSInformation) { + ResourceMark rm; + char* os_name = NEW_RESOURCE_ARRAY(char, 2048); + JfrOSInterface::os_version(&os_name); + EventOSInformation event; + event.set_osVersion(os_name); + event.commit(); +} + +/* + * This is left empty on purpose, having ExecutionSample as a requestable + * is a way of getting the period. The period is passed to ThreadSampling::update_period. 
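+ * (The ExecutionSample and NativeMethodSample events themselves are emitted by the
+ * sampler thread, see jfr/periodic/sampling/jfrThreadSampler.cpp.)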
+ * Implementation in jfrSamples.cpp + */ +TRACE_REQUEST_FUNC(ExecutionSample) { +} +TRACE_REQUEST_FUNC(NativeMethodSample) { +} + +TRACE_REQUEST_FUNC(ThreadDump) { + ResourceMark rm; + EventThreadDump event; + event.set_result(JfrDcmdEvent::thread_dump()); + event.commit(); +} + +static int _native_library_callback(const char* name, address base, address top, void *param) { + EventNativeLibrary event(UNTIMED); + event.set_name(name); + event.set_baseAddress((u8)base); + event.set_topAddress((u8)top); + event.set_endtime(*(JfrTraceTime*) param); + event.commit(); + return 0; +} + +TRACE_REQUEST_FUNC(NativeLibrary) { + JfrTraceTime ts(Tracing::time()); + os::get_loaded_modules_info(&_native_library_callback, (void *)&ts); +} + +TRACE_REQUEST_FUNC(InitialEnvironmentVariable) { + JfrOSInterface::generate_initial_environment_variable_events(); +} + +TRACE_REQUEST_FUNC(CPUInformation) { + CPUInformation cpu_info; + int ret_val = JfrOSInterface::cpu_information(cpu_info); + if (ret_val == OS_ERR) { + log_debug(jfr, system)( "Unable to generate requestable event CPUInformation"); + return; + } + if (ret_val == FUNCTIONALITY_NOT_IMPLEMENTED) { + return; + } + if (ret_val == OS_OK) { + EventCPUInformation event; + event.set_cpu(cpu_info.cpu_name()); + event.set_description(cpu_info.cpu_description()); + event.set_sockets(cpu_info.number_of_sockets()); + event.set_cores(cpu_info.number_of_cores()); + event.set_hwThreads(cpu_info.number_of_hardware_threads()); + event.commit(); + } +} + +TRACE_REQUEST_FUNC(CPULoad) { + double u = 0; // user time + double s = 0; // kernel time + double t = 0; // total time + int ret_val = JfrOSInterface::cpu_loads_process(&u, &s, &t); + if (ret_val == OS_ERR) { + log_debug(jfr, system)( "Unable to generate requestable event CPULoad"); + return; + } + if (ret_val == OS_OK) { + EventCPULoad event; + event.set_jvmUser((float)u); + event.set_jvmSystem((float)s); + event.set_machineTotal((float)t); + event.commit(); + } +} + +TRACE_REQUEST_FUNC(ThreadCPULoad) { + JfrThreadCPULoadEvent::send_events(); +} + +TRACE_REQUEST_FUNC(CPUTimeStampCounter) { + EventCPUTimeStampCounter event; + event.set_fastTimeEnabled(JfrTraceTime::is_ft_enabled()); + event.set_fastTimeAutoEnabled(JfrTraceTime::is_ft_supported()); + event.set_osFrequency(os::elapsed_frequency()); + event.set_fastTimeFrequency(JfrTraceTime::frequency()); + event.commit(); +} + +TRACE_REQUEST_FUNC(SystemProcess) { + char pid_buf[16]; + SystemProcess* processes = NULL; + int num_of_processes = 0; + JfrTraceTime start_time(Tracing::time()); + int ret_val = JfrOSInterface::system_processes(&processes, &num_of_processes); + if (ret_val == OS_ERR) { + log_debug(jfr, system)( "Unable to generate requestable event SystemProcesses"); + return; + } + JfrTraceTime end_time(Tracing::time()); + if (ret_val == FUNCTIONALITY_NOT_IMPLEMENTED) { + return; + } + if (ret_val == OS_OK) { + // feature is implemented, write real event + while (processes != NULL) { + SystemProcess* tmp = processes; + const char* info = processes->command_line(); + if (info == NULL) { + info = processes->path(); + } + if (info == NULL) { + info = processes->name(); + } + if (info == NULL) { + info = "?"; + } + jio_snprintf(pid_buf, sizeof(pid_buf), "%d", processes->pid()); + EventSystemProcess event(UNTIMED); + event.set_pid(pid_buf); + event.set_commandLine(info); + event.set_starttime(start_time); + event.set_endtime(end_time); + event.commit(); + processes = processes->next(); + delete tmp; + } + } +} + +TRACE_REQUEST_FUNC(ThreadContextSwitchRate) { + 
double rate = 0.0; + int ret_val = JfrOSInterface::context_switch_rate(&rate); + if (ret_val == OS_ERR) { + log_debug(jfr, system)( "Unable to generate requestable event ThreadContextSwitchRate"); + return; + } + if (ret_val == FUNCTIONALITY_NOT_IMPLEMENTED) { + return; + } + if (ret_val == OS_OK) { + EventThreadContextSwitchRate event; + event.set_switchRate((float)rate + 0.0f); + event.commit(); + } +} + +#define SEND_FLAGS_OF_TYPE(eventType, flagType) \ + do { \ + Flag *flag = Flag::flags; \ + while (flag->_name != NULL) { \ + if (flag->is_ ## flagType()) { \ + if (flag->is_unlocked()) { \ + Event ## eventType event; \ + event.set_name(flag->_name); \ + event.set_value(flag->get_ ## flagType()); \ + event.set_origin(flag->get_origin()); \ + event.commit(); \ + } \ + } \ + ++flag; \ + } \ + } while (0) + +TRACE_REQUEST_FUNC(IntFlag) { + SEND_FLAGS_OF_TYPE(IntFlag, int); +} + +TRACE_REQUEST_FUNC(UnsignedIntFlag) { + SEND_FLAGS_OF_TYPE(UnsignedIntFlag, uint); +} + +TRACE_REQUEST_FUNC(LongFlag) { + SEND_FLAGS_OF_TYPE(LongFlag, intx); +} + +TRACE_REQUEST_FUNC(UnsignedLongFlag) { + SEND_FLAGS_OF_TYPE(UnsignedLongFlag, uintx); + SEND_FLAGS_OF_TYPE(UnsignedLongFlag, uint64_t); + SEND_FLAGS_OF_TYPE(UnsignedLongFlag, size_t); +} + +TRACE_REQUEST_FUNC(DoubleFlag) { + SEND_FLAGS_OF_TYPE(DoubleFlag, double); +} + +TRACE_REQUEST_FUNC(BooleanFlag) { + SEND_FLAGS_OF_TYPE(BooleanFlag, bool); +} + +TRACE_REQUEST_FUNC(StringFlag) { + SEND_FLAGS_OF_TYPE(StringFlag, ccstr); +} + +class VM_GC_SendObjectCountEvent : public VM_GC_HeapInspection { + public: + VM_GC_SendObjectCountEvent() : VM_GC_HeapInspection(NULL, true) {} + virtual void doit() { + ObjectCountEventSender::enable_requestable_event(); + collect(); + ObjectCountEventSender::disable_requestable_event(); + } +}; + +TRACE_REQUEST_FUNC(ObjectCount) { + VM_GC_SendObjectCountEvent op; + VMThread::execute(&op); +} + + +// Java Mission Control (JMC) uses (Java) Long.MIN_VALUE to describe that a +// long value is undefined. +static jlong jmc_undefined_long = min_jlong; + +TRACE_REQUEST_FUNC(GCConfiguration) { + GCConfiguration conf; + jlong pause_target = conf.has_pause_target_default_value() ? 
jmc_undefined_long : conf.pause_target();
+  EventGCConfiguration event;
+  event.set_youngCollector(conf.young_collector());
+  event.set_oldCollector(conf.old_collector());
+  event.set_parallelGCThreads(conf.num_parallel_gc_threads());
+  event.set_concurrentGCThreads(conf.num_concurrent_gc_threads());
+  event.set_usesDynamicGCThreads(conf.uses_dynamic_gc_threads());
+  event.set_isExplicitGCConcurrent(conf.is_explicit_gc_concurrent());
+  event.set_isExplicitGCDisabled(conf.is_explicit_gc_disabled());
+  event.set_gcTimeRatio(conf.gc_time_ratio());
+  event.set_pauseTarget((s8)pause_target);
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(GCTLABConfiguration) {
+  GCTLABConfiguration conf;
+  EventGCTLABConfiguration event;
+  event.set_usesTLABs(conf.uses_tlabs());
+  event.set_minTLABSize(conf.min_tlab_size());
+  event.set_tlabRefillWasteLimit(conf.tlab_refill_waste_limit());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(GCSurvivorConfiguration) {
+  GCSurvivorConfiguration conf;
+  EventGCSurvivorConfiguration event;
+  event.set_maxTenuringThreshold(conf.max_tenuring_threshold());
+  event.set_initialTenuringThreshold(conf.initial_tenuring_threshold());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(GCHeapConfiguration) {
+  GCHeapConfiguration conf;
+  EventGCHeapConfiguration event;
+  event.set_minSize(conf.min_size());
+  event.set_maxSize(conf.max_size());
+  event.set_initialSize(conf.initial_size());
+  event.set_usesCompressedOops(conf.uses_compressed_oops());
+  event.set_compressedOopsMode(conf.narrow_oop_mode());
+  event.set_objectAlignment(conf.object_alignment_in_bytes());
+  event.set_heapAddressBits(conf.heap_address_size_in_bits());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(YoungGenerationConfiguration) {
+  GCYoungGenerationConfiguration conf;
+  jlong max_size = conf.has_max_size_default_value() ? jmc_undefined_long : conf.max_size();
+  EventYoungGenerationConfiguration event;
+  event.set_maxSize((u8)max_size);
+  event.set_minSize(conf.min_size());
+  event.set_newRatio(conf.new_ratio());
+  event.commit();
+}
+
+TRACE_REQUEST_FUNC(InitialSystemProperty) {
+  SystemProperty* p = Arguments::system_properties();
+  JfrTraceTime time_stamp(Tracing::time());
+  while (p != NULL) {
+    if (!p->internal()) {
+      EventInitialSystemProperty event(UNTIMED);
+      event.set_key(p->key());
+      event.set_value(p->value());
+      event.set_endtime(time_stamp);
+      event.commit();
+    }
+    p = p->next();
+  }
+}
+
+TRACE_REQUEST_FUNC(ThreadAllocationStatistics) {
+  ResourceMark rm;
+  int initial_size = Threads::number_of_threads();
+  GrowableArray<jlong> allocated(initial_size);
+  GrowableArray<traceid> thread_ids(initial_size);
+  JfrTraceTime time_stamp(Tracing::time());
+  {
+    // Collect allocation statistics while holding threads lock
+    MutexLockerEx ml(Threads_lock);
+    JavaThread *jt = Threads::first();
+    while (jt) {
+      allocated.append(jt->cooked_allocated_bytes());
+      thread_ids.append(THREAD_TRACE_ID(jt));
+      jt = jt->next();
+    }
+  }
+
+  // Write allocation statistics to buffer.
+  for(int i = 0; i < thread_ids.length(); i++) {
+    EventThreadAllocationStatistics event(UNTIMED);
+    event.set_allocated(allocated.at(i));
+    event.set_thread(thread_ids.at(i));
+    event.set_endtime(time_stamp);
+    event.commit();
+  }
+}
+
+/**
+ * PhysicalMemory event represents:
+ *
+ * @totalSize == The amount of physical memory (hw) installed and reported by the OS, in bytes.
+ * @usedSize == The amount of physical memory currently in use in the system (reserved/committed), in bytes.
+ *
+ * Both fields are systemwide, i.e. represent the entire OS/HW environment.
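+ * (In this implementation, usedSize is derived below as os::physical_memory() -
+ * os::available_memory() at the time the event is emitted.)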
+ * These fields do not include virtual memory. + * + * If running inside a guest OS on top of a hypervisor in a virtualized environment, + * the total memory reported is the amount of memory configured for the guest OS by the hypervisor. + */ +TRACE_REQUEST_FUNC(PhysicalMemory) { + u8 totalPhysicalMemory = os::physical_memory(); + EventPhysicalMemory event; + event.set_totalSize(totalPhysicalMemory); + event.set_usedSize(totalPhysicalMemory - os::available_memory()); + event.commit(); +} + +TRACE_REQUEST_FUNC(JavaThreadStatistics) { + EventJavaThreadStatistics event; + event.set_activeCount(ThreadService::get_live_thread_count()); + event.set_daemonCount(ThreadService::get_daemon_thread_count()); + event.set_accumulatedCount(ThreadService::get_total_thread_count()); + event.set_peakCount(ThreadService::get_peak_thread_count()); + event.commit(); +} + +TRACE_REQUEST_FUNC(ClassLoadingStatistics) { + EventClassLoadingStatistics event; + event.set_loadedClassCount(ClassLoadingService::loaded_class_count()); + event.set_unloadedClassCount(ClassLoadingService::unloaded_class_count()); + event.commit(); +} + +class JfrClassLoaderStatsClosure : public ClassLoaderStatsClosure { +public: + JfrClassLoaderStatsClosure() : ClassLoaderStatsClosure(NULL) {} + + bool do_entry(oop const& key, ClassLoaderStats* const& cls) { + const ClassLoaderData* this_cld = cls->_class_loader != NULL ? + java_lang_ClassLoader::loader_data(cls->_class_loader) : (ClassLoaderData*)NULL; + const ClassLoaderData* parent_cld = cls->_parent != NULL ? + java_lang_ClassLoader::loader_data(cls->_parent) : (ClassLoaderData*)NULL; + EventClassLoaderStatistics event; + event.set_classLoader(this_cld); + event.set_parentClassLoader(parent_cld); + event.set_classLoaderData((intptr_t)cls->_cld); + event.set_classCount(cls->_classes_count); + event.set_chunkSize(cls->_chunk_sz); + event.set_blockSize(cls->_block_sz); + event.set_anonymousClassCount(cls->_anon_classes_count); + event.set_anonymousChunkSize(cls->_anon_chunk_sz); + event.set_anonymousBlockSize(cls->_anon_block_sz); + event.commit(); + return true; + } + + void createEvents(void) { + _stats->iterate(this); + } +}; + +class JfrClassLoaderStatsVMOperation : public ClassLoaderStatsVMOperation { + public: + JfrClassLoaderStatsVMOperation() : ClassLoaderStatsVMOperation(NULL) { } + + void doit() { + JfrClassLoaderStatsClosure clsc; + ClassLoaderDataGraph::cld_do(&clsc); + clsc.createEvents(); + } +}; + +TRACE_REQUEST_FUNC(ClassLoaderStatistics) { + JfrClassLoaderStatsVMOperation op; + VMThread::execute(&op); +} + +TRACE_REQUEST_FUNC(CompilerStatistics) { + EventCompilerStatistics event; + event.set_compileCount(CompileBroker::get_total_compile_count()); + event.set_bailoutCount(CompileBroker::get_total_bailout_count()); + event.set_invalidatedCount(CompileBroker::get_total_invalidated_count()); + event.set_osrCompileCount(CompileBroker::get_total_osr_compile_count()); + event.set_standardCompileCount(CompileBroker::get_total_standard_compile_count()); + event.set_osrBytesCompiled(CompileBroker::get_sum_osr_bytes_compiled()); + event.set_standardBytesCompiled(CompileBroker::get_sum_standard_bytes_compiled()); + event.set_nmetodsSize(CompileBroker::get_sum_nmethod_size()); + event.set_nmetodCodeSize(CompileBroker::get_sum_nmethod_code_size()); + event.set_peakTimeSpent(CompileBroker::get_peak_compilation_time()); + event.set_totalTimeSpent(CompileBroker::get_total_compilation_time()); + event.commit(); +} + +TRACE_REQUEST_FUNC(CompilerConfiguration) { + EventCompilerConfiguration 
event; + event.set_threadCount(CICompilerCount); + event.set_tieredCompilation(TieredCompilation); + event.commit(); +} + +TRACE_REQUEST_FUNC(CodeCacheStatistics) { + // Emit stats for all available code heaps + EventCodeCacheStatistics event; + event.set_codeBlobType((u1)CodeBlobType::All); + event.set_startAddress((u8)CodeCache::low_bound()); + event.set_reservedTopAddress((u8)CodeCache::high_bound()); + event.set_entryCount(CodeCache::nof_blobs()); + event.set_methodCount(CodeCache::nof_nmethods()); + event.set_adaptorCount(CodeCache::nof_adapters()); + event.set_unallocatedCapacity(CodeCache::unallocated_capacity()); + event.set_fullCount(CodeCache::get_codemem_full_count()); + event.commit(); +} + +TRACE_REQUEST_FUNC(CodeCacheConfiguration) { + EventCodeCacheConfiguration event; + event.set_initialSize(InitialCodeCacheSize); + event.set_reservedSize(ReservedCodeCacheSize); + event.set_expansionSize(CodeCacheExpansionSize); + event.set_minBlockLength(CodeCacheMinBlockLength); + event.set_startAddress((u8)CodeCache::low_bound()); + event.set_reservedTopAddress((u8)CodeCache::high_bound()); + event.commit(); +} + +TRACE_REQUEST_FUNC(CodeSweeperConfiguration) { + EventCodeSweeperConfiguration event; + event.set_sweeperEnabled(MethodFlushing); + event.set_flushingEnabled(UseCodeCacheFlushing); + event.commit(); +} diff --git a/src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.cpp b/src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2b049dd411ba5d36208f09b11e09ef6714b81eb0 --- /dev/null +++ b/src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.cpp @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/periodic/jfrThreadCPULoadEvent.hpp" +#include "jfr/recorder/access/jfrThreadData.hpp" +#include "jfr/utilities/jfrTraceTime.hpp" +#include "utilities/globalDefinitions.hpp" +#include "runtime/os.hpp" +#include "runtime/thread.inline.hpp" +#include "tracefiles/traceEventClasses.hpp" + +jlong JfrThreadCPULoadEvent::get_wallclock_time() { + return os::javaTimeNanos(); +} + +int JfrThreadCPULoadEvent::_last_active_processor_count = 0; + +int JfrThreadCPULoadEvent::get_processor_count() { + int cur_processor_count = os::active_processor_count(); + int last_processor_count = _last_active_processor_count; + _last_active_processor_count = cur_processor_count; + + // If the number of processors decreases, we don't know at what point during + // the sample interval this happened, so use the largest number to try + // to avoid percentages above 100% + return MAX2(cur_processor_count, last_processor_count); +} + +// Returns false if the thread has not been scheduled since the last call to updateEvent +// (i.e. the delta for both system and user time is 0 milliseconds) +bool JfrThreadCPULoadEvent::update_event(EventThreadCPULoad& event, JavaThread* thread, jlong cur_wallclock_time, int processor_count) { + JfrThreadData* thread_data = thread->trace_data(); + + jlong cur_cpu_time = os::thread_cpu_time(thread, true); + jlong prev_cpu_time = thread_data->get_cpu_time(); + + jlong prev_wallclock_time = thread_data->get_wallclock_time(); + thread_data->set_wallclock_time(cur_wallclock_time); + + // Threshold of 1 ms + if (cur_cpu_time - prev_cpu_time < 1 * NANOSECS_PER_MILLISEC) { + return false; + } + + jlong cur_user_time = os::thread_cpu_time(thread, false); + jlong prev_user_time = thread_data->get_user_time(); + + jlong cur_system_time = cur_cpu_time - cur_user_time; + jlong prev_system_time = prev_cpu_time - prev_user_time; + + // The user and total cpu usage clocks can have different resolutions, which can + // make us see decreasing system time. Ensure time doesn't go backwards. + if (prev_system_time > cur_system_time) { + cur_system_time = prev_system_time; + } + + jlong user_time = cur_user_time - prev_user_time; + jlong system_time = cur_system_time - prev_system_time; + jlong wallclock_time = cur_wallclock_time - prev_wallclock_time; + jlong total_available_time = wallclock_time * processor_count; + + // Avoid reporting percentages above the theoretical max + if (user_time + system_time > wallclock_time) { + jlong excess = user_time + system_time - wallclock_time; + if (user_time > excess) { + user_time -= excess; + cur_user_time -= excess; + } else { + excess -= user_time; + user_time = 0; + cur_user_time = 0; + system_time -= excess; + cur_system_time -= excess; + } + } + event.set_user(total_available_time > 0 ? (double)user_time / total_available_time : 0); + event.set_system(total_available_time > 0 ? 
(double)system_time / total_available_time : 0); + thread_data->set_user_time(cur_user_time); + thread_data->set_cpu_time(cur_cpu_time); + return true; +} + +void JfrThreadCPULoadEvent::send_events() { + Thread* periodic_thread = Thread::current(); + JfrThreadData* const periodic_trace_data = periodic_thread->trace_data(); + traceid periodic_thread_id = periodic_trace_data->thread_id(); + const int processor_count = JfrThreadCPULoadEvent::get_processor_count(); + JfrTraceTime event_time = JfrTraceTime::now(); + jlong cur_wallclock_time = JfrThreadCPULoadEvent::get_wallclock_time(); + + JavaThread *jt = Threads::first(); + size_t thread_count = 0; + while (jt) { + thread_count++; + EventThreadCPULoad event(UNTIMED); + if (JfrThreadCPULoadEvent::update_event(event, jt, cur_wallclock_time, processor_count)) { + event.set_starttime(event_time); + if (jt != periodic_thread) { + // Commit reads the thread id from this thread's trace data, so put it there temporarily + periodic_trace_data->set_thread_id(THREAD_TRACE_ID(jt)); + } else { + periodic_trace_data->set_thread_id(periodic_thread_id); + } + event.commit(); + } + jt = jt->next(); + } + log_trace(jfr)("Measured CPU usage for %d threads in %.3f milliseconds", thread_count, + (double)(JfrTraceTime::now() - event_time) / (JfrTraceTime::frequency() / 1000)); + // Restore this thread's thread id + periodic_trace_data->set_thread_id(periodic_thread_id); +} + +void JfrThreadCPULoadEvent::send_event_for_thread(JavaThread* jt) { + EventThreadCPULoad event; + if (event.should_commit()) { + if (update_event(event, jt, get_wallclock_time(), get_processor_count())) { + event.commit(); + } + } +} diff --git a/src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.hpp b/src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.hpp new file mode 100644 index 0000000000000000000000000000000000000000..2dff1b6f5d0775bba3bc63c5398dd307f877b0dc --- /dev/null +++ b/src/share/vm/jfr/periodic/jfrThreadCPULoadEvent.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_JFR_PERIODIC_JFRTHREADCPULOAD_HPP +#define SHARE_VM_JFR_PERIODIC_JFRTHREADCPULOAD_HPP + +#include "jni.h" +#include "memory/allocation.hpp" + +class JavaThread; +class EventThreadCPULoad; + +class JfrThreadCPULoadEvent : public AllStatic { + static int _last_active_processor_count; + public: + static jlong get_wallclock_time(); + static int get_processor_count(); + static bool update_event(EventThreadCPULoad& event, JavaThread* thread, jlong cur_wallclock_time, int processor_count); + static void send_events(); + static void send_event_for_thread(JavaThread* jt); +}; + +#endif // SHARE_VM_JFR_PERIODIC_JFRTHREADCPULOAD_HPP + diff --git a/src/share/vm/jfr/periodic/jfrThreadDumpEvent.cpp b/src/share/vm/jfr/periodic/jfrThreadDumpEvent.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d7ee267182a55632d18aa8798d9567ff1e517ad9 --- /dev/null +++ b/src/share/vm/jfr/periodic/jfrThreadDumpEvent.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jfr/dcmd/jfrDcmds.hpp" +#include "jfr/periodic/jfrThreadDumpEvent.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "tracefiles/traceEventClasses.hpp" +#include "utilities/exceptions.hpp" +#include "utilities/ostream.hpp" + +/** +* Worker impl for generating and writing dcmd commands +* as jfr events. +* dispatch to diagnosticcommands "parse_and_execute" +* +* param: cmd = the DCMD to execute (including options) +*/ +static bool execute_dcmd(bufferedStream& st, const char* const cmd) { + Thread* THREAD = Thread::current(); + assert(!HAS_PENDING_EXCEPTION, "dcmd does not expect pending exceptions on entry!"); + // delegate to DCmd execution + DCmd::parse_and_execute(DCmd_Source_Internal, &st, cmd, ' ', THREAD); + if (HAS_PENDING_EXCEPTION) { + log_debug(jfr, system)("unable to create jfr event for DCMD %s", cmd); + log_debug(jfr, system)("exception type: %s", PENDING_EXCEPTION->klass()->external_name()); + // don't unwind this exception + CLEAR_PENDING_EXCEPTION; + // if exception occurred, + // reset stream. 
+ st.reset(); + return false; + } + return true; +} + +// caller needs ResourceMark +const char* JfrDcmdEvent::thread_dump() { + assert(EventThreadDump::is_enabled(), "invariant"); + bufferedStream st; + execute_dcmd(st, "Thread.print"); + return st.as_string(); +} diff --git a/src/share/vm/jfr/periodic/jfrThreadDumpEvent.hpp b/src/share/vm/jfr/periodic/jfrThreadDumpEvent.hpp new file mode 100644 index 0000000000000000000000000000000000000000..79c82405fe037e2daeee4a8ffcf1ba2f9de2e855 --- /dev/null +++ b/src/share/vm/jfr/periodic/jfrThreadDumpEvent.hpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_PERIODIC_JFRDCMDEVENT_HPP +#define SHARE_VM_JFR_PERIODIC_JFRDCMDEVENT_HPP + +#include "memory/allocation.hpp" + +/* + * Helper for generating jfr events using output data from Dcmd's. + */ +class JfrDcmdEvent : public AllStatic { + public: + // caller needs ResourceMark + static const char* thread_dump(); +}; + +#endif // SHARE_VM_JFR_PERIODIC_JFRDCMDEVENT_HPP diff --git a/src/share/vm/jfr/periodic/sampling/jfrCallTrace.cpp b/src/share/vm/jfr/periodic/sampling/jfrCallTrace.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9cfb21efb50339fbe784a568b3ddd5a13f289c9c --- /dev/null +++ b/src/share/vm/jfr/periodic/sampling/jfrCallTrace.cpp @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "code/debugInfoRec.hpp" +#include "code/nmethod.hpp" +#include "code/pcDesc.hpp" +#include "jfr/periodic/sampling/jfrCallTrace.hpp" +#include "oops/method.hpp" +#include "runtime/javaCalls.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/registerMap.hpp" +#include "runtime/thread.inline.hpp" + +bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& first_frame) { + assert(top_frame.cb() != NULL, "invariant"); + RegisterMap map(_thread, false); + frame candidate = top_frame; + for (int i = 0; i < MaxJavaStackTraceDepth * 2; ++i) { + if (candidate.is_entry_frame()) { + JavaCallWrapper *jcw = candidate.entry_frame_call_wrapper_if_safe(_thread); + if (jcw == NULL || jcw->is_first_frame()) { + return false; + } + } + + if (candidate.is_interpreted_frame()) { + JavaThreadState state = _thread->thread_state(); + const bool known_valid = (state == _thread_in_native || state == _thread_in_vm || state == _thread_blocked); + if (known_valid || candidate.is_interpreted_frame_valid(_thread)) { + Method* im = candidate.interpreter_frame_method(); + if (known_valid && !im->is_valid_method()) { + return false; + } + *method = im; + first_frame = candidate; + return true; + } + } + + if (candidate.cb()->is_nmethod()) { + // first check to make sure that we have a sane stack, + // the PC is actually inside the code part of the codeBlob, + // and we are past is_frame_complete_at (stack has been setup) + if (!candidate.safe_for_sender(_thread)) { + return false; + } + nmethod* nm = (nmethod*)candidate.cb(); + *method = nm->method(); + + if (_in_java) { + PcDesc* pc_desc = nm->pc_desc_near(candidate.pc() + 1); + if (pc_desc == NULL || pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) { + return false; + } + candidate.set_pc(pc_desc->real_pc(nm)); + assert(nm->pc_desc_at(candidate.pc()) != NULL, "invalid pc"); + } + first_frame = candidate; + return true; + } + + if (!candidate.safe_for_sender(_thread) || + candidate.is_stub_frame() || + candidate.cb()->frame_size() <= 0) { + return false; + } + + candidate = candidate.sender(&map); + if (candidate.cb() == NULL) { + return false; + } + } + return false; +} + +bool JfrGetCallTrace::get_topframe(void* ucontext, frame& topframe) { + if (!_thread->pd_get_top_frame_for_profiling(&topframe, ucontext, _in_java)) { + return false; + } + + if (topframe.cb() == NULL) { + return false; + } + + frame first_java_frame; + Method* method = NULL; + if (find_top_frame(topframe, &method, first_java_frame)) { + if (method == NULL) { + return false; + } + topframe = first_java_frame; + return true; + } + return false; +} diff --git a/src/share/vm/jfr/periodic/sampling/jfrCallTrace.hpp b/src/share/vm/jfr/periodic/sampling/jfrCallTrace.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6be4aea20a76769b418c06367879e5ad2efe6737 --- /dev/null +++ b/src/share/vm/jfr/periodic/sampling/jfrCallTrace.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_ENGINE_SAMPLING_JFRCALLTRACE_HPP +#define SHARE_VM_JFR_ENGINE_SAMPLING_JFRCALLTRACE_HPP + +#include "memory/allocation.hpp" + +class frame; +class Method; +class JavaThread; + +class JfrGetCallTrace : public StackObj { + private: + JavaThread* _thread; + bool _in_java; + + public: + JfrGetCallTrace(bool in_java, JavaThread* thread) : _in_java(in_java), _thread(thread) {} + bool find_top_frame(frame& topframe, Method** method, frame& first_frame); + bool get_topframe(void* ucontext, frame& top); +}; + +#endif // SHARE_VM_JFR_ENGINE_SAMPLING_JFRCALLTRACE_HPP diff --git a/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.cpp b/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..554e73ba1b60aad7c0a0afd9dfaefa6e7b293377 --- /dev/null +++ b/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.cpp @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/recorder/jfrRecorder.hpp" +#include "jfr/periodic/sampling/jfrCallTrace.hpp" +#include "jfr/periodic/sampling/jfrThreadSampler.hpp" +#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" +#include "jfr/recorder/access/jfrOptionSet.hpp" +#include "jfr/utilities/jfrTraceTime.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/os.hpp" +#include "runtime/thread.inline.hpp" +#include "trace/tracing.hpp" +#include "tracefiles/traceEventIds.hpp" + +static bool in_java_sample(JavaThread* thread) { + switch (thread->thread_state()) { + case _thread_new: + case _thread_uninitialized: + case _thread_new_trans: + case _thread_in_vm_trans: + case _thread_blocked_trans: + case _thread_in_native_trans: + case _thread_blocked: + case _thread_in_vm: + case _thread_in_native: + case _thread_in_Java_trans: + break; + case _thread_in_Java: + return true; + default: + ShouldNotReachHere(); + break; + } + return false; +} + +static bool in_native_sample(JavaThread* thread) { + switch (thread->thread_state()) { + case _thread_new: + case _thread_uninitialized: + case _thread_new_trans: + case _thread_blocked_trans: + case _thread_blocked: + case _thread_in_vm: + case _thread_in_vm_trans: + case _thread_in_Java_trans: + case _thread_in_Java: + case _thread_in_native_trans: + break; + case _thread_in_native: + return true; + default: + ShouldNotReachHere(); + break; + } + return false; +} + +class JfrThreadSampleClosure { + public: + JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native); + ~JfrThreadSampleClosure() {} + EventExecutionSample* next_event() { return &_events[_added_java++]; } + EventNativeMethodSample* next_event_native() { return &_events_native[_added_native++]; } + void commit_events(); + int added() const { return _added_java; } + JfrSampleType do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, bool java_sample, bool native_sample); + int java_entries() { return _added_java; } + int native_entries() { return _added_native; } + + private: + bool sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames); + bool sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames); + EventExecutionSample* _events; + EventNativeMethodSample* _events_native; + Thread* _self; + int _added_java; + int _added_native; +}; + +class OSThreadSampler : public os::SuspendedThreadTask { + public: + OSThreadSampler(JavaThread* thread, + JfrThreadSampleClosure& closure, + JfrStackFrame *frames, + u4 max_frames) : os::SuspendedThreadTask((Thread*)thread), + _success(false), + _stacktrace(frames, max_frames), + _closure(closure), + _suspend_time(0) {} + + void take_sample(); + void do_task(const os::SuspendedThreadTaskContext& context); + void protected_task(const os::SuspendedThreadTaskContext& context); + bool success() const { return _success; } + const JfrStackTrace& stacktrace() const { return _stacktrace; } + + private: + bool _success; + JfrStackTrace _stacktrace; + JfrThreadSampleClosure& _closure; + JfrTraceTime _suspend_time; +}; + +class OSThreadSamplerCallback : public os::CrashProtectionCallback { + public: + OSThreadSamplerCallback(OSThreadSampler& sampler, const os::SuspendedThreadTaskContext &context) : + _sampler(sampler), _context(context) { + } + virtual void call() { + _sampler.protected_task(_context); + } + private: + OSThreadSampler& _sampler; + const os::SuspendedThreadTaskContext& 
_context; +}; + +void OSThreadSampler::do_task(const os::SuspendedThreadTaskContext& context) { +#ifndef ASSERT + guarantee(JfrOptionSet::sample_protection(), "Sample Protection should be on in product builds"); +#endif + assert(0 == _suspend_time, "already timestamped!"); + _suspend_time = JfrTraceTime::now(); + + if (JfrOptionSet::sample_protection()) { + OSThreadSamplerCallback cb(*this, context); + os::ThreadCrashProtection crash_protection; + if (!crash_protection.call(cb)) { + log_error(jfr)("Thread method sampler crashed"); + } + } else { + protected_task(context); + } +} + +/* +* From this method and down the call tree we attempt to protect against crashes +* using a signal handler / __try block. Don't take locks, rely on destructors or +* leave memory (in case of signal / exception) in an inconsistent state. */ +void OSThreadSampler::protected_task(const os::SuspendedThreadTaskContext& context) { + JavaThread* jth = (JavaThread*)context.thread(); + // Skip sample if we signaled a thread that moved to other state + if (!in_java_sample(jth)) { + return; + } + JfrGetCallTrace trace(true, jth); + frame topframe; + if (trace.get_topframe(context.ucontext(), topframe)) { + if (_stacktrace.record_thread(*jth, topframe)) { + /* If we managed to get a topframe and a stacktrace, create an event + * and put it into our array. We can't call Jfr::_stacktraces.add() + * here since it would allocate memory using malloc. Doing so while + * the stopped thread is inside malloc would deadlock. */ + _success = true; + EventExecutionSample *ev = _closure.next_event(); + ev->set_starttime(_suspend_time); + ev->set_endtime(JfrTraceTime(1)); // fake to not take an end time + ev->set_sampledThread(THREAD_TRACE_ID(jth)); + ev->set_state(java_lang_Thread::get_thread_status(jth->threadObj())); + } + } +} + +void OSThreadSampler::take_sample() { + run(); +} + +class JfrNativeSamplerCallback : public os::CrashProtectionCallback { + public: + JfrNativeSamplerCallback(JfrThreadSampleClosure& closure, JavaThread* jt, JfrStackFrame* frames, u4 max_frames) : + _closure(closure), _jt(jt), _stacktrace(frames, max_frames), _success(false) { + } + virtual void call(); + bool success() { return _success; } + JfrStackTrace& stacktrace() { return _stacktrace; } + + private: + JfrThreadSampleClosure& _closure; + JavaThread* _jt; + JfrStackTrace _stacktrace; + bool _success; +}; + +static void write_native_event(JfrThreadSampleClosure& closure, JavaThread* jt) { + EventNativeMethodSample *ev = closure.next_event_native(); + ev->set_starttime(JfrTraceTime(0)); + ev->set_sampledThread(THREAD_TRACE_ID(jt)); + ev->set_state(java_lang_Thread::get_thread_status(jt->threadObj())); +} + +void JfrNativeSamplerCallback::call() { + // When a thread is only attach it will be native without a last java frame + if (!_jt->has_last_Java_frame()) { + return; + } + + frame topframe = _jt->last_frame(); + frame first_java_frame; + Method* method = NULL; + JfrGetCallTrace gct(false, _jt); + if (!gct.find_top_frame(topframe, &method, first_java_frame)) { + return; + } + if (method == NULL) { + return; + } + topframe = first_java_frame; + _success = _stacktrace.record_thread(*_jt, topframe); + if (_success) { + write_native_event(_closure, _jt); + } +} + +bool JfrThreadSampleClosure::sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) { + OSThreadSampler sampler(thread, *this, frames, max_frames); + sampler.take_sample(); + /* We don't want to allocate any memory using malloc/etc while the thread + * is stopped, so 
everything is stored in stack allocated memory until this + * point where the thread has been resumed again, if the sampling was a success + * we need to store the stacktrace in the stacktrace repository and update + * the event with the id that was returned. */ + if (!sampler.success()) { + return false; + } + EventExecutionSample *event = &_events[_added_java - 1]; + traceid id = JfrStackTraceRepository::add(sampler.stacktrace()); + assert(id != 0, "Stacktrace id should not be 0"); + event->set_stackTrace(id); + return true; +} + +bool JfrThreadSampleClosure::sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) { + JfrNativeSamplerCallback cb(*this, thread, frames, max_frames); + if (JfrOptionSet::sample_protection()) { + os::ThreadCrashProtection crash_protection; + if (!crash_protection.call(cb)) { + log_error(jfr)("Thread method sampler crashed for native"); + } + } else { + cb.call(); + } + if (!cb.success()) { + return false; + } + EventNativeMethodSample *event = &_events_native[_added_native - 1]; + traceid id = JfrStackTraceRepository::add(cb.stacktrace()); + assert(id != 0, "Stacktrace id should not be 0"); + event->set_stackTrace(id); + return true; +} + +void JfrThreadSampleClosure::commit_events() { + for (int i = 0; i < _added_java; ++i) { + _events[i].commit(); + } + for (int i = 0; i < _added_native; ++i) { + _events_native[i].commit(); + } +} + +JfrThreadSampleClosure::JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native) : + _events(events), + _events_native(events_native), + _self(Thread::current()), + _added_java(0), + _added_native(0) { +} + +static void clear_transition_block(JavaThread* jt) { + jt->clear_trace_flag(); + JfrThreadData* jtd = jt->trace_data(); + if (jtd->is_trace_block()) { + MutexLockerEx ml(JfrThreadSampler::transition_block(), Mutex::_no_safepoint_check_flag); + JfrThreadSampler::transition_block()->notify_all(); + } +} + +JfrSampleType JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, bool java_sample, bool native_sample) { + assert(Threads_lock->owned_by_self(), "Holding the thread table lock."); + if (thread->is_hidden_from_external_view()) { + return NO_SAMPLE; + } + if (thread->in_deopt_handler()) { + return NO_SAMPLE; + } + JfrSampleType ret = NO_SAMPLE; + thread->set_trace_flag(); + if (!UseMembar) { + os::serialize_thread_states(); + } + if (in_java_sample(thread) && java_sample) { + ret = sample_thread_in_java(thread, frames, max_frames) ? JAVA_SAMPLE : NO_SAMPLE; + } else if (in_native_sample(thread) && native_sample) { + ret = sample_thread_in_native(thread, frames, max_frames) ? 
NATIVE_SAMPLE : NO_SAMPLE; + } + clear_transition_block(thread); + return ret; +} + +Monitor* JfrThreadSampler::_transition_block_lock = new Monitor(Mutex::leaf, "Trace block", true); + +JfrThreadSampler::JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames) : + _frames(JfrCHeapObj::new_array(max_frames)), + _last_thread_java(NULL), + _last_thread_native(NULL), + _interval_java(interval_java), + _interval_native(interval_native), + _cur_index(-1), + _max_frames(max_frames), + _should_terminate(false) { +} + +JfrThreadSampler::~JfrThreadSampler() { + JfrCHeapObj::free(_frames, sizeof(JfrStackFrame) * _max_frames); +} + +void JfrThreadSampler::on_javathread_suspend(JavaThread* thread) { + JfrThreadData* jtd = thread->trace_data(); + jtd->set_trace_block(); + { + MutexLockerEx ml(transition_block(), Mutex::_no_safepoint_check_flag); + while (thread->is_trace_suspend()) { + transition_block()->wait(true); + } + jtd->clear_trace_block(); + } +} + +int JfrThreadSampler::find_index_of_JavaThread(JavaThread** t_list, uint length, JavaThread *target) { + assert(Threads_lock->owned_by_self(), "Holding the thread table lock."); + if (target == NULL) { + return -1; + } + for (uint i = 0; i < length; i++) { + if (target == t_list[i]) { + return (int)i; + } + } + return -1; +} + +JavaThread* JfrThreadSampler::next_thread(JavaThread** t_list, uint length, JavaThread* first_sampled, JavaThread* current) { + assert(Threads_lock->owned_by_self(), "Holding the thread table lock."); + if (current == NULL) { + _cur_index = 0; + return t_list[_cur_index]; + } + + if (_cur_index == -1 || t_list[_cur_index] != current) { + // 'current' is not at '_cur_index' so find it: + _cur_index = find_index_of_JavaThread(t_list, length, current); + assert(_cur_index != -1, "current JavaThread should be findable."); + } + _cur_index++; + + JavaThread* next = NULL; + // wrap + if ((uint)_cur_index >= length) { + _cur_index = 0; + } + next = t_list[_cur_index]; + + // sample wrap + if (next == first_sampled) { + return NULL; + } + return next; +} + +void JfrThreadSampler::enroll() { + if (os::create_thread(this, os::os_thread)) { + os::start_thread(this); + } else { + log_error(jfr)("Failed to create thread for thread sampling"); + } +} + +void JfrThreadSampler::disenroll() { + _should_terminate = true; +} + +static jlong get_monotonic_ms() { + return os::javaTimeNanos() / 1000000; +} + +void JfrThreadSampler::run() { + jlong last_java_ms = get_monotonic_ms(); + jlong last_native_ms = last_java_ms; + while (!_should_terminate) { + jlong java_interval = _interval_java == 0 ? max_jlong : MAX2(_interval_java, 10); + jlong native_interval = _interval_native == 0 ? 
max_jlong : MAX2(_interval_native, 10); + + jlong now_ms = get_monotonic_ms(); + + jlong next_j = java_interval + last_java_ms - now_ms; + jlong next_n = native_interval + last_native_ms - now_ms; + + jlong sleep_to_next = MIN2(next_j, next_n); + + if (sleep_to_next > 0) { + os::naked_short_sleep(sleep_to_next); + } + + if ((next_j - sleep_to_next) <= 0) { + task_stacktrace(JAVA_SAMPLE, &_last_thread_java); + last_java_ms = get_monotonic_ms(); + } + if ((next_n - sleep_to_next) <= 0) { + task_stacktrace(NATIVE_SAMPLE, &_last_thread_native); + last_native_ms = get_monotonic_ms(); + } + } + delete this; +} + +static const int MAX_NR_OF_SAMPLES = 5; + +void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thread) { + ResourceMark rm; + EventExecutionSample samples[MAX_NR_OF_SAMPLES]; + EventNativeMethodSample samples_native[MAX_NR_OF_SAMPLES]; + JfrThreadSampleClosure sample_task(samples, samples_native); + + int num_samples = 0; + { + elapsedTimer sample_time; + sample_time.start(); + + { + MonitorLockerEx tlock(Threads_lock, Mutex::_allow_vm_block_flag); + int max_threads = Threads::number_of_threads(); + assert(max_threads >= 0, "Threads list is empty"); + uint index = 0; + JavaThread** threads_list = NEW_C_HEAP_ARRAY(JavaThread *, max_threads, mtInternal); + for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) { + threads_list[index++] = tp; + } + JavaThread* current = Threads::includes(*last_thread) ? *last_thread : NULL; + JavaThread* start = NULL; + + while (num_samples < MAX_NR_OF_SAMPLES) { + current = next_thread(threads_list, index, start, current); + if (current == NULL) { + break; + } + if (start == NULL) { + start = current; // remember thread where we started sampling + } + if (current->is_Compiler_thread()) { + continue; + } + *last_thread = current; // remember thread we last sampled + JfrSampleType ret = sample_task.do_sample_thread(current, _frames, _max_frames, type == JAVA_SAMPLE, type == NATIVE_SAMPLE); + switch (type) { + case JAVA_SAMPLE: + case NATIVE_SAMPLE: + ++num_samples; + break; + default: + break; + } + } + FREE_C_HEAP_ARRAY(JavaThread *, threads_list, mtInternal); + // release Threads_lock + } + sample_time.stop(); + log_trace(jfr)("JFR thread sampling done in %3.7f secs with %d java %d native samples", + sample_time.seconds(), sample_task.java_entries(), sample_task.native_entries()); + } + if (num_samples>0) { + sample_task.commit_events(); + } +} + +static JfrThreadSampling* _instance = NULL; + +JfrThreadSampling& JfrThreadSampling::instance() { + return *_instance; +} + +JfrThreadSampling* JfrThreadSampling::create() { + assert(_instance == NULL, "invariant"); + _instance = new JfrThreadSampling(); + return _instance; +} + +void JfrThreadSampling::destroy() { + if (_instance != NULL) { + delete _instance; + _instance = NULL; + } +} + +JfrThreadSampling::JfrThreadSampling() : _sampler(NULL) {} + +JfrThreadSampling::~JfrThreadSampling() { + log_info(jfr)("Disrolling thread sampler"); + stop_sampler(); +} + +static void log(size_t interval_java, size_t interval_native) { + log_info(jfr)("Updated thread sampler for java: " SIZE_FORMAT" ms, native " SIZE_FORMAT " ms", interval_java, interval_native); +} + +void JfrThreadSampling::start_sampler(size_t interval_java, size_t interval_native) { + assert(_sampler == NULL, "invariant"); + log_info(jfr)("Enrolling thread sampler"); + _sampler = new JfrThreadSampler(interval_java, interval_native, JfrOptionSet::stackdepth()); + _sampler->enroll(); +} + +void 
JfrThreadSampling::stop_sampler() { + if (_sampler != NULL) { + _sampler->disenroll(); + _sampler = NULL; + } +} + +void JfrThreadSampling::set_sampling_interval(bool java_interval, size_t period) { + size_t interval_java = 0; + size_t interval_native = 0; + if (_sampler != NULL) { + interval_java = _sampler->get_java_interval(); + interval_native = _sampler->get_native_interval(); + } + + if (java_interval) { + interval_java = period; + } else { + interval_native = period; + } + + if (interval_java > 0 || interval_native > 0) { + if (_sampler == NULL) { + start_sampler(interval_java, interval_native); + } else { + _sampler->set_java_interval(interval_java); + _sampler->set_native_interval(interval_native); + } + assert(_sampler != NULL, "invariant"); + log(interval_java, interval_native); + } else if (_sampler != NULL) { + log_info(jfr)("Disrolling thread sampler"); + stop_sampler(); + } +} + +void JfrThreadSampling::set_java_sample_interval(size_t period) { + if (_instance == NULL && 0 == period) { + return; + } + instance().set_sampling_interval(true, period); +} + +void JfrThreadSampling::set_native_sample_interval(size_t period) { + if (_instance == NULL && 0 == period) { + return; + } + instance().set_sampling_interval(false, period); +} + diff --git a/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.hpp b/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.hpp new file mode 100644 index 0000000000000000000000000000000000000000..7719054c855e792b469abe9188507c5cc73b10ac --- /dev/null +++ b/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.hpp @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLER_HPP +#define SHARE_VM_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLER_HPP + +#include "jfr/utilities/jfrAllocation.hpp" +#include "runtime/thread.hpp" + +class Monitor; +class JfrStackFrame; + +enum JfrSampleType { + NO_SAMPLE = 0, + JAVA_SAMPLE = 1, + NATIVE_SAMPLE = 2 +}; + +class JfrThreadSampler : public Thread { + friend class JfrThreadSampling; + private: + JfrStackFrame* const _frames; + JavaThread* _last_thread_java; + JavaThread* _last_thread_native; + size_t _interval_java; + size_t _interval_native; + int _cur_index; + const u4 _max_frames; + bool _should_terminate; + static Monitor* _transition_block_lock; + + int find_index_of_JavaThread(JavaThread** t_list, uint length, JavaThread *target); + JavaThread* next_thread(JavaThread** t_list, uint length, JavaThread* first_sampled, JavaThread* current); + void task_stacktrace(JfrSampleType type, JavaThread** last_thread); + JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames); + ~JfrThreadSampler(); + void enroll(); + void disenroll(); + void set_java_interval(size_t interval) { _interval_java = interval; }; + void set_native_interval(size_t interval) { _interval_native = interval; }; + size_t get_java_interval() { return _interval_java; }; + size_t get_native_interval() { return _interval_native; }; + + public: + void run(); + static Monitor* transition_block() { return _transition_block_lock; } + static void on_javathread_suspend(JavaThread* thread); +}; + +class JfrThreadSampling : public JfrCHeapObj { + friend class JfrRecorder; + private: + JfrThreadSampler* _sampler; + void start_sampler(size_t interval_java, size_t interval_native); + void stop_sampler(); + void set_sampling_interval(bool java_interval, size_t period); + + JfrThreadSampling(); + ~JfrThreadSampling(); + + static JfrThreadSampling& instance(); + static JfrThreadSampling* create(); + static void destroy(); + + public: + static void set_java_sample_interval(size_t period); + static void set_native_sample_interval(size_t period); +}; + +#endif // SHARE_VM_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLER_HPP + diff --git a/src/share/vm/jfr/recorder/access/jfrEventClass.cpp b/src/share/vm/jfr/recorder/access/jfrEventClass.cpp new file mode 100644 index 0000000000000000000000000000000000000000..71595bda0d923cf2ecb17814c42baea406043d73 --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrEventClass.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/recorder/access/jfrEventClass.hpp" +#include "jfr/recorder/checkpoint/constant/traceid/jfrTraceId.inline.hpp" + +bool JdkJfrEvent::is(const Klass* k) { + return JfrTraceId::is_jdk_jfr_event(k); +} + +bool JdkJfrEvent::is(const jclass jc) { + return JfrTraceId::is_jdk_jfr_event(jc); +} + +void JdkJfrEvent::tag_as(const Klass* k) { + JfrTraceId::tag_as_jdk_jfr_event(k); +} + +bool JdkJfrEvent::is_subklass(const Klass* k) { + return JfrTraceId::is_jdk_jfr_event_sub(k); +} + +bool JdkJfrEvent::is_subklass(const jclass jc) { + return JfrTraceId::is_jdk_jfr_event_sub(jc); +} + +void JdkJfrEvent::tag_as_subklass(const Klass* k) { + JfrTraceId::tag_as_jdk_jfr_event_sub(k); +} + +void JdkJfrEvent::tag_as_subklass(const jclass jc) { + JfrTraceId::tag_as_jdk_jfr_event_sub(jc); +} + +bool JdkJfrEvent::is_a(const Klass* k) { + return JfrTraceId::in_jdk_jfr_event_hierarchy(k); +} + +bool JdkJfrEvent::is_a(const jclass jc) { + return JfrTraceId::in_jdk_jfr_event_hierarchy(jc); +} + +bool JdkJfrEvent::is_host(const Klass* k) { + return JfrTraceId::is_event_host(k); +} + +bool JdkJfrEvent::is_host(const jclass jc) { + return JfrTraceId::is_event_host(jc); +} + +void JdkJfrEvent::tag_as_host(const Klass* k) { + JfrTraceId::tag_as_event_host(k); +} + +void JdkJfrEvent::tag_as_host(const jclass jc) { + JfrTraceId::tag_as_event_host(jc); +} + +bool JdkJfrEvent::is_visible(const Klass* k) { + return JfrTraceId::in_visible_set(k); +} + +bool JdkJfrEvent::is_visible(const jclass jc) { + return JfrTraceId::in_visible_set(jc); +} diff --git a/src/share/vm/jfr/recorder/access/jfrEventClass.hpp b/src/share/vm/jfr/recorder/access/jfrEventClass.hpp new file mode 100644 index 0000000000000000000000000000000000000000..195283804edbefe54cdc2f2cc967ee44acb2e475 --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrEventClass.hpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_RECORDER_ACCESS_JFREVENTCLASS_HPP +#define SHARE_VM_JFR_RECORDER_ACCESS_JFREVENTCLASS_HPP + +#include "jni.h" +#include "memory/allocation.hpp" + +class Klass; + +// +// For convenient access to the jdk.jfr.Event klass hierarchy. 
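+// A rough usage sketch (illustration only; the hook name below is hypothetical):
+// a class-load notification could use the predicates declared below to
+// discover and tag event subclasses, e.g.
+//
+//   void on_klass_load(const Klass* k) {
+//     if (JdkJfrEvent::is(k)) {
+//       return; // jdk.jfr.Event itself is tagged separately via tag_as()
+//     }
+//     if (JdkJfrEvent::is_a(k) && !JdkJfrEvent::is_subklass(k)) {
+//       JdkJfrEvent::tag_as_subklass(k); // mark as a jdk.jfr.Event subclass
+//     }
+//   }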
+// +class JdkJfrEvent : AllStatic { + public: + // jdk.jfr.Event + static bool is(const Klass* k); + static bool is(const jclass jc); + static void tag_as(const Klass* k); + + // jdk.jfr.Event subklasses + static bool is_subklass(const Klass* k); + static bool is_subklass(const jclass jc); + static void tag_as_subklass(const Klass* k); + static void tag_as_subklass(const jclass jc); + + // jdk.jfr.Event hierarchy + static bool is_a(const Klass* k); + static bool is_a(const jclass jc); + + // klasses that host a jdk.jfr.Event + static bool is_host(const Klass* k); + static bool is_host(const jclass jc); + static void tag_as_host(const Klass* k); + static void tag_as_host(const jclass jc); + + // in the set of classes made visible to java + static bool is_visible(const Klass* k); + static bool is_visible(const jclass jc); +}; + +#endif // SHARE_VM_JFR_RECORDER_ACCESS_JFREVENTCLASS_HPP diff --git a/src/share/vm/jfr/recorder/access/jfrFlush.cpp b/src/share/vm/jfr/recorder/access/jfrFlush.cpp new file mode 100644 index 0000000000000000000000000000000000000000..05fb5cc30f5ce43f0db601e3567c7e0728c401b6 --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrFlush.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/recorder/access/jfrFlush.hpp" +#include "jfr/recorder/access/jfrThreadData.hpp" +#include "jfr/recorder/jfrEventSetting.inline.hpp" +#include "jfr/recorder/storage/jfrStorage.hpp" +#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" +#include "runtime/thread.inline.hpp" +#include "utilities/debug.hpp" + + +JfrFlush::JfrFlush(JfrStorage::Buffer* old, size_t used, size_t requested, Thread* t) : + _result(JfrStorage::flush(old, used, requested, true, t)) { +} + +template +class LessThanHalfBufferSize : AllStatic { +public: + static bool evaluate(T* t) { + assert(t != NULL, "invariant"); + return t->free_size() < t->size() / 2; + } +}; + +template +class LessThanSize : AllStatic { + public: + static bool evaluate(T* t, size_t size) { + assert(t != NULL, "invariant"); + return t->free_size() < size; + } +}; + +bool jfr_is_event_enabled(TraceEventId id) { + return JfrEventSetting::is_enabled(id); +} + +bool jfr_has_stacktrace_enabled(TraceEventId id) { + return JfrEventSetting::has_stacktrace(id); +} + +void jfr_conditional_flush(TraceEventId id, size_t size, Thread* t) { + assert(jfr_is_event_enabled(id), "invariant"); + if (t->trace_data()->has_native_buffer()) { + JfrStorage::Buffer* const buffer = t->trace_data()->native_buffer(); + if (LessThanSize::evaluate(buffer, size)) { + JfrFlush f(buffer, 0, 0, t); + } + } +} + +bool jfr_save_stacktrace(Thread* t) { + JfrThreadData* const trace_data = t->trace_data(); + if (trace_data->has_cached_stack_trace()) { + return false; // no ownership + } + trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(t)); + return true; +} + +void jfr_clear_stacktrace(Thread* t) { + t->trace_data()->clear_cached_stack_trace(); +} diff --git a/src/share/vm/jfr/recorder/access/jfrFlush.hpp b/src/share/vm/jfr/recorder/access/jfrFlush.hpp new file mode 100644 index 0000000000000000000000000000000000000000..501fc35006e08499aa4394a07a225b04bb9fe606 --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrFlush.hpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_JFR_RECORDER_ACCESS_JFRFLUSH_HPP +#define SHARE_VM_JFR_RECORDER_ACCESS_JFRFLUSH_HPP + +#include "jfr/recorder/storage/jfrBuffer.hpp" +#include "memory/allocation.hpp" +#include "tracefiles/traceEventIds.hpp" + +class Thread; + +class JfrFlush : public StackObj { + public: + typedef JfrBuffer Type; + JfrFlush(Type* old, size_t used, size_t requested, Thread* t); + Type* result() const { return _result; } + private: + Type* _result; +}; + +void jfr_conditional_flush(TraceEventId id, size_t size, Thread* t); +bool jfr_is_event_enabled(TraceEventId id); +bool jfr_has_stacktrace_enabled(TraceEventId id); +bool jfr_save_stacktrace(Thread* t); +void jfr_clear_stacktrace(Thread* t); + +template +class JfrEventConditionalFlush { + public: + typedef JfrBuffer Type; + JfrEventConditionalFlush(Thread* t) { + if (jfr_is_event_enabled(Event::eventId)) { + jfr_conditional_flush(Event::eventId, sizeof(Event), t); + } + } +}; + +template +class JfrEventConditionalFlushWithStacktrace : public JfrEventConditionalFlush { + Thread* _t; + bool _owner; + public: + JfrEventConditionalFlushWithStacktrace(Thread* t) : JfrEventConditionalFlush(t), _t(t), _owner(false) { + if (Event::hasStackTrace && jfr_has_stacktrace_enabled(Event::eventId)) { + _owner = jfr_save_stacktrace(t); + } + } + ~JfrEventConditionalFlushWithStacktrace() { + if (_owner) { + jfr_clear_stacktrace(_t); + } + } +}; + +#endif // SHARE_VM_JFR_RECORDER_ACCESS_JFRFLUSH_HPP diff --git a/src/share/vm/jfr/recorder/access/jfrMemorySizer.cpp b/src/share/vm/jfr/recorder/access/jfrMemorySizer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..242a67ea5616615a872031ae61ca9af12d4ba4ff --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrMemorySizer.cpp @@ -0,0 +1,434 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/recorder/access/jfrMemorySizer.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "runtime/os.hpp" + +const julong MAX_ADJUSTED_GLOBAL_BUFFER_SIZE = 1 * M; +const julong MIN_ADJUSTED_GLOBAL_BUFFER_SIZE_CUTOFF = 512 * K; +const julong MIN_GLOBAL_BUFFER_SIZE = 64 * K; +// implies at least 2 * MIN_GLOBAL_BUFFER SIZE +const julong MIN_BUFFER_COUNT = 2; +// MAX global buffer count open ended +const julong DEFAULT_BUFFER_COUNT = 20; +// MAX thread local buffer size == size of a single global buffer (runtime determined) +// DEFAULT thread local buffer size = 2 * os page size (runtime determined) +const julong MIN_THREAD_BUFFER_SIZE = 4 * K; +const julong MIN_MEMORY_SIZE = 1 * M; +const julong DEFAULT_MEMORY_SIZE = 10 * M; + +// +// In pages: +// +// units = total_pages / per_unit_pages +// +static julong div_pages(julong& total_pages, julong& per_unit_pages) { + assert(total_pages > 0, "invariant"); + assert(per_unit_pages > 0, "invariant"); + assert(total_pages >= per_unit_pages, "invariant"); + + const julong units = total_pages / per_unit_pages; + const julong rem = total_pages % per_unit_pages; + + assert(units > 0, "invariant"); + + if (rem > 0) { + total_pages -= rem % units; + per_unit_pages += rem / units; + } + + assert(per_unit_pages > 0, "invariant"); + assert(total_pages % units == 0, "invariant"); + assert(units * per_unit_pages == total_pages, "invariant"); + assert(units == total_pages / per_unit_pages, "invariant"); + + return units; +} + +static void page_size_align_up(julong& value) { + static const julong alignment = os::vm_page_size() - 1; + value = (value + alignment) & ~alignment; +} + +// +// In bytes: +// units = total_bytes / per_unit_bytes +// +static julong div_total_by_per_unit(julong& total_bytes, julong& per_unit_bytes) { + assert(total_bytes > 0, "invariant"); + assert(per_unit_bytes > 0, "invariant"); + assert(total_bytes >= per_unit_bytes, "invariant"); + + page_size_align_up(total_bytes); + assert(total_bytes % os::vm_page_size() == 0, "invariant"); + julong total_pages = total_bytes / os::vm_page_size(); + + page_size_align_up(per_unit_bytes); + assert(per_unit_bytes % os::vm_page_size() == 0, "invariant"); + julong per_unit_pages = per_unit_bytes / os::vm_page_size(); + + const julong units = div_pages(total_pages, per_unit_pages); + assert(units > 0, "invariant"); + + total_bytes = total_pages * os::vm_page_size(); + per_unit_bytes = per_unit_pages * os::vm_page_size(); + + assert(per_unit_bytes > 0, "invariant"); + assert(total_bytes / per_unit_bytes == units, "invariant"); + + return units; +} + +// +// per_unit_bytes = total_bytes / units +// +static julong div_total_by_units(julong& total_bytes, julong& units) { + page_size_align_up(total_bytes); + assert(total_bytes % os::vm_page_size() == 0, "invariant"); + julong total_pages = total_bytes / os::vm_page_size(); + assert(units > 0, "invariant"); + + julong per_unit_pages = total_pages <= units ? 
1 : total_pages / units; + units = div_pages(total_pages, per_unit_pages); + + julong per_unit_bytes = per_unit_pages * os::vm_page_size(); + assert(per_unit_bytes % os::vm_page_size() == 0, "invariant"); + + total_bytes = total_pages * os::vm_page_size(); + assert(total_bytes % os::vm_page_size() == 0, "invariant"); + + assert(total_bytes % units == 0, "invariant"); + assert(total_bytes / units == per_unit_bytes, "invariant"); + assert(units * per_unit_bytes == total_bytes, "invariant"); + + return per_unit_bytes; +} + +// +// total_bytes = per_unit_bytes * units; +// +static julong multiply(julong& per_unit_bytes, julong& units) { + page_size_align_up(per_unit_bytes); + assert(per_unit_bytes % os::vm_page_size() == 0, "invariant"); + assert(units > 0, "invariant"); + + julong total_bytes = per_unit_bytes * units; + assert(total_bytes % os::vm_page_size() == 0, "invariant"); + + assert(total_bytes % units == 0, "invariant"); + assert(total_bytes / units == per_unit_bytes, "invariant"); + assert(units * per_unit_bytes == total_bytes, "invariant"); + + return total_bytes; +} + +// Total_bytes is explicitly set. +// +// Deduce other parameters by delegating to a sizing policy +template +static julong adjust(JfrMemoryOptions* options) { + page_size_align_up(options->memory_size); + assert(options->memory_size % os::vm_page_size() == 0, "invariant"); + julong total_pages = options->memory_size / os::vm_page_size(); + assert(options->buffer_count > 0, "invariant"); + julong per_unit_pages = total_pages / options->buffer_count; + page_size_align_up(options->thread_buffer_size); + assert(options->thread_buffer_size % os::vm_page_size() == 0, "invariant"); + julong thread_buffer_pages = options->thread_buffer_size / os::vm_page_size(); + + SizingPolicy::adjust(total_pages, per_unit_pages, options->buffer_count, thread_buffer_pages, options->thread_buffer_size_configured); + assert(options->buffer_count * per_unit_pages == total_pages, "invariant"); + + const julong per_unit_bytes = per_unit_pages * os::vm_page_size(); + options->memory_size = total_pages * os::vm_page_size(); + options->thread_buffer_size = thread_buffer_pages * os::vm_page_size(); + + assert(options->memory_size % options->buffer_count == 0, "invariant"); + assert(options->memory_size / options->buffer_count == per_unit_bytes, "invariant"); + assert(options->buffer_count * per_unit_bytes == options->memory_size, "invariant"); + assert(per_unit_bytes >= options->thread_buffer_size, "invariant"); + return per_unit_bytes; +} + +static void align_buffer_size(julong& buffer_size_in_pages, julong max_size_pages, julong min_size_pages, bool sizeup = false) { + buffer_size_in_pages = MIN2(buffer_size_in_pages, max_size_pages); + buffer_size_in_pages = MAX2(buffer_size_in_pages, min_size_pages); + size_t multiples = 0; + if (buffer_size_in_pages < max_size_pages) { + while (buffer_size_in_pages >= + (min_size_pages << (multiples + (sizeup ? 
0 : 1)))) { + ++multiples; + } + buffer_size_in_pages = min_size_pages << multiples; + } + assert(buffer_size_in_pages >= min_size_pages && buffer_size_in_pages <= max_size_pages, "invariant"); +} + +static void adjust_buffer_size_to_total_memory_size(julong& total_pages, julong& buffer_size_pages) { + static const julong max_buffer_size_pages = MAX_ADJUSTED_GLOBAL_BUFFER_SIZE / os::vm_page_size(); + // If memory size is less than DEFAULT_MEMORY_SIZE, + // the adjustment algorithm can decrease the size of the global buffer + // all the way down to the MIN_GLOBAL_BUFFER_SIZE (taking embedded use case in account). + // If memory size is larger than DEFAULT_MEMORY_SIZE, the lowest size of + // a global buffer will be the size of MIN_ADJUSTED_GLOBAL_BUFFER_SIZE_CUTOFF + static const julong min_buffer_size_pages = + total_pages * os::vm_page_size() < DEFAULT_MEMORY_SIZE ? + MIN_GLOBAL_BUFFER_SIZE / os::vm_page_size() : + MIN_ADJUSTED_GLOBAL_BUFFER_SIZE_CUTOFF / os::vm_page_size(); + + align_buffer_size(buffer_size_pages, max_buffer_size_pages, min_buffer_size_pages); + assert(buffer_size_pages % min_buffer_size_pages == 0, "invariant"); + + julong remainder = total_pages % buffer_size_pages; + while (remainder >= (buffer_size_pages >> 1)) { + if (buffer_size_pages <= min_buffer_size_pages) { + break; + } + buffer_size_pages >>= 1; + remainder = total_pages % buffer_size_pages; + } +} + +// Sizing policy class +class ScaleOutAdjuster : public AllStatic { + public: + static void adjust(julong& total_pages, + julong& buffer_size_pages, + julong& buffer_count, + julong& thread_buffer_size_pages, + bool is_thread_buffer_size_set) { + assert(buffer_count > 0, "invariant"); + adjust_buffer_size_to_total_memory_size(total_pages, buffer_size_pages); + assert(buffer_size_pages * os::vm_page_size() >= MIN_GLOBAL_BUFFER_SIZE, "invariant"); + assert((buffer_size_pages * os::vm_page_size()) % MIN_GLOBAL_BUFFER_SIZE == 0, "invariant"); + if (is_thread_buffer_size_set) { + if (thread_buffer_size_pages > buffer_size_pages) { + buffer_size_pages = thread_buffer_size_pages; + } + } + // and with this information, calculate what the new buffer count will be + buffer_count = div_pages(total_pages, buffer_size_pages); + } +}; + +static void memory_and_thread_buffer_size(JfrMemoryOptions* options) { + assert(options->memory_size_configured, "invariant"); + assert(!options->buffer_count_configured, "invariant"); + assert(!options->global_buffer_size_configured, "invariant"); + // here the only thing specified is the overall total memory size + // we can and will apply some sizing heuristics to derive both + // the size of an individual global buffer and by implication the number of global + // buffers to use. Starting values for buffer count and global_buffer_size + // will be the defaults. 
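+  // For example (assuming a 4K page size): -XX:FlightRecorderOptions=memorysize=64m
+  // starts out as 64M spread over the default 20 buffers (~3.2M each); the
+  // scale-out adjustment caps a global buffer at MAX_ADJUSTED_GLOBAL_BUFFER_SIZE (1M),
+  // which ends up as 64 global buffers of 1M each.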
+ options->global_buffer_size = adjust(options); +} + +static void memory_size_and_buffer_count(JfrMemoryOptions* options) { + assert(options->memory_size_configured, "invariant"); + assert(!options->global_buffer_size_configured, "invariant"); + assert(!options->thread_buffer_size_configured, "invariant"); + assert(options->buffer_count_configured, "invariant"); + options->global_buffer_size = div_total_by_units(options->memory_size, options->buffer_count); +} + +static void memory_size_and_global_buffer_size(JfrMemoryOptions* options) { + assert(options->memory_size_configured, "invariant"); + assert(options->global_buffer_size_configured, "invariant"); + assert(!options->buffer_count_configured, "invariant"); + page_size_align_up(options->thread_buffer_size); + options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size); + if (options->thread_buffer_size > options->global_buffer_size) { + options->global_buffer_size = options->thread_buffer_size; + options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size); + } + assert(options->global_buffer_size >= options->thread_buffer_size, "invariant"); +} + +static bool is_ambiguous(const JfrMemoryOptions* options) { + assert(options->memory_size_configured, "invariant"); + assert(options->global_buffer_size_configured, "invariant"); + assert(options->buffer_count_configured, "invariant"); + assert(options->thread_buffer_size <= options->global_buffer_size, "invariant"); + // This can cause an ambiguous situation because all three parameters are explicitly set. + return options->global_buffer_size * options->buffer_count != options->memory_size; +} + +static void all_options_set(JfrMemoryOptions* options) { + options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size); + page_size_align_up(options->thread_buffer_size); + if (options->thread_buffer_size > options->global_buffer_size) { + options->global_buffer_size = options->thread_buffer_size; + options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size); + } + assert(options->global_buffer_size >= options->thread_buffer_size, "invariant"); + assert(options->memory_size / options->global_buffer_size == options->buffer_count, "invariant"); + assert(options->memory_size % options->global_buffer_size == 0, "invariant"); +} + +static void global_buffer_size(JfrMemoryOptions* options) { + assert(!options->memory_size_configured, "invariant"); + page_size_align_up(options->thread_buffer_size); + if (options->thread_buffer_size > options->global_buffer_size) { + options->global_buffer_size = options->thread_buffer_size; + } + options->memory_size = multiply(options->global_buffer_size, options->buffer_count); + assert(options->global_buffer_size >= options->thread_buffer_size, "invariant"); +} + +static void thread_buffer_size(JfrMemoryOptions* options) { + assert(!options->global_buffer_size_configured, "invariant"); + assert(options->thread_buffer_size_configured, "invariant"); + page_size_align_up(options->thread_buffer_size); + options->global_buffer_size = div_total_by_units(options->memory_size, options->buffer_count); + if (options->thread_buffer_size > options->global_buffer_size) { + options->global_buffer_size = options->thread_buffer_size; + options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size); + } + assert(options->global_buffer_size >= options->thread_buffer_size, "invariant"); +} + +static void 
default_size(const JfrMemoryOptions* options) { + // no memory options explicitly set + // default values already statically adjusted + assert(!options->thread_buffer_size_configured, "invariant"); + assert(!options->memory_size_configured, "invariant"); + assert(!options->buffer_count_configured, "invarinat"); + assert(!options->global_buffer_size_configured, "invariant"); +} + +#ifdef ASSERT +static void assert_post_condition(const JfrMemoryOptions* options) { + assert(options->memory_size % os::vm_page_size() == 0, "invariant"); + assert(options->global_buffer_size % os::vm_page_size() == 0, "invariant"); + assert(options->thread_buffer_size % os::vm_page_size() == 0, "invariant"); + assert(options->buffer_count > 0, "invariant"); +} +#endif + +// MEMORY SIZING ALGORITHM + +bool JfrMemorySizer::adjust_options(JfrMemoryOptions* options) { + assert(options != NULL, "invariant"); + + enum MemoryOptions { + MEMORY_SIZE = 1, + GLOBAL_BUFFER_SIZE = 2, + GLOBAL_BUFFER_COUNT = 4, + THREAD_BUFFER_SIZE = 8 + }; + + // LEGEND + // + // M = "memorysize" option + // G = "globalbuffersize" option + // C = "numglobalbuffers" option + // T = "threadbuffersize" option + // + // The memory options comprise an n-set (a 4-set) = { M, G, C, T } + // + // Number of r-subsets = 5 (0, 1, 2, 3, 4) (including null set) + // + // Unordered selection: + // + // C(4, 0) = {} = NULL set = 1 + // C(4, 1) = { (M), (G), (C), (T) } = 4 + // C(4, 2) = { (M, G), (M, C), (M, T), (G, C), (G, T), (C, T) } = 6 + // C(4, 3) = { (M, G, C), (M, G, T), (M, C, T), (G, C, T) } = 4 + // C(4, 4) = { (M, G, C, T) } = 1 + // + // in shorter terms: P({ M, G, C, T}) = 16 + // +#define MG (MEMORY_SIZE | GLOBAL_BUFFER_SIZE) +#define MC (MEMORY_SIZE | GLOBAL_BUFFER_COUNT) +#define MT (MEMORY_SIZE | THREAD_BUFFER_SIZE) +#define MGC (MG | GLOBAL_BUFFER_COUNT) +#define MGT (MG | THREAD_BUFFER_SIZE) +#define MCT (MC | THREAD_BUFFER_SIZE) +#define MGCT (MGC | THREAD_BUFFER_SIZE) +#define GC (GLOBAL_BUFFER_SIZE | GLOBAL_BUFFER_COUNT) +#define GT (GLOBAL_BUFFER_SIZE | THREAD_BUFFER_SIZE) +#define GCT (GC | THREAD_BUFFER_SIZE) +#define CT (GLOBAL_BUFFER_COUNT | THREAD_BUFFER_SIZE) + + int set_of_options = 0; + + if (options->memory_size_configured) { + set_of_options |= MEMORY_SIZE; + } + if (options->global_buffer_size_configured) { + set_of_options |= GLOBAL_BUFFER_SIZE; + } + if (options->buffer_count_configured) { + set_of_options |= GLOBAL_BUFFER_COUNT; + } + if (options->thread_buffer_size_configured) { + set_of_options |= THREAD_BUFFER_SIZE; + } + + switch (set_of_options) { + case MT: + case MEMORY_SIZE: + memory_and_thread_buffer_size(options); + break; + case MC: + memory_size_and_buffer_count(options); + break; + case MGT: + assert(options->thread_buffer_size_configured, "invariant"); + case MG: + memory_size_and_global_buffer_size(options); + break; + case MGC: + case MGCT: + if (is_ambiguous(options)) { + // Let the user resolve the ambiguity by bailing. 
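+        // For example memorysize=10m, globalbuffersize=1m and numglobalbuffers=5
+        // is rejected here, since 1m * 5 = 5m does not equal the requested 10m.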
+ return false; + } + all_options_set(options); + break; + case GCT: + assert(options->buffer_count_configured, "invariant"); + assert(options->thread_buffer_size_configured, "invariant"); + case GC: + assert(options->global_buffer_size_configured, "invariant"); + case GT: + case GLOBAL_BUFFER_COUNT: + case GLOBAL_BUFFER_SIZE: + global_buffer_size(options); + break; + case MCT: + assert(options->memory_size_configured, "invariant"); + case CT: + assert(options->buffer_count_configured, "invariant"); + case THREAD_BUFFER_SIZE: + thread_buffer_size(options); + break; + default: + default_size(options); + } + DEBUG_ONLY(assert_post_condition(options);) + return true; +} diff --git a/src/share/vm/jfr/recorder/access/jfrMemorySizer.hpp b/src/share/vm/jfr/recorder/access/jfrMemorySizer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..7b8b6499a88ef34b614a8ada80aa30f06dda0189 --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrMemorySizer.hpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_RECORDER_ACCESS_JFRMEMORYSIZER_HPP +#define SHARE_VM_JFR_RECORDER_ACCESS_JFRMEMORYSIZER_HPP + +#include "jni.h" +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + +extern const julong MIN_BUFFER_COUNT; +extern const julong MIN_GLOBAL_BUFFER_SIZE; +extern const julong MIN_MEMORY_SIZE; +extern const julong MIN_THREAD_BUFFER_SIZE; + +struct JfrMemoryOptions { + julong memory_size; + julong global_buffer_size; + julong buffer_count; + julong thread_buffer_size; + bool memory_size_configured; + bool global_buffer_size_configured; + bool buffer_count_configured; + bool thread_buffer_size_configured; +}; + +// +// Encapsulates sizing of memory options +// The options parameter is modified with updated values. +// +class JfrMemorySizer: AllStatic { + public: + static bool adjust_options(JfrMemoryOptions* options); +}; + +#endif // SHARE_VM_JFR_RECORDER_ACCESS_JFRMEMORYSIZER_HPP diff --git a/src/share/vm/jfr/recorder/access/jfrOptionSet.cpp b/src/share/vm/jfr/recorder/access/jfrOptionSet.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7cedba7142e4a68f0e3c7bc25b8b59abd6c9839e --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrOptionSet.cpp @@ -0,0 +1,713 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jfr/dcmd/jfrDcmds.hpp" +#include "jfr/recorder/access/jfrMemorySizer.hpp" +#include "jfr/recorder/access/jfrOptionSet.hpp" +#include "jfr/utilities/jfrAllocation.hpp" +#include "jfr/utilities/jfrLog.hpp" +#include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/java.hpp" +#include "runtime/thread.inline.hpp" +#include "services/diagnosticArgument.hpp" +#include "services/diagnosticFramework.hpp" +#include "utilities/ostream.hpp" + +struct ObsoleteOption { + const char* name; + const char* message; +}; + +static const ObsoleteOption OBSOLETE_OPTIONS[] = { + {"checkpointbuffersize", ""}, + {"maxsize", "Use -XX:StartFlightRecording=maxsize=... instead."}, + {"maxage", "Use -XX:StartFlightRecording=maxage=... instead."}, + {"settings", "Use -XX:StartFlightRecording=settings=... instead."}, + {"defaultrecording", "Use -XX:StartFlightRecording=disk=false to create an in-memory recording."}, + {"disk", "Use -XX:StartFlightRecording=disk=... instead."}, + {"dumponexit", "Use -XX:StartFlightRecording=dumponexit=... instead."}, + {"dumponexitpath", "Use -XX:StartFlightRecording=filename=... instead."}, + {"loglevel", "Use -Xlog:jfr=... 
instead."} +}; + +jlong JfrOptionSet::max_chunk_size() { + return _max_chunk_size; +} + +void JfrOptionSet::set_max_chunk_size(jlong value) { + _max_chunk_size = value; +} + +jlong JfrOptionSet::global_buffer_size() { + return _global_buffer_size; +} + +void JfrOptionSet::set_global_buffer_size(jlong value) { + _global_buffer_size = value; +} + +jlong JfrOptionSet::thread_buffer_size() { + return _thread_buffer_size; +} + +void JfrOptionSet::set_thread_buffer_size(jlong value) { + _thread_buffer_size = value; +} + +jlong JfrOptionSet::memory_size() { + return _memory_size; +} + +void JfrOptionSet::set_memory_size(jlong value) { + _memory_size = value; +} + +jlong JfrOptionSet::num_global_buffers() { + return _num_global_buffers; +} + +void JfrOptionSet::set_num_global_buffers(jlong value) { + _num_global_buffers = value; +} + +jint JfrOptionSet::old_object_queue_size() { + return (jint)_old_object_queue_size; +} + +void JfrOptionSet::set_old_object_queue_size(jlong value) { + _old_object_queue_size = value; +} + +u4 JfrOptionSet::stackdepth() { + return _stack_depth; +} + +static const u4 STACK_DEPTH_DEFAULT = 64; +static const u4 MIN_STACK_DEPTH = 1; +static const u4 MAX_STACK_DEPTH = 2048; + +void JfrOptionSet::set_stackdepth(u4 depth) { + if (depth < MIN_STACK_DEPTH) { + _stack_depth = MIN_STACK_DEPTH; + } else if (depth > MAX_STACK_DEPTH) { + _stack_depth = MAX_STACK_DEPTH; + } else { + _stack_depth = depth; + } +} + +bool JfrOptionSet::sample_threads() { + return _sample_threads == JNI_TRUE; +} + +void JfrOptionSet::set_sample_threads(jboolean sample) { + _sample_threads = sample; +} + +bool JfrOptionSet::can_retransform() { + return _retransform == JNI_TRUE; +} + +void JfrOptionSet::set_retransform(jboolean value) { + _retransform = value; +} + +bool JfrOptionSet::sample_protection() { + return _sample_protection == JNI_TRUE; +} + +#ifdef ASSERT +void JfrOptionSet::set_sample_protection(jboolean protection) { + _sample_protection = protection; +} +#endif + +bool JfrOptionSet::compressed_integers() { + // Set this to false for debugging purposes. 
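+  // When true, the JFR writers emit integer fields with a variable-length
+  // ("compressed") encoding; returning false would write plain values, which
+  // can be easier to inspect when debugging the binary recording format.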
+ return true; +} + +bool JfrOptionSet::allow_retransforms() { +#if INCLUDE_JVMTI + return true; +#else + return false; +#endif +} + +bool JfrOptionSet::allow_event_retransforms() { + return allow_retransforms() && (DumpSharedSpaces || can_retransform()); +} + +// default options for the dcmd parser +const char* const default_repository = NULL; +const char* const default_global_buffer_size = "512k"; +const char* const default_num_global_buffers = "20"; +const char* const default_memory_size = "10m"; +const char* const default_thread_buffer_size = "8k"; +const char* const default_max_chunk_size = "12m"; +const char* const default_sample_threads = "true"; +const char* const default_stack_depth = "64"; +const char* const default_retransform = "true"; +const char* const default_old_object_queue_size = "256"; +DEBUG_ONLY(const char* const default_sample_protection = "false";) + +// statics +static DCmdArgument _dcmd_repository( + "repository", + "Flight recorder disk repository location", + "STRING", + false, + default_repository); + +static DCmdArgument _dcmd_threadbuffersize( + "threadbuffersize", + "Thread buffer size", + "MEMORY SIZE", + false, + default_thread_buffer_size); + +static DCmdArgument _dcmd_memorysize( + "memorysize", + "Size of memory to be used by Flight Recorder", + "MEMORY SIZE", + false, + default_memory_size); + +static DCmdArgument _dcmd_globalbuffersize( + "globalbuffersize", + "Global buffer size", + "MEMORY SIZE", + false, + default_global_buffer_size); + +static DCmdArgument _dcmd_numglobalbuffers( + "numglobalbuffers", + "Number of global buffers", + "JULONG", + false, + default_num_global_buffers); + +static DCmdArgument _dcmd_maxchunksize( + "maxchunksize", + "Maximum size of a single repository disk chunk", + "MEMORY SIZE", + false, + default_max_chunk_size); + +static DCmdArgument _dcmd_old_object_queue_size ( + "old-object-queue-size", + "Maximum number of old objects to track", + "JINT", + false, + default_old_object_queue_size); + +static DCmdArgument _dcmd_sample_threads( + "samplethreads", + "Thread sampling enable / disable (only sampling when event enabled and sampling enabled)", + "BOOLEAN", + false, + default_sample_threads); + +#ifdef ASSERT +static DCmdArgument _dcmd_sample_protection( + "sampleprotection", + "Safeguard for stackwalking while sampling threads (false by default)", + "BOOLEAN", + false, + default_sample_protection); +#endif + +static DCmdArgument _dcmd_stackdepth( + "stackdepth", + "Stack depth for stacktraces (minimum 1, maximum 2048)", + "JULONG", + false, + default_stack_depth); + +static DCmdArgument _dcmd_retransform( + "retransform", + "If event classes should be instrumented using JVMTI (by default true)", + "BOOLEAN", + true, + default_retransform); + +static DCmdParser _parser; + +static void register_parser_options() { + _parser.add_dcmd_option(&_dcmd_repository); + _parser.add_dcmd_option(&_dcmd_threadbuffersize); + _parser.add_dcmd_option(&_dcmd_memorysize); + _parser.add_dcmd_option(&_dcmd_globalbuffersize); + _parser.add_dcmd_option(&_dcmd_numglobalbuffers); + _parser.add_dcmd_option(&_dcmd_maxchunksize); + _parser.add_dcmd_option(&_dcmd_stackdepth); + _parser.add_dcmd_option(&_dcmd_sample_threads); + _parser.add_dcmd_option(&_dcmd_retransform); + _parser.add_dcmd_option(&_dcmd_old_object_queue_size); + DEBUG_ONLY(_parser.add_dcmd_option(&_dcmd_sample_protection);) +} + +static bool parse_flight_recorder_options_internal(TRAPS) { + if (FlightRecorderOptions == NULL) { + return true; + } + const size_t length = 
strlen((const char*)FlightRecorderOptions); + CmdLine cmdline((const char*)FlightRecorderOptions, length, true); + _parser.parse(&cmdline, ',', THREAD); + if (HAS_PENDING_EXCEPTION) { + for (int index = 0; index < 9; index++) { + ObsoleteOption option = OBSOLETE_OPTIONS[index]; + const char* p = strstr((const char*)FlightRecorderOptions, option.name); + const size_t option_length = strlen(option.name); + if (p != NULL && p[option_length] == '=') { + log_error(arguments) ("-XX:FlightRecorderOptions=%s=... has been removed. %s", option.name, option.message); + return false; + } + } + ResourceMark rm(THREAD); + oop message = java_lang_Throwable::message(PENDING_EXCEPTION); + if (message != NULL) { + const char* msg = java_lang_String::as_utf8_string(message); + log_error(arguments) ("%s", msg); + } + CLEAR_PENDING_EXCEPTION; + return false; + } + return true; +} + +jlong JfrOptionSet::_max_chunk_size = 0; +jlong JfrOptionSet::_global_buffer_size = 0; +jlong JfrOptionSet::_thread_buffer_size = 0; +jlong JfrOptionSet::_memory_size = 0; +jlong JfrOptionSet::_num_global_buffers = 0; +jlong JfrOptionSet::_old_object_queue_size = 0; +u4 JfrOptionSet::_stack_depth = STACK_DEPTH_DEFAULT; +jboolean JfrOptionSet::_sample_threads = JNI_TRUE; +jboolean JfrOptionSet::_retransform = JNI_TRUE; +#ifdef ASSERT +jboolean JfrOptionSet::_sample_protection = JNI_FALSE; +#else +jboolean JfrOptionSet::_sample_protection = JNI_TRUE; +#endif + +bool JfrOptionSet::initialize(Thread* thread) { + register_parser_options(); + if (!parse_flight_recorder_options_internal(thread)) { + return false; + } + if (_dcmd_retransform.is_set()) { + set_retransform(_dcmd_retransform.value()); + } + set_old_object_queue_size(_dcmd_old_object_queue_size.value()); + return adjust_memory_options(); +} + +bool JfrOptionSet::configure(TRAPS) { + if (FlightRecorderOptions == NULL) { + return true; + } + ResourceMark rm(THREAD); + bufferedStream st; + // delegate to DCmd execution + JfrConfigureFlightRecorderDCmd configure(&st, false); + configure._repository_path.set_is_set(_dcmd_repository.is_set()); + char* repo = _dcmd_repository.value(); + if (repo != NULL) { + const size_t len = strlen(repo); + char* repo_copy = JfrCHeapObj::new_array(len + 1); + if (NULL == repo_copy) { + return false; + } + strncpy(repo_copy, repo, len + 1); + configure._repository_path.set_value(repo_copy); + } + + configure._stack_depth.set_is_set(_dcmd_stackdepth.is_set()); + configure._stack_depth.set_value(_dcmd_stackdepth.value()); + + configure._thread_buffer_size.set_is_set(_dcmd_threadbuffersize.is_set()); + configure._thread_buffer_size.set_value(_dcmd_threadbuffersize.value()._size); + + configure._global_buffer_count.set_is_set(_dcmd_numglobalbuffers.is_set()); + configure._global_buffer_count.set_value(_dcmd_numglobalbuffers.value()); + + configure._global_buffer_size.set_is_set(_dcmd_globalbuffersize.is_set()); + configure._global_buffer_size.set_value(_dcmd_globalbuffersize.value()._size); + + configure._max_chunk_size.set_is_set(_dcmd_maxchunksize.is_set()); + configure._max_chunk_size.set_value(_dcmd_maxchunksize.value()._size); + + configure._memory_size.set_is_set(_dcmd_memorysize.is_set()); + configure._memory_size.set_value(_dcmd_memorysize.value()._size); + + configure._sample_threads.set_is_set(_dcmd_sample_threads.is_set()); + configure._sample_threads.set_value(_dcmd_sample_threads.value()); + + configure.execute(DCmd_Source_Internal, THREAD); + + if (HAS_PENDING_EXCEPTION) { + java_lang_Throwable::print(PENDING_EXCEPTION, tty); + 
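+    // The configuration error has been reported above; clear the pending
+    // exception and report failure to the caller rather than propagating it.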
CLEAR_PENDING_EXCEPTION;
+    return false;
+  }
+  return true;
+}
+
+template <typename Argument>
+static julong divide_with_user_unit(Argument& memory_argument, julong value) {
+  if (memory_argument.value()._size != memory_argument.value()._val) {
+    switch (memory_argument.value()._multiplier) {
+      case 'k': case 'K':
+        return value / K;
+      case 'm': case 'M':
+        return value / M;
+      case 'g': case 'G':
+        return value / G;
+    }
+  }
+  return value;
+}
+
+template <typename Argument>
+static void log_lower_than_min_value(Argument& memory_argument, julong min_value) {
+  if (memory_argument.value()._size != memory_argument.value()._val) {
+    // has multiplier
+    log_error(arguments) (
+      "This value is lower than the minimum size required " JULONG_FORMAT "%c",
+      divide_with_user_unit(memory_argument, min_value),
+      memory_argument.value()._multiplier);
+    return;
+  }
+  log_error(arguments) (
+    "This value is lower than the minimum size required " JULONG_FORMAT,
+    divide_with_user_unit(memory_argument, min_value));
+}
+
+template <typename Argument>
+static void log_set_value(Argument& memory_argument) {
+  if (memory_argument.value()._size != memory_argument.value()._val) {
+    // has multiplier
+    log_error(arguments) (
+      "Value specified for option \"%s\" is " JULONG_FORMAT "%c",
+      memory_argument.name(),
+      memory_argument.value()._val,
+      memory_argument.value()._multiplier);
+    return;
+  }
+  log_error(arguments) (
+    "Value specified for option \"%s\" is " JULONG_FORMAT,
+    memory_argument.name(), memory_argument.value()._val);
+}
+
+template <typename MemoryArg>
+static void log_adjustments(MemoryArg& original_memory_size, julong new_memory_size, const char* msg) {
+  log_trace(arguments) (
+    "%s size (original) " JULONG_FORMAT " B (user defined: %s)",
+    msg,
+    original_memory_size.value()._size,
+    original_memory_size.is_set() ? "true" : "false");
+  log_trace(arguments) (
+    "%s size (adjusted) " JULONG_FORMAT " B (modified: %s)",
+    msg,
+    new_memory_size,
+    original_memory_size.value()._size != new_memory_size ? "true" : "false");
+  log_trace(arguments) (
+    "%s size (adjustment) %s" JULONG_FORMAT " B",
+    msg,
+    new_memory_size < original_memory_size.value()._size ? "-" : "+",
+    new_memory_size < original_memory_size.value()._size ?
+      original_memory_size.value()._size - new_memory_size :
+      new_memory_size - original_memory_size.value()._size);
+}
+
+// All "triangular" options are explicitly set;
+// check that they are congruent and not causing
+// an ambiguous situation
+template <typename MemoryArg, typename NumberArg>
+static bool check_for_ambiguity(MemoryArg& memory_size, MemoryArg& global_buffer_size, NumberArg& num_global_buffers) {
+  assert(memory_size.is_set(), "invariant");
+  assert(global_buffer_size.is_set(), "invariant");
+  assert(num_global_buffers.is_set(), "invariant");
+  const julong calc_size = global_buffer_size.value()._size * (julong)num_global_buffers.value();
+  if (calc_size != memory_size.value()._size) {
+    // ambiguous
+    log_set_value(global_buffer_size);
+    log_error(arguments) (
+      "Value specified for option \"%s\" is " JLONG_FORMAT,
+      num_global_buffers.name(), num_global_buffers.value());
+    log_set_value(memory_size);
+    log_error(arguments) (
+      "These values are causing an ambiguity when trying to determine how much memory to use");
+    log_error(arguments) ("\"%s\" * \"%s\" do not equal \"%s\"",
+      global_buffer_size.name(),
+      num_global_buffers.name(),
+      memory_size.name());
+    log_error(arguments) (
+      "Try to remove one of the involved options or make sure they are unambiguous");
+    return false;
+  }
+  return true;
+}
+
+template <typename Argument>
+static bool ensure_minimum_count(Argument& buffer_count_argument, jlong min_count) {
+  if (buffer_count_argument.value() < min_count) {
+    log_error(arguments) (
+      "Value specified for option \"%s\" is " JLONG_FORMAT,
+      buffer_count_argument.name(), buffer_count_argument.value());
+    log_error(arguments) (
+      "This value is lower than the minimum required number " JLONG_FORMAT,
+      min_count);
+    return false;
+  }
+  return true;
+}
+
+// global buffer size and num global buffers specified;
+// ensure that this particular combination is higher than the minimum memory size
+template <typename MemoryArg, typename NumberArg>
+static bool ensure_calculated_gteq(MemoryArg& global_buffer_size, NumberArg& num_global_buffers, julong min_value) {
+  assert(global_buffer_size.is_set(), "invariant");
+  assert(num_global_buffers.is_set(), "invariant");
+  const julong calc_size = global_buffer_size.value()._size * (julong)num_global_buffers.value();
+  if (calc_size < min_value) {
+    log_set_value(global_buffer_size);
+    log_error(arguments) (
+      "Value specified for option \"%s\" is " JLONG_FORMAT,
+      num_global_buffers.name(), num_global_buffers.value());
+    log_error(arguments) ("\"%s\" * \"%s\" (" JULONG_FORMAT
+      ") is lower than minimum memory size required " JULONG_FORMAT,
+      global_buffer_size.name(),
+      num_global_buffers.name(),
+      calc_size,
+      min_value);
+    return false;
+  }
+  return true;
+}
+
+template <typename Argument>
+static bool ensure_first_gteq_second(Argument& first_argument, Argument& second_argument) {
+  if (second_argument.value()._size > first_argument.value()._size) {
+    log_set_value(first_argument);
+    log_set_value(second_argument);
+    log_error(arguments) (
+      "The value for option \"%s\" should not be larger than the value specified for option \"%s\"",
+      second_argument.name(), first_argument.name());
+    return false;
+  }
+  return true;
+}
+
+static bool valid_memory_relations(const JfrMemoryOptions& options) {
+  if (options.global_buffer_size_configured) {
+    if (options.memory_size_configured) {
+      if (!ensure_first_gteq_second(_dcmd_memorysize, _dcmd_globalbuffersize)) {
+        return false;
+      }
+    }
+    if (options.thread_buffer_size_configured) {
+      if (!ensure_first_gteq_second(_dcmd_globalbuffersize, _dcmd_threadbuffersize)) {
+        return false;
+      }
+    }
+    if
(options.buffer_count_configured) { + if (!ensure_calculated_gteq(_dcmd_globalbuffersize, _dcmd_numglobalbuffers, MIN_MEMORY_SIZE)) { + return false; + } + } + } + return true; +} + +static void post_process_adjusted_memory_options(const JfrMemoryOptions& options) { + assert(options.memory_size >= MIN_MEMORY_SIZE, "invariant"); + assert(options.global_buffer_size >= MIN_GLOBAL_BUFFER_SIZE, "invariant"); + assert(options.buffer_count >= MIN_BUFFER_COUNT, "invariant"); + assert(options.thread_buffer_size >= MIN_THREAD_BUFFER_SIZE, "invariant"); + log_adjustments(_dcmd_memorysize, options.memory_size, "Memory"); + log_adjustments(_dcmd_globalbuffersize, options.global_buffer_size, "Global buffer"); + log_adjustments(_dcmd_threadbuffersize, options.thread_buffer_size, "Thread local buffer"); + log_trace(arguments) ("Number of global buffers (original) " JLONG_FORMAT " (user defined: %s)", + _dcmd_numglobalbuffers.value(), + _dcmd_numglobalbuffers.is_set() ? "true" : "false"); + log_trace(arguments) ( "Number of global buffers (adjusted) " JULONG_FORMAT " (modified: %s)", + options.buffer_count, + _dcmd_numglobalbuffers.value() != (jlong)options.buffer_count ? "true" : "false"); + log_trace(arguments) ("Number of global buffers (adjustment) %s" JLONG_FORMAT, + (jlong)options.buffer_count < _dcmd_numglobalbuffers.value() ? "" : "+", + (jlong)options.buffer_count - _dcmd_numglobalbuffers.value()); + + MemorySizeArgument adjusted_memory_size; + adjusted_memory_size._val = divide_with_user_unit(_dcmd_memorysize, options.memory_size); + adjusted_memory_size._multiplier = _dcmd_memorysize.value()._multiplier; + adjusted_memory_size._size = options.memory_size; + + MemorySizeArgument adjusted_global_buffer_size; + adjusted_global_buffer_size._val = divide_with_user_unit(_dcmd_globalbuffersize, options.global_buffer_size); + adjusted_global_buffer_size._multiplier = _dcmd_globalbuffersize.value()._multiplier; + adjusted_global_buffer_size._size = options.global_buffer_size; + + MemorySizeArgument adjusted_thread_buffer_size; + adjusted_thread_buffer_size._val = divide_with_user_unit(_dcmd_threadbuffersize, options.thread_buffer_size); + adjusted_thread_buffer_size._multiplier = _dcmd_threadbuffersize.value()._multiplier; + adjusted_thread_buffer_size._size = options.thread_buffer_size; + + // store back to dcmd + _dcmd_memorysize.set_value(adjusted_memory_size); + _dcmd_memorysize.set_is_set(true); + _dcmd_globalbuffersize.set_value(adjusted_global_buffer_size); + _dcmd_globalbuffersize.set_is_set(true); + _dcmd_numglobalbuffers.set_value((jlong)options.buffer_count); + _dcmd_numglobalbuffers.set_is_set(true); + _dcmd_threadbuffersize.set_value(adjusted_thread_buffer_size); + _dcmd_threadbuffersize.set_is_set(true); +} + +static void initialize_memory_options_from_dcmd(JfrMemoryOptions& options) { + options.memory_size = _dcmd_memorysize.value()._size; + options.global_buffer_size = MAX2(_dcmd_globalbuffersize.value()._size, (julong)os::vm_page_size()); + options.buffer_count = (julong)_dcmd_numglobalbuffers.value(); + options.thread_buffer_size = MAX2(_dcmd_threadbuffersize.value()._size, (julong)os::vm_page_size()); + // determine which options have been explicitly set + options.memory_size_configured = _dcmd_memorysize.is_set(); + options.global_buffer_size_configured = _dcmd_globalbuffersize.is_set(); + options.buffer_count_configured = _dcmd_numglobalbuffers.is_set(); + options.thread_buffer_size_configured = _dcmd_threadbuffersize.is_set(); + assert(options.memory_size >= MIN_MEMORY_SIZE, 
"invariant"); + assert(options.global_buffer_size >= MIN_GLOBAL_BUFFER_SIZE, "invariant"); + assert(options.buffer_count >= MIN_BUFFER_COUNT, "invariant"); + assert(options.thread_buffer_size >= MIN_THREAD_BUFFER_SIZE, "invariant"); +} + +template +static bool ensure_gteq(Argument& memory_argument, const jlong value) { + if ((jlong)memory_argument.value()._size < value) { + log_set_value(memory_argument); + log_lower_than_min_value(memory_argument, value); + return false; + } + return true; +} + +static bool ensure_valid_minimum_sizes() { + // ensure valid minimum memory sizes + if (_dcmd_memorysize.is_set()) { + if (!ensure_gteq(_dcmd_memorysize, MIN_MEMORY_SIZE)) { + return false; + } + } + if (_dcmd_globalbuffersize.is_set()) { + if (!ensure_gteq(_dcmd_globalbuffersize, MIN_GLOBAL_BUFFER_SIZE)) { + return false; + } + } + if (_dcmd_numglobalbuffers.is_set()) { + if (!ensure_minimum_count(_dcmd_numglobalbuffers, MIN_BUFFER_COUNT)) { + return false; + } + } + if (_dcmd_threadbuffersize.is_set()) { + if (!ensure_gteq(_dcmd_threadbuffersize, MIN_THREAD_BUFFER_SIZE)) { + return false; + } + } + return true; +} + +/** + * Starting with the initial set of memory values from the user, + * sanitize, enforce min/max rules and adjust to a set of consistent options. + * + * Adjusted memory sizes will be page aligned. + */ +bool JfrOptionSet::adjust_memory_options() { + if (!ensure_valid_minimum_sizes()) { + return false; + } + JfrMemoryOptions options; + initialize_memory_options_from_dcmd(options); + if (!valid_memory_relations(options)) { + return false; + } + if (!JfrMemorySizer::adjust_options(&options)) { + if (!check_for_ambiguity(_dcmd_memorysize, _dcmd_globalbuffersize, _dcmd_numglobalbuffers)) { + return false; + } + } + post_process_adjusted_memory_options(options); + return true; +} + +/* + +to support starting multiple startup recordings + +static const char* start_flight_recording_option_original = NULL; +static const char* flight_recorder_option_original = NULL; + +static void copy_option_string(const JavaVMOption* option, const char** addr) { + assert(option != NULL, "invariant"); + assert(option->optionString != NULL, "invariant"); + const size_t length = strlen(option->optionString); + *addr = JfrCHeapObj::new_array(length + 1); + assert(*addr != NULL, "invarinat"); + strncpy((char*)*addr, option->optionString, length + 1); + assert(strncmp(*addr, option->optionString, length + 1) == 0, "invariant"); +} + +copy_option_string(*option, &start_flight_recording_option_original); +copy_option_string(*option, &flight_recorder_option_original); +*/ + +bool JfrOptionSet::parse_start_flight_recording(const JavaVMOption** option, char* tail) { + assert(option != NULL, "invariant"); + assert(tail != NULL, "invariant"); + assert((*option)->optionString != NULL, "invariant"); + assert(strncmp((*option)->optionString, "-XX:StartFlightRecording", 24) == 0, "invariant"); + if (*tail == '\0') { + // Add dummy dumponexit=false so -XX:StartFlightRecording can be used without a parameter. + // The existing option->optionString points to stack memory so no need to deallocate. 
+    const_cast<JavaVMOption*>(*option)->optionString = (char*)"-XX:StartFlightRecording=dumponexit=false";
+  } else {
+    *tail = '='; // ":" -> "="
+  }
+  return false;
+}
+
+bool JfrOptionSet::parse_flight_recorder_options(const JavaVMOption** option, char* tail) {
+  assert(option != NULL, "invariant");
+  assert(tail != NULL, "invariant");
+  assert((*option)->optionString != NULL, "invariant");
+  assert(strncmp((*option)->optionString, "-XX:FlightRecorderOptions", 25) == 0, "invariant");
+  if (tail != NULL) {
+    *tail = '='; // ":" -> "="
+  }
+  return false;
+}
+
diff --git a/src/share/vm/jfr/recorder/access/jfrOptionSet.hpp b/src/share/vm/jfr/recorder/access/jfrOptionSet.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..dc48eba6905163a5194ae9a1db91b2c2e595bfbf
--- /dev/null
+++ b/src/share/vm/jfr/recorder/access/jfrOptionSet.hpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#ifndef SHARE_VM_JFR_RECORDER_ACCESS_JFROPTIONSET_HPP +#define SHARE_VM_JFR_RECORDER_ACCESS_JFROPTIONSET_HPP + +#include "jni.h" +#include "memory/allocation.hpp" +#include "utilities/exceptions.hpp" + +// +// Command-line options and defaults +// +class JfrOptionSet : public AllStatic { + friend class JfrRecorder; + private: + static jlong _max_chunk_size; + static jlong _global_buffer_size; + static jlong _thread_buffer_size; + static jlong _memory_size; + static jlong _num_global_buffers; + static jlong _old_object_queue_size; + static u4 _stack_depth; + static jboolean _sample_threads; + static jboolean _retransform; + static jboolean _sample_protection; + + static bool initialize(Thread* thread); + static bool configure(TRAPS); + static bool adjust_memory_options(); + + public: + static jlong max_chunk_size(); + static void set_max_chunk_size(jlong value); + static jlong global_buffer_size(); + static void set_global_buffer_size(jlong value); + static jlong thread_buffer_size(); + static void set_thread_buffer_size(jlong value); + static jlong memory_size(); + static void set_memory_size(jlong value); + static jlong num_global_buffers(); + static void set_num_global_buffers(jlong value); + static jint old_object_queue_size(); + static void set_old_object_queue_size(jlong value); + static u4 stackdepth(); + static void set_stackdepth(u4 depth); + static bool sample_threads(); + static void set_sample_threads(jboolean sample); + static bool can_retransform(); + static void set_retransform(jboolean value); + static bool compressed_integers(); + static bool allow_retransforms(); + static bool allow_event_retransforms(); + static bool sample_protection(); + DEBUG_ONLY(static void set_sample_protection(jboolean protection);) + + static bool parse_start_flight_recording(const JavaVMOption** option, char* tail); + static bool parse_flight_recorder_options(const JavaVMOption** option, char* tail); + +}; + +#endif // SHARE_VM_JFR_RECORDER_ACCESS_JFROPTIONSET_HPP diff --git a/src/share/vm/jfr/recorder/access/jfrStackTraceMark.cpp b/src/share/vm/jfr/recorder/access/jfrStackTraceMark.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cc527a6cc3d4d260b04276e07d223b8089b284a3 --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrStackTraceMark.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/recorder/jfrEventSetting.inline.hpp" +#include "jfr/recorder/access/jfrStackTraceMark.hpp" +#include "jfr/recorder/access/jfrThreadData.hpp" +#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" +#include "runtime/thread.inline.hpp" + +JfrStackTraceMark::JfrStackTraceMark() : _t(Thread::current()), _previous_id(0), _previous_hash(0) { + JfrThreadData* const trace_data = _t->trace_data(); + if (trace_data->has_cached_stack_trace()) { + _previous_id = trace_data->cached_stack_trace_id(); + _previous_hash = trace_data->cached_stack_trace_hash(); + } + trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(Thread::current())); +} + +JfrStackTraceMark::JfrStackTraceMark(Thread* t) : _t(t), _previous_id(0), _previous_hash(0) { + JfrThreadData* const trace_data = _t->trace_data(); + if (trace_data->has_cached_stack_trace()) { + _previous_id = trace_data->cached_stack_trace_id(); + _previous_hash = trace_data->cached_stack_trace_hash(); + } + trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(t)); +} + +JfrStackTraceMark::JfrStackTraceMark(TraceEventId eventId) : _t(NULL), _previous_id(0), _previous_hash(0) { + if (JfrEventSetting::has_stacktrace(eventId)) { + _t = Thread::current(); + JfrThreadData* const trace_data = _t->trace_data(); + if (trace_data->has_cached_stack_trace()) { + _previous_id = trace_data->cached_stack_trace_id(); + _previous_hash = trace_data->cached_stack_trace_hash(); + } + trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(_t)); + } +} + +JfrStackTraceMark::JfrStackTraceMark(TraceEventId eventId, Thread* t) : _t(NULL), _previous_id(0), _previous_hash(0) { + if (JfrEventSetting::has_stacktrace(eventId)) { + _t = t; + JfrThreadData* const trace_data = _t->trace_data(); + if (trace_data->has_cached_stack_trace()) { + _previous_id = trace_data->cached_stack_trace_id(); + _previous_hash = trace_data->cached_stack_trace_hash(); + } + trace_data->set_cached_stack_trace_id(JfrStackTraceRepository::record(_t)); + } +} + +JfrStackTraceMark::~JfrStackTraceMark() { + if (_previous_id != 0) { + _t->trace_data()->set_cached_stack_trace_id(_previous_id, _previous_hash); + } else { + if (_t != NULL) { + _t->trace_data()->clear_cached_stack_trace(); + } + } +} diff --git a/src/share/vm/jfr/recorder/access/jfrStackTraceMark.hpp b/src/share/vm/jfr/recorder/access/jfrStackTraceMark.hpp new file mode 100644 index 0000000000000000000000000000000000000000..50ebaa0f779aa840908d6f2b4debdfc5552811cd --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrStackTraceMark.hpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_RECORDER_ACCESS_JFRSTACKTRACEMARK_HPP +#define SHARE_VM_JFR_RECORDER_ACCESS_JFRSTACKTRACEMARK_HPP + +#include "memory/allocation.hpp" +#include "jfr/utilities/jfrTypes.hpp" +#include "tracefiles/traceEventIds.hpp" + +class Thread; + +class JfrStackTraceMark { + private: + Thread* _t; + traceid _previous_id; + unsigned int _previous_hash; + public: + JfrStackTraceMark(); + JfrStackTraceMark(Thread* t); + JfrStackTraceMark(TraceEventId eventId); + JfrStackTraceMark(TraceEventId eventId, Thread* t); + ~JfrStackTraceMark(); +}; + +#endif // SHARE_VM_JFR_RECORDER_ACCESS_JFRSTACKTRACEMARK_HPP diff --git a/src/share/vm/jfr/recorder/access/jfrThreadData.cpp b/src/share/vm/jfr/recorder/access/jfrThreadData.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eee6463d3bab0c367b71baf30d273c6f4ac69e6f --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrThreadData.cpp @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/periodic/jfrThreadCPULoadEvent.hpp" +#include "jfr/jni/jfrJavaSupport.hpp" +#include "jfr/recorder/access/jfrbackend.hpp" +#include "jfr/recorder/access/jfrOptionSet.hpp" +#include "jfr/recorder/access/jfrThreadData.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp" +#include "jfr/recorder/checkpoint/constant/traceid/jfrTraceId.inline.hpp" +#include "jfr/recorder/storage/jfrStorage.hpp" +#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" +#include "memory/allocation.inline.hpp" +#include "runtime/os.hpp" +#include "runtime/thread.inline.hpp" + +/* This data structure is per thread and only accessed by the thread itself, no locking required */ +JfrThreadData::JfrThreadData() : + _java_event_writer(NULL), + _java_buffer(NULL), + _native_buffer(NULL), + _shelved_buffer(NULL), + _stackframes(NULL), + _trace_id(JfrTraceId::assign_thread_id()), + _thread_cp(), + _data_lost(0), + _stack_trace_id(max_julong), + _user_time(0), + _cpu_time(0), + _wallclock_time(os::javaTimeNanos()), + _stack_trace_hash(0), + _stackdepth(0), + _entering_suspend_flag(0) {} + +u8 JfrThreadData::add_data_lost(u8 value) { + _data_lost += value; + return _data_lost; +} + +bool JfrThreadData::has_thread_checkpoint() const { + return _thread_cp.valid(); +} + +void JfrThreadData::set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) { + assert(!_thread_cp.valid(), "invariant"); + _thread_cp = ref; +} + +const JfrCheckpointBlobHandle& JfrThreadData::thread_checkpoint() const { + return _thread_cp; +} + +void JfrThreadData::on_exit(JavaThread* thread) { + JfrCheckpointManager::write_thread_checkpoint(thread); + JfrThreadCPULoadEvent::send_event_for_thread(thread); +} + +void JfrThreadData::on_destruct(Thread* thread) { + JfrThreadData* const thread_data = thread->trace_data(); + if (thread_data->has_native_buffer()) { + release(thread_data->native_buffer(), thread); + } + if (thread_data->has_java_buffer()) { + release(thread_data->java_buffer(), thread); + } + assert(thread_data->shelved_buffer() == NULL, "invariant"); + if (thread->trace_data()->has_java_event_writer()) { + JfrJavaSupport::destroy_global_jni_handle(thread_data->java_event_writer()); + } + destroy_stackframes(thread); +} + +JfrBuffer* JfrThreadData::acquire(Thread* thread, size_t size) { + return JfrStorage::acquire_thread_local(thread, size); +} + +void JfrThreadData::release(JfrBuffer* buffer, Thread* thread) { + assert(buffer != NULL, "invariant"); + JfrStorage::release_thread_local(buffer, thread); +} + +JfrBuffer* JfrThreadData::install_native_buffer() const { + assert(!has_native_buffer(), "invariant"); + _native_buffer = acquire(Thread::current()); + return _native_buffer; +} + +JfrBuffer* JfrThreadData::install_java_buffer() const { + assert(!has_java_buffer(), "invariant"); + assert(!has_java_event_writer(), "invariant"); + _java_buffer = acquire(Thread::current()); + return _java_buffer; +} + +JfrStackFrame* JfrThreadData::install_stackframes() const { + assert(_stackframes == NULL, "invariant"); + _stackdepth = (u4)JfrOptionSet::stackdepth(); + guarantee(_stackdepth > 0, "Stackdepth must be > 0"); + _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, _stackdepth, mtTracing); + return _stackframes; +} + +void JfrThreadData::destroy_stackframes(Thread* thread) { + assert(thread != NULL, "invariant"); + JfrStackFrame* frames = thread->trace_data()->stackframes(); + if (frames != NULL) { + FREE_C_HEAP_ARRAY(JfrStackFrame, frames, mtTracing); + 
thread->trace_data()->set_stackframes(NULL); + } +} diff --git a/src/share/vm/jfr/recorder/access/jfrThreadData.hpp b/src/share/vm/jfr/recorder/access/jfrThreadData.hpp new file mode 100644 index 0000000000000000000000000000000000000000..51bcdf6f4b5711604d159f00ce01c34c98f3a5f9 --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrThreadData.hpp @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_JFR_JFRTHREADDATA_HPP +#define SHARE_VM_JFR_JFRTHREADDATA_HPP + +#include "jni.h" +#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp" +#include "jfr/utilities/jfrTypes.hpp" +#include "trace/traceMacros.hpp" + +class JavaThread; +class JfrBuffer; +class JfrStackFrame; + +class JfrThreadData { + private: + jobject _java_event_writer; + mutable JfrBuffer* _java_buffer; + mutable JfrBuffer* _native_buffer; + JfrBuffer* _shelved_buffer; + mutable JfrStackFrame* _stackframes; + TRACE_DEFINE_TRACE_ID_FIELD; + JfrCheckpointBlobHandle _thread_cp; + u8 _data_lost; + traceid _stack_trace_id; + jlong _user_time; + jlong _cpu_time; + jlong _wallclock_time; + unsigned int _stack_trace_hash; + mutable u4 _stackdepth; + volatile jint _entering_suspend_flag; + + JfrBuffer* install_native_buffer() const; + JfrBuffer* install_java_buffer() const; + JfrStackFrame* install_stackframes() const; + + public: + JfrThreadData(); + + JfrBuffer* native_buffer() const { + return _native_buffer != NULL ? _native_buffer : install_native_buffer(); + } + + bool has_native_buffer() const { + return _native_buffer != NULL; + } + + void set_native_buffer(JfrBuffer* buffer) { + _native_buffer = buffer; + } + + JfrBuffer* java_buffer() const { + return _java_buffer != NULL ? _java_buffer : install_java_buffer(); + } + + bool has_java_buffer() const { + return _java_buffer != NULL; + } + + void set_java_buffer(JfrBuffer* buffer) { + _java_buffer = buffer; + } + + JfrBuffer* shelved_buffer() const { + return _shelved_buffer; + } + + void shelve_buffer(JfrBuffer* buffer) { + _shelved_buffer = buffer; + } + + bool has_java_event_writer() const { + return _java_event_writer != NULL; + } + + jobject java_event_writer() { + return _java_event_writer; + } + + void set_java_event_writer(jobject java_event_writer) { + _java_event_writer = java_event_writer; + } + + JfrStackFrame* stackframes() const { + return _stackframes != NULL ? 
_stackframes : install_stackframes(); + } + + void set_stackframes(JfrStackFrame* frames) { + _stackframes = frames; + } + + u4 stackdepth() const { + return _stackdepth; + } + + void set_stackdepth(u4 depth) { + _stackdepth = depth; + } + + traceid thread_id() const { + return _trace_id; + } + + void set_thread_id(traceid thread_id) { + _trace_id = thread_id; + } + + void set_cached_stack_trace_id(traceid id, unsigned int hash = 0) { + _stack_trace_id = id; + _stack_trace_hash = hash; + } + + bool has_cached_stack_trace() const { + return _stack_trace_id != max_julong; + } + + void clear_cached_stack_trace() { + _stack_trace_id = max_julong; + _stack_trace_hash = 0; + } + + traceid cached_stack_trace_id() const { + return _stack_trace_id; + } + + unsigned int cached_stack_trace_hash() const { + return _stack_trace_hash; + } + + + void set_trace_block() { + _entering_suspend_flag = 1; + } + + void clear_trace_block() { + _entering_suspend_flag = 0; + } + + bool is_trace_block() const { + return _entering_suspend_flag != 0; + } + + u8 data_lost() const { + return _data_lost; + } + + u8 add_data_lost(u8 value); + + jlong get_user_time() const { + return _user_time; + } + + void set_user_time(jlong user_time) { + _user_time = user_time; + } + + jlong get_cpu_time() const { + return _cpu_time; + } + + void set_cpu_time(jlong cpu_time) { + _cpu_time = cpu_time; + } + + jlong get_wallclock_time() const { + return _wallclock_time; + } + + void set_wallclock_time(jlong wallclock_time) { + _wallclock_time = wallclock_time; + } + + bool has_thread_checkpoint() const; + void set_thread_checkpoint(const JfrCheckpointBlobHandle& handle); + const JfrCheckpointBlobHandle& thread_checkpoint() const; + + static JfrBuffer* acquire(Thread* t, size_t size = 0); + static void release(JfrBuffer* buffer, Thread* t); + static void destroy_stackframes(Thread* t); + static void on_exit(JavaThread* t); + static void on_destruct(Thread* t); + + TRACE_DEFINE_TRACE_ID_METHODS; + // Code generation + TRACE_DEFINE_THREAD_ID_OFFSET; + TRACE_DEFINE_THREAD_ID_SIZE; + TRACE_DEFINE_THREAD_DATA_WRITER_OFFSET; +}; + +#endif // SHARE_VM_JFR_JFRTHREADDATA_HPP diff --git a/src/share/vm/jfr/recorder/access/jfrbackend.cpp b/src/share/vm/jfr/recorder/access/jfrbackend.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4d470a89c8858e8077fe7d654aa7f060b51be23b --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrbackend.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "jfr/jfr.hpp" +#include "jfr/periodic/sampling/jfrThreadSampler.hpp" +#include "jfr/recorder/access/jfrbackend.hpp" +#include "jfr/recorder/jfrEventSetting.inline.hpp" + +void JfrBackend::on_javathread_exit(JavaThread *thread) { + Jfr::on_thread_exit(thread); +} + +void JfrBackend::on_thread_destruct(Thread* thread) { + // vm thread shouldn't call Jfr::on_thread_destruct(), ref jdk11. + if (thread == VMThread::vm_thread()) { + return; + } + Jfr::on_thread_destruct(thread); +} + +void JfrBackend::on_javathread_suspend(JavaThread* thread) { + JfrThreadSampler::on_javathread_suspend(thread); +} + +bool JfrBackend::enabled() { + return Jfr::is_enabled(); +} + +bool JfrBackend::is_event_enabled(TraceEventId event_id) { + return JfrEventSetting::is_enabled(event_id); +} + +bool JfrBackend::is_stacktrace_enabled(TraceEventId event_id) { + return JfrEventSetting::has_stacktrace(event_id); +} + +jlong JfrBackend::threshold(TraceEventId event_id) { + return JfrEventSetting::threshold(event_id); +} + +void JfrBackend::on_unloading_classes() { + Jfr::on_unloading_classes(); +} + +JfrTraceTime JfrBackend::time() { + return JfrTraceTime::now(); +} diff --git a/src/share/vm/jfr/recorder/access/jfrbackend.hpp b/src/share/vm/jfr/recorder/access/jfrbackend.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f6eff3d479f58aad81e24fdff0987b73f42ad9a0 --- /dev/null +++ b/src/share/vm/jfr/recorder/access/jfrbackend.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#ifndef SHARE_VM_JFR_RECORDER_ACCESS_JFRBACKEND_HPP +#define SHARE_VM_JFR_RECORDER_ACCESS_JFRBACKEND_HPP + +#include "memory/allocation.hpp" +#include "jfr/utilities/jfrTraceTime.hpp" +#include "tracefiles/traceEventIds.hpp" + +// trace macro targets + +class JfrBackend { + public: + static void on_javathread_exit(JavaThread *thread); + static void on_thread_destruct(Thread* thread); + static void on_javathread_suspend(JavaThread* thread); + static void on_unloading_classes(); + static bool enabled(); + static bool is_event_enabled(TraceEventId event_id); + static bool is_stacktrace_enabled(TraceEventId event_id); + static jlong threshold(TraceEventId event_id); + static JfrTraceTime time(); +}; + +#endif // SHARE_VM_JFR_RECORDER_ACCESS_JFRBACKEND_HPP diff --git a/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstant.cpp b/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstant.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2343c957a9d84a0e18094b293522e2b1f3079354 --- /dev/null +++ b/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstant.cpp @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.hpp" +#include "code/codeBlob.hpp" +#include "code/codeCache.hpp" +#include "gc_interface/gcCause.hpp" +#include "gc_interface/gcName.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcWhen.hpp" +#include "jfr/recorder/access/jfrThreadData.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp" +#include "jfr/recorder/checkpoint/constant/jfrConstant.hpp" +#include "jfr/recorder/jfrRecorder.hpp" +#include "jfr/recorder/checkpoint/constant/jfrTagSet.hpp" +#include "jfr/recorder/checkpoint/constant/jfrThreadGroup.hpp" +#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp" +#include "jfr/leakprofiler/leakProfiler.hpp" +#include "jfr/writers/jfrJavaEventWriter.hpp" +#include "memory/metaspaceGCThresholdUpdater.hpp" +#include "memory/referenceType.hpp" +#include "memory/universe.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/osThread.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/synchronizer.hpp" +#include "runtime/thread.inline.hpp" +#include "runtime/vm_operations.hpp" +#include "trace/traceVM.hpp" + +#ifdef COMPILER2 +#include "opto/compile.hpp" +#include "opto/node.hpp" +#endif +#if INCLUDE_ALL_GCS +#include "gc_implementation/g1/g1HeapRegionTraceType.hpp" +#include "gc_implementation/g1/g1YCTypes.hpp" +#endif + +// implementation for the static registration function exposed in the api +bool JfrConstantSerializer::register_serializer(JfrConstantTypeId id, bool require_safepoint, bool permit_cache, JfrConstantSerializer* cs) { + assert(cs != NULL, "invariant"); + return JfrCheckpointManager::register_serializer(id, require_safepoint, permit_cache, cs); +} + +class JfrCheckpointThreadCountClosure : public ThreadClosure { +private: + u4 _total_threads; +public: + JfrCheckpointThreadCountClosure() : _total_threads(0) {} + u4 total_threads() { return _total_threads; } + void do_thread(Thread *t) { _total_threads++; } +}; + +// Requires a ResourceMark for get_thread_name/as_utf8 +class JfrCheckpointThreadClosure : public ThreadClosure { + private: + JfrCheckpointWriter& _writer; + Thread* _curthread; + public: + JfrCheckpointThreadClosure(JfrCheckpointWriter& writer) : _writer(writer), _curthread(Thread::current()) {} + void do_thread(Thread* t); +}; + +// Requires a ResourceMark for get_thread_name/as_utf8 +void JfrCheckpointThreadClosure::do_thread(Thread* t) { + assert(t != NULL, "invariant"); + assert_locked_or_safepoint(Threads_lock); + _writer.write_key(t->trace_data()->thread_id()); + _writer.write(t->name()); + const OSThread* const os_thread = t->osthread(); + _writer.write(os_thread != NULL ? 
os_thread->thread_id() : (u8)0); + if (t->is_Java_thread()) { + JavaThread* const jt = (JavaThread*)t; + _writer.write(jt->name()); + _writer.write(java_lang_Thread::thread_id(jt->threadObj())); + _writer.write(JfrThreadGroup::thread_group_id(jt, _curthread)); + // since we are iterating threads during a safepoint, also issue notification + JfrJavaEventWriter::notify(jt); + return; + } + _writer.write((const char*)NULL); // java name + _writer.write((traceid)0); // java thread id + _writer.write((traceid)0); // java thread group +} + +void JfrThreadConstantSet::write_constants(JfrCheckpointWriter& writer) { + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + JfrCheckpointThreadCountClosure tcc; + Threads::threads_do(&tcc); + const u4 total_threads = tcc.total_threads(); + // THREADS + writer.write_number_of_constants(total_threads); + JfrCheckpointThreadClosure tc(writer); + Threads::threads_do(&tc); +} + +void JfrThreadGroupConstant::write_constants(JfrCheckpointWriter& writer) { + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + JfrThreadGroup::write(writer); +} + +static const char* flag_value_origin_to_string(Flag::Flags origin) { + switch (origin) { + case Flag::DEFAULT: return "Default"; + case Flag::COMMAND_LINE: return "Command line"; + case Flag::ENVIRON_VAR: return "Environment variable"; + case Flag::CONFIG_FILE: return "Config file"; + case Flag::MANAGEMENT: return "Management"; + case Flag::ERGONOMIC: return "Ergonomic"; + case Flag::ATTACH_ON_DEMAND: return "Attach on demand"; + case Flag::INTERNAL: return "Internal"; + default: ShouldNotReachHere(); return ""; + } +} + +void FlagValueOriginConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = Flag::LAST_VALUE_ORIGIN + 1; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(flag_value_origin_to_string((Flag::Flags)i)); + } +} + +void GCCauseConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = GCCause::_last_gc_cause; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(GCCause::to_string((GCCause::Cause)i)); + } +} + +void GCNameConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = GCNameEndSentinel; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(GCNameHelper::to_string((GCName)i)); + } +} + +void GCWhenConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = GCWhen::GCWhenEndSentinel; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(GCWhen::to_string((GCWhen::Type)i)); + } +} + +void G1HeapRegionTypeConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = G1HeapRegionTraceType::G1HeapRegionTypeEndSentinel; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(G1HeapRegionTraceType::to_string((G1HeapRegionTraceType::Type)i)); + } +} + +void GCThresholdUpdaterConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = MetaspaceGCThresholdUpdater::Last; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + 
writer.write(MetaspaceGCThresholdUpdater::to_string((MetaspaceGCThresholdUpdater::Type)i)); + } +} + +void MetadataTypeConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = Metaspace::MetadataTypeCount; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(Metaspace::metadata_type_name((Metaspace::MetadataType)i)); + } +} + +void MetaspaceObjTypeConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = MetaspaceObj::_number_of_types; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(MetaspaceObj::type_name((MetaspaceObj::Type)i)); + } +} + +void G1YCTypeConstant::write_constants(JfrCheckpointWriter& writer) { +#if INCLUDE_ALL_GCS + static const u4 nof_entries = G1YCTypeEndSentinel; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(G1YCTypeHelper::to_string((G1YCType)i)); + } +#endif +} + +static const char* reference_type_to_string(ReferenceType rt) { + switch (rt) { + case REF_NONE: return "None reference"; + case REF_OTHER: return "Other reference"; + case REF_SOFT: return "Soft reference"; + case REF_WEAK: return "Weak reference"; + case REF_FINAL: return "Final reference"; + case REF_PHANTOM: return "Phantom reference"; + default: + ShouldNotReachHere(); + return NULL; + } +} + +void ReferenceTypeConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = REF_PHANTOM + 1; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(reference_type_to_string((ReferenceType)i)); + } +} + +void NarrowOopModeConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = Universe::HeapBasedNarrowOop + 1; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(Universe::narrow_oop_mode_to_string((Universe::NARROW_OOP_MODE)i)); + } +} + +void CompilerPhaseTypeConstant::write_constants(JfrCheckpointWriter& writer) { +#ifdef COMPILER2 + static const u4 nof_entries = PHASE_NUM_TYPES; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(CompilerPhaseTypeHelper::to_string((CompilerPhaseType)i)); + } +#endif +} + +void CodeBlobTypeConstant::write_constants(JfrCheckpointWriter& writer) { + // only one code blob now in ajdk8 + static const u4 nof_entries = CodeBlobType::NumTypes; + writer.write_number_of_constants(nof_entries); + writer.write_key((u4)CodeBlobType::All); + writer.write("CodeCache"); +}; + +void VMOperationTypeConstant::write_constants(JfrCheckpointWriter& writer) { + static const u4 nof_entries = VM_Operation::VMOp_Terminating; + writer.write_number_of_constants(nof_entries); + for (u4 i = 0; i < nof_entries; ++i) { + writer.write_key(i); + writer.write(VM_Operation::name(VM_Operation::VMOp_Type(i))); + } +} + +class ConstantTagSet { + private: + bool _class_unload; + public: + explicit ConstantTagSet(bool class_unload) : _class_unload(class_unload) {} + void write(JfrCheckpointWriter& writer, JfrCheckpointWriter* leakp_writer) { + JfrTagSet::write(&writer, leakp_writer, _class_unload); + } +}; + +void ClassUnloadConstantSet::write_constants(JfrCheckpointWriter& writer) { + ConstantTagSet tag_set(true); + if 
(LeakProfiler::is_running()) { + JfrCheckpointWriter leakp_writer(false, true, Thread::current()); + tag_set.write(writer, &leakp_writer); + ObjectSampleCheckpoint::install(leakp_writer, true, true); + return; + } + tag_set.write(writer, NULL); +}; + +void ConstantSet::write_constants(JfrCheckpointWriter& writer) { + ConstantTagSet tag_set(false); + if (LeakProfiler::is_suspended()) { + JfrCheckpointWriter leakp_writer(false, true, Thread::current()); + tag_set.write(writer, &leakp_writer); + ObjectSampleCheckpoint::install(leakp_writer, false, true); + return; + } + tag_set.write(writer, NULL); +}; + +void ThreadStateConstant::write_constants(JfrCheckpointWriter& writer) { + // Thread states + { + enum { +#define _dummy_enum(n, v) _dummy_##n, + MAKE_JAVATHREAD_STATES(_dummy_enum) + NUM_JAVATHREAD_STATES + }; + + writer.write_number_of_constants(NUM_JAVATHREAD_STATES); +#define _write_ts(n, v) writer.write(v); writer.write(#n); + MAKE_JAVATHREAD_STATES(_write_ts) + } +} + +void JfrThreadConstant::write_constants(JfrCheckpointWriter& writer) { + assert(_thread != NULL, "invariant"); + assert(_thread == Thread::current(), "invariant"); + assert(_thread->is_Java_thread(), "invariant"); + assert(!_thread->trace_data()->has_thread_checkpoint(), "invariant"); + ResourceMark rm(_thread); + const oop threadObj = _thread->threadObj(); + assert(threadObj != NULL, "invariant"); + const u8 java_lang_thread_id = java_lang_Thread::thread_id(threadObj); + const char* const thread_name = _thread->name(); + const traceid thread_group_id = JfrThreadGroup::thread_group_id(_thread); + writer.write_number_of_constants(1); + writer.write_key(_thread->trace_data()->thread_id()); + writer.write(thread_name); + writer.write((u8)_thread->osthread()->thread_id()); + writer.write(thread_name); + writer.write(java_lang_thread_id); + writer.write(thread_group_id); + JfrThreadGroup::write(&writer, thread_group_id); +} diff --git a/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstant.hpp b/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstant.hpp new file mode 100644 index 0000000000000000000000000000000000000000..dda8b3abbf9adac960ec6cf3b5a3acec5ecc6321 --- /dev/null +++ b/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstant.hpp @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_JFR_CHECKPOINT_CONSTANT_JFRCONSTANT_HPP +#define SHARE_VM_JFR_CHECKPOINT_CONSTANT_JFRCONSTANT_HPP + +#include "jfr/metadata/jfrConstantSerializer.hpp" + +class JfrThreadConstantSet : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class JfrThreadGroupConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class ClassUnloadConstantSet : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class FlagValueOriginConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class GCCauseConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class GCNameConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class GCWhenConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class G1HeapRegionTypeConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class GCThresholdUpdaterConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class MetadataTypeConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class MetaspaceObjTypeConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class G1YCTypeConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class ReferenceTypeConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class NarrowOopModeConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class CompilerPhaseTypeConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class CodeBlobTypeConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class VMOperationTypeConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class ConstantSet : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class ThreadStateConstant : public JfrConstantSerializer { + public: + void write_constants(JfrCheckpointWriter& writer); +}; + +class JfrThreadConstant : public JfrConstantSerializer { + private: + JavaThread* _thread; + public: + JfrThreadConstant(JavaThread* jt) : _thread(jt) {} + void write_constants(JfrCheckpointWriter& writer); +}; + +#endif // SHARE_VM_JFR_CHECKPOINT_CONSTANT_JFRCONSTANT_HPP diff --git a/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstantManager.cpp b/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstantManager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..47e28e15da82e0a31f4bd0655bedc129b5342621 --- /dev/null +++ b/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstantManager.cpp @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" +#include "jfr/recorder/checkpoint/constant/jfrConstantManager.hpp" +#include "jfr/recorder/checkpoint/constant/jfrConstant.hpp" +#include "jfr/utilities/jfrIterator.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/thread.inline.hpp" +#include "utilities/exceptions.hpp" + +JfrSerializerRegistration::JfrSerializerRegistration(JfrConstantTypeId id, bool permit_cache, JfrConstantSerializer* cs) : + _next(NULL), + _prev(NULL), + _serializer(cs), + _cache(), + _id(id), + _permit_cache(permit_cache) {} + +JfrSerializerRegistration::~JfrSerializerRegistration() { + delete _serializer; +} + +JfrSerializerRegistration* JfrSerializerRegistration::next() const { + return _next; +} + +void JfrSerializerRegistration::set_next(JfrSerializerRegistration* next) { + _next = next; +} + +JfrSerializerRegistration* JfrSerializerRegistration::prev() const { + return _prev; +} + +void JfrSerializerRegistration::set_prev(JfrSerializerRegistration* prev) { + _prev = prev; +} + +JfrConstantTypeId JfrSerializerRegistration::id() const { + return _id; +} + +void JfrSerializerRegistration::invoke_serializer(JfrCheckpointWriter& writer) const { + if (_cache.valid()) { + writer.increment(); + _cache->write(writer); + return; + } + const JfrCheckpointContext ctx = writer.context(); + writer.write_constant_type(_id); + _serializer->write_constants(writer); + if (_permit_cache) { + _cache = writer.copy(&ctx); + } +} + +JfrConstantManager::~JfrConstantManager() { + Iterator iter(_constants); + JfrSerializerRegistration* registration; + while (iter.has_next()) { + registration = _constants.remove(iter.next()); + assert(registration != NULL, "invariant"); + delete registration; + } + Iterator sp_type_iter(_safepoint_constants); + while (sp_type_iter.has_next()) { + registration = _safepoint_constants.remove(sp_type_iter.next()); + assert(registration != NULL, "invariant"); + delete registration; + } +} + +size_t JfrConstantManager::number_of_registered_constant_types() const { + size_t count = 0; + const Iterator iter(_constants); + while (iter.has_next()) { + ++count; + iter.next(); + } + const Iterator sp_type_iter(_safepoint_constants); + while (sp_type_iter.has_next()) { + ++count; + sp_type_iter.next(); + } + return count; +} + +void JfrConstantManager::write_constants(JfrCheckpointWriter& writer) const { + const Iterator iter(_constants); + while (iter.has_next()) { + iter.next()->invoke_serializer(writer); + } +} + +void JfrConstantManager::write_safepoint_constants(JfrCheckpointWriter& writer) const { + 
assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + const Iterator iter(_safepoint_constants); + while (iter.has_next()) { + iter.next()->invoke_serializer(writer); + } +} + +void JfrConstantManager::write_constant_tag_set() const { + assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); + // can safepoint here because of PackageTable_lock + MutexLockerEx lock(PackageTable_lock); + JfrCheckpointWriter writer(true, true, Thread::current()); + ConstantSet constant_set; + constant_set.write_constants(writer); +} + +void JfrConstantManager::write_constant_tag_set_for_unloaded_classes() const { + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + JfrCheckpointWriter writer(false, true, Thread::current()); + ClassUnloadConstantSet class_unload_constant_set; + class_unload_constant_set.write_constants(writer); +} + +void JfrConstantManager::create_thread_checkpoint(JavaThread* jt) const { + assert(jt != NULL, "invariant"); + JfrThreadConstant constant_type_thread(jt); + JfrCheckpointWriter writer(false, true, jt); + writer.write_constant_type(CONSTANT_TYPE_THREAD); + constant_type_thread.write_constants(writer); + // create and install a checkpoint blob + jt->trace_data()->set_thread_checkpoint(writer.checkpoint_blob()); + assert(jt->trace_data()->has_thread_checkpoint(), "invariant"); +} + +void JfrConstantManager::write_thread_checkpoint(JavaThread* jt) const { + assert(jt != NULL, "JavaThread is NULL!"); + ResourceMark rm(jt); + if (jt->trace_data()->has_thread_checkpoint()) { + JfrCheckpointWriter writer(false, false, jt); + jt->trace_data()->thread_checkpoint()->write(writer); + } else { + JfrThreadConstant constant_type_thread(jt); + JfrCheckpointWriter writer(false, true, jt); + writer.write_constant_type(CONSTANT_TYPE_THREAD); + constant_type_thread.write_constants(writer); + } +} + +#ifdef ASSERT +static void assert_not_registered_twice(JfrConstantTypeId id, JfrConstantManager::List& list) { + const JfrConstantManager::Iterator iter(list); + while (iter.has_next()) { + assert(iter.next()->id() != id, "invariant"); + } +} +#endif + +bool JfrConstantManager::register_serializer(JfrConstantTypeId id, bool require_safepoint, bool permit_cache, JfrConstantSerializer* cs) { + assert(cs != NULL, "invariant"); + JfrSerializerRegistration* const registration = new JfrSerializerRegistration(id, permit_cache, cs); + if (registration == NULL) { + delete cs; + return false; + } + if (require_safepoint) { + assert(!_safepoint_constants.in_list(registration), "invariant"); + DEBUG_ONLY(assert_not_registered_twice(id, _safepoint_constants);) + _safepoint_constants.prepend(registration); + } + else { + assert(!_constants.in_list(registration), "invariant"); + DEBUG_ONLY(assert_not_registered_twice(id, _constants);) + _constants.prepend(registration); + } + return true; +} + +bool JfrConstantManager::initialize() { + // non-safepointing serializers + for (size_t i = 0; i < 16; ++i) { + switch (i) { + case 0: register_serializer(CONSTANT_TYPE_FLAGVALUEORIGIN, false, true, new FlagValueOriginConstant()); break; + case 1: break; + case 2: register_serializer(CONSTANT_TYPE_GCCAUSE, false, true, new GCCauseConstant()); break; + case 3: register_serializer(CONSTANT_TYPE_GCNAME, false, true, new GCNameConstant()); break; + case 4: register_serializer(CONSTANT_TYPE_GCWHEN, false, true, new GCWhenConstant()); break; + case 5: register_serializer(CONSTANT_TYPE_G1HEAPREGIONTYPE, false, true, new G1HeapRegionTypeConstant()); break; + case 6: 
register_serializer(CONSTANT_TYPE_GCTHRESHOLDUPDATER, false, true, new GCThresholdUpdaterConstant()); break; + case 7: register_serializer(CONSTANT_TYPE_METADATATYPE, false, true, new MetadataTypeConstant()); break; + case 8: register_serializer(CONSTANT_TYPE_METASPACEOBJTYPE, false, true, new MetaspaceObjTypeConstant()); break; + case 9: register_serializer(CONSTANT_TYPE_G1YCTYPE, false, true, new G1YCTypeConstant()); break; + case 10: register_serializer(CONSTANT_TYPE_REFERENCETYPE, false, true, new ReferenceTypeConstant()); break; + case 11: register_serializer(CONSTANT_TYPE_NARROWOOPMODE, false, true, new NarrowOopModeConstant()); break; + case 12: register_serializer(CONSTANT_TYPE_COMPILERPHASETYPE, false, true, new CompilerPhaseTypeConstant()); break; + case 13: register_serializer(CONSTANT_TYPE_CODEBLOBTYPE, false, true, new CodeBlobTypeConstant()); break; + case 14: register_serializer(CONSTANT_TYPE_VMOPERATIONTYPE, false, true, new VMOperationTypeConstant()); break; + case 15: register_serializer(CONSTANT_TYPE_THREADSTATE, false, true, new ThreadStateConstant()); break; + default: + guarantee(false, "invariant"); + } + } + + // safepointing serializers + for (size_t i = 0; i < 2; ++i) { + switch (i) { + case 0: register_serializer(CONSTANT_TYPE_THREADGROUP, true, false, new JfrThreadGroupConstant()); break; + case 1: register_serializer(CONSTANT_TYPE_THREAD, true, false, new JfrThreadConstantSet()); break; + default: + guarantee(false, "invariant"); + } + } + return true; +} + + diff --git a/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstantManager.hpp b/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstantManager.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5767d77f8a5df911a780d34c096c9785a805ed22 --- /dev/null +++ b/src/share/vm/jfr/recorder/checkpoint/constant/jfrConstantManager.hpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+#ifndef SHARE_VM_JFR_CHECKPOINT_CONSTANT_JFRCONSTANTMANAGER_HPP
+#define SHARE_VM_JFR_CHECKPOINT_CONSTANT_JFRCONSTANTMANAGER_HPP
+
+#include "jfr/metadata/jfrConstantSerializer.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrDoublyLinkedList.hpp"
+#include "jfr/utilities/jfrIterator.hpp"
+
+class JfrSerializerRegistration : public JfrCHeapObj {
+ private:
+ JfrSerializerRegistration* _next;
+ JfrSerializerRegistration* _prev;
+ JfrConstantSerializer* _serializer;
+ mutable JfrCheckpointBlobHandle _cache;
+ JfrConstantTypeId _id;
+ bool _permit_cache;
+
+ public:
+ JfrSerializerRegistration(JfrConstantTypeId id, bool permit_cache, JfrConstantSerializer* serializer);
+ ~JfrSerializerRegistration();
+ JfrSerializerRegistration* next() const;
+ void set_next(JfrSerializerRegistration* next);
+ JfrSerializerRegistration* prev() const;
+ void set_prev(JfrSerializerRegistration* prev);
+ void invoke_serializer(JfrCheckpointWriter& writer) const;
+ JfrConstantTypeId id() const;
+};
+
+class JfrConstantManager : public JfrCHeapObj {
+ friend class JfrCheckpointManager;
+ public:
+ typedef JfrDoublyLinkedList<JfrSerializerRegistration> List;
+ typedef StopOnNullIterator<const List> Iterator;
+ private:
+ List _constants;
+ List _safepoint_constants;
+
+ ~JfrConstantManager();
+ bool initialize();
+ size_t number_of_registered_constant_types() const;
+ void write_constants(JfrCheckpointWriter& writer) const;
+ void write_safepoint_constants(JfrCheckpointWriter& writer) const;
+ void write_constant_tag_set() const;
+ void write_constant_tag_set_for_unloaded_classes() const;
+ void create_thread_checkpoint(JavaThread* jt) const;
+ void write_thread_checkpoint(JavaThread* jt) const;
+ bool register_serializer(JfrConstantTypeId id, bool require_safepoint, bool permit_cache, JfrConstantSerializer* serializer);
+};
+#endif // SHARE_VM_JFR_CHECKPOINT_CONSTANT_JFRCONSTANTMANAGER_HPP
diff --git a/src/share/vm/jfr/recorder/checkpoint/constant/jfrTagSet.cpp b/src/share/vm/jfr/recorder/checkpoint/constant/jfrTagSet.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1d84691a541d2264704e22f2ec18cd0d05fa3f42
--- /dev/null
+++ b/src/share/vm/jfr/recorder/checkpoint/constant/jfrTagSet.cpp
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#include "precompiled.hpp" +#include "classfile/classLoaderData.inline.hpp" +#include "classfile/javaClasses.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/systemDictionary.hpp" +#include "jfr/jfr.hpp" +#include "jfr/jni/jfrGetAllEventClasses.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" +#include "jfr/recorder/checkpoint/constant/jfrTagSet.hpp" +#include "jfr/recorder/checkpoint/constant/jfrTagSetUtils.hpp" +#include "jfr/recorder/checkpoint/constant/jfrTagSetWriter.hpp" +#include "jfr/recorder/checkpoint/constant/traceid/jfrTraceId.inline.hpp" +#include "jfr/recorder/storage/jfrBuffer.hpp" +#include "jfr/utilities/jfrHashtable.hpp" +#include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" +#include "oops/instanceKlass.hpp" +#include "oops/objArrayKlass.hpp" +#include "oops/oop.inline.hpp" +#include "memory/resourceArea.hpp" +// to get CONTENT_TYPE defines +#include "tracefiles/traceTypes.hpp" +#include "utilities/accessFlags.hpp" + +// incremented on each checkpoint +static u8 checkpoint_id = 0; + +// creates a unique id by combining a checkpoint relative symbol id (2^24) +// with the current checkpoint id (2^40) +#define CREATE_SYMBOL_ID(sym_id) (((u8)((checkpoint_id << 24) | sym_id))) + +typedef const Klass* KlassPtr; +typedef const ClassLoaderData* CldPtr; +typedef const Method* MethodPtr; +typedef const Symbol* SymbolPtr; +typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr; +typedef const JfrSymbolId::CStringEntry* CStringEntryPtr; + +static traceid cld_id(CldPtr cld) { + assert(cld != NULL, "invariant"); + return cld->is_anonymous() ? 0 : TRACE_ID(cld); +} + +static void tag_leakp_klass_artifacts(KlassPtr k, bool class_unload) { + assert(k != NULL, "invariant"); + CldPtr cld = k->class_loader_data(); + assert(cld != NULL, "invariant"); + if (!cld->is_anonymous()) { + tag_leakp_artifact(cld, class_unload); + } +} + +class TagLeakpKlassArtifact { + bool _class_unload; + public: + TagLeakpKlassArtifact(bool class_unload) : _class_unload(class_unload) {} + bool operator()(KlassPtr klass) { + if (_class_unload) { + if (LEAKP_USED_THIS_EPOCH(klass)) { + tag_leakp_klass_artifacts(klass, _class_unload); + } + } else { + if (LEAKP_USED_PREV_EPOCH(klass)) { + tag_leakp_klass_artifacts(klass, _class_unload); + } + } + return true; + } +}; + +/* + * In C++03, functions used as template parameters must have external linkage; + * this restriction was removed in C++11. Change back to "static" and + * rename functions when C++11 becomes available. + * + * The weird naming is an effort to decrease the risk of name clashes. 
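+ *
+ * A minimal sketch of the pattern in question, with made-up names used purely
+ * for illustration:
+ *
+ *   int write_entry(JfrCheckpointWriter*, JfrArtifactSet*, const void*); // external linkage
+ *
+ *   template <int (*write_fn)(JfrCheckpointWriter*, JfrArtifactSet*, const void*)>
+ *   class WriterHost {};
+ *
+ *   typedef WriterHost<write_entry> EntryWriter; // C++03 rejects this if write_entry is static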
+ */
+
+int write__artifact__klass(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
+ assert(writer != NULL, "invariant");
+ assert(artifacts != NULL, "invariant");
+ assert(k != NULL, "invariant");
+ KlassPtr klass = (KlassPtr)k;
+ traceid pkg_id = 0;
+ KlassPtr theklass = klass;
+ if (theklass->oop_is_objArray()) {
+ const ObjArrayKlass* obj_arr_klass = ObjArrayKlass::cast(klass);
+ theklass = obj_arr_klass->bottom_klass();
+ }
+ if (theklass->oop_is_instance()) {
+ } else {
+ assert(theklass->oop_is_typeArray(), "invariant");
+ }
+ const traceid symbol_id = artifacts->mark(klass);
+ assert(symbol_id > 0, "need to have an address for symbol!");
+ writer->write(TRACE_ID(klass));
+ writer->write(cld_id(klass->class_loader_data()));
+ writer->write((traceid)CREATE_SYMBOL_ID(symbol_id));
+ writer->write((s4)klass->access_flags().get_flags());
+ return 1;
+}
+
+typedef LeakPredicate<KlassPtr> LeakKlassPredicate;
+typedef JfrPredicatedArtifactWriterImplHost<KlassPtr, LeakKlassPredicate, write__artifact__klass> LeakKlassWriterImpl;
+typedef JfrArtifactWriterHost<LeakKlassWriterImpl, CONSTANT_TYPE_CLASS> LeakKlassWriter;
+typedef JfrArtifactWriterImplHost<KlassPtr, write__artifact__klass> KlassWriterImpl;
+typedef JfrArtifactWriterHost<KlassWriterImpl, CONSTANT_TYPE_CLASS> KlassWriter;
+
+int write__artifact__method(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* m) {
+ assert(writer != NULL, "invariant");
+ assert(artifacts != NULL, "invariant");
+ assert(m != NULL, "invariant");
+ MethodPtr method = (MethodPtr)m;
+ const traceid method_name_symbol_id = artifacts->mark(method->name());
+ assert(method_name_symbol_id > 0, "invariant");
+ const traceid method_sig_symbol_id = artifacts->mark(method->signature());
+ assert(method_sig_symbol_id > 0, "invariant");
+ KlassPtr klass = method->method_holder();
+ assert(klass != NULL, "invariant");
+ assert(METHOD_USED_ANY_EPOCH(klass), "invariant");
+ writer->write((u8)METHOD_ID(klass, method));
+ writer->write((u8)TRACE_ID(klass));
+ writer->write((u8)CREATE_SYMBOL_ID(method_name_symbol_id));
+ writer->write((u8)CREATE_SYMBOL_ID(method_sig_symbol_id));
+ writer->write((u2)method->access_flags().get_flags());
+ writer->write(const_cast<Method*>(method)->is_hidden() ? (u1)1 : (u1)0);
+ return 1;
+}
+
+typedef JfrArtifactWriterImplHost<MethodPtr, write__artifact__method> MethodWriterImplTarget;
+typedef JfrArtifactWriterHost<MethodWriterImplTarget, CONSTANT_TYPE_METHOD> MethodWriterImpl;
+
+int write__artifact__classloader(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* c) {
+ assert(c != NULL, "invariant");
+ CldPtr cld = (CldPtr)c;
+ assert(!cld->is_anonymous(), "invariant");
+ const traceid cld_id = TRACE_ID(cld);
+ // class loader type
+ const oop class_loader_oop = cld->class_loader();
+ if (class_loader_oop == NULL) {
+ // (primordial) boot class loader
+ writer->write(cld_id); // class loader instance id
+ writer->write((traceid)0); // class loader type id (absence of)
+ writer->write((traceid)CREATE_SYMBOL_ID(1)); // 1 maps to synthetic name -> "boot"
+ return 1;
+ }
+ assert(class_loader_oop != NULL, "invariant");
+ KlassPtr class_loader_klass = class_loader_oop->klass();
+ traceid symbol_name_id = 0;
+ const oop class_loader_name_oop = NULL;
+ if (class_loader_name_oop != NULL) {
+ const char* class_loader_instance_name =
+ java_lang_String::as_utf8_string(class_loader_name_oop);
+ if (class_loader_instance_name != NULL && class_loader_instance_name[0] != '\0') {
+ // tag the symbol as "anonymous" since it's not really a Symbol* but a const char*;
+ // it will be handled correctly if it has the anonymous tag on insertion
+ symbol_name_id = artifacts->mark(class_loader_instance_name,
+ java_lang_String::hash_code(class_loader_name_oop));
+ }
+ }
+ writer->write(cld_id); // class loader instance id
+ writer->write(TRACE_ID(class_loader_klass)); // class loader type id
+ writer->write(symbol_name_id == 0 ? (traceid)0 :
+ (traceid)CREATE_SYMBOL_ID(symbol_name_id)); // class loader instance name
+ return 1;
+}
+
+typedef LeakPredicate<CldPtr> LeakCldPredicate;
+int _compare_cld_ptr_(CldPtr const& lhs, CldPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
+typedef UniquePredicate<CldPtr, _compare_cld_ptr_> CldPredicate;
+typedef JfrPredicatedArtifactWriterImplHost<CldPtr, LeakCldPredicate, write__artifact__classloader> LeakCldWriterImpl;
+typedef JfrPredicatedArtifactWriterImplHost<CldPtr, CldPredicate, write__artifact__classloader> CldWriterImpl;
+typedef JfrArtifactWriterHost<LeakCldWriterImpl, CONSTANT_TYPE_CLASSLOADER> LeakCldWriter;
+typedef JfrArtifactWriterHost<CldWriterImpl, CONSTANT_TYPE_CLASSLOADER> CldWriter;
+
+typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr;
+
+static int write__artifact__symbol__entry__(JfrCheckpointWriter* writer,
+ SymbolEntryPtr entry) {
+ assert(writer != NULL, "invariant");
+ assert(entry != NULL, "invariant");
+ ResourceMark rm;
+ writer->write(CREATE_SYMBOL_ID(entry->id()));
+ writer->write(entry->value()->as_C_string());
+ return 1;
+}
+
+int write__artifact__symbol__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
+ assert(e != NULL, "invariant");
+ return write__artifact__symbol__entry__(writer, (SymbolEntryPtr)e);
+}
+
+typedef JfrArtifactWriterImplHost<SymbolEntryPtr, write__artifact__symbol__entry> SymbolEntryWriterImpl;
+typedef JfrArtifactWriterHost<SymbolEntryWriterImpl, CONSTANT_TYPE_SYMBOL> SymbolEntryWriter;
+
+typedef const JfrSymbolId::CStringEntry* CStringEntryPtr;
+
+static int write__artifact__cstring__entry__(JfrCheckpointWriter* writer, CStringEntryPtr entry) {
+ assert(writer != NULL, "invariant");
+ assert(entry != NULL, "invariant");
+ writer->write(CREATE_SYMBOL_ID(entry->id()));
+ writer->write(entry->value());
+ return 1;
+}
+
+int write__artifact__cstring__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
+ assert(e != NULL, "invariant");
+ return write__artifact__cstring__entry__(writer, (CStringEntryPtr)e);
+}
+
+typedef JfrArtifactWriterImplHost<CStringEntryPtr, write__artifact__cstring__entry> CStringEntryWriterImpl;
+typedef JfrArtifactWriterHost<CStringEntryWriterImpl, CONSTANT_TYPE_SYMBOL> CStringEntryWriter;
+
+int write__artifact__klass__symbol(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
+ assert(writer != NULL, "invariant");
+ assert(artifacts != NULL, "invariant");
+ assert(k != NULL, "invariant");
+ const InstanceKlass* const ik = (const InstanceKlass*)k;
+ if (ik->is_anonymous()) {
+ CStringEntryPtr entry =
+ artifacts->map_cstring(JfrSymbolId::anonymous_klass_name_hash_code(ik));
+ assert(entry != NULL, "invariant");
+ return write__artifact__cstring__entry__(writer, entry);
+ }
+
+ SymbolEntryPtr entry = artifacts->map_symbol(JfrSymbolId::regular_klass_name_hash_code(ik));
+ return write__artifact__symbol__entry__(writer, entry);
+}
+
+int _compare_traceid_(const traceid& lhs, const traceid& rhs) {
+ return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
+}
+
+template