diff --git a/src/share/vm/gc_interface/allocTracer.cpp b/src/share/vm/gc_interface/allocTracer.cpp index f7a73d2c7a83bd7bda386dd4c0a682b0ef65a3ee..55cc6e0efb3431c6947cafeac1f6e3f6d2d069f2 100644 --- a/src/share/vm/gc_interface/allocTracer.cpp +++ b/src/share/vm/gc_interface/allocTracer.cpp @@ -22,6 +22,7 @@ * */ +#include "precompiled.hpp" #include "gc_implementation/shared/gcId.hpp" #include "gc_interface/allocTracer.hpp" #include "trace/tracing.hpp" diff --git a/src/share/vm/gc_interface/allocTracer.hpp b/src/share/vm/gc_interface/allocTracer.hpp index b519b1be46a91201a808170d3b16b3fbb2419a06..4bb8de92cdff7028afb541ddd3b8152fba891d97 100644 --- a/src/share/vm/gc_interface/allocTracer.hpp +++ b/src/share/vm/gc_interface/allocTracer.hpp @@ -30,17 +30,19 @@ #include "utilities/globalDefinitions.hpp" class AllocTracer : AllStatic { - private: - static void send_opto_array_allocation_event(KlassHandle klass, oop obj,size_t alloc_size, Thread* thread); - static void send_opto_instance_allocation_event(KlassHandle klass, oop obj, Thread* thread); public: static void send_allocation_outside_tlab_event(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread); static void send_allocation_in_new_tlab_event(KlassHandle klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread); static void send_allocation_requiring_gc_event(size_t size, const GCId& gcId); +#if INCLUDE_TRACE static void opto_slow_allocation_enter(bool is_array, Thread* thread); static void opto_slow_allocation_leave(bool is_array, Thread* thread); static void send_slow_allocation_event(KlassHandle klass, oop obj,size_t alloc_size, Thread* thread); static void send_opto_fast_allocation_event(KlassHandle klass, oop obj, size_t alloc_size, Thread* thread); + private: + static void send_opto_array_allocation_event(KlassHandle klass, oop obj,size_t alloc_size, Thread* thread); + static void send_opto_instance_allocation_event(KlassHandle klass, oop obj, Thread* thread); +#endif }; 
#endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP */ diff --git a/src/share/vm/gc_interface/allocTracer.inline.hpp b/src/share/vm/gc_interface/allocTracer.inline.hpp index 82fac7a8376898cfb1dc8f880343900075bc0b42..8c37d4966dc24de0eb86f9021a37d9882b06f778 100644 --- a/src/share/vm/gc_interface/allocTracer.inline.hpp +++ b/src/share/vm/gc_interface/allocTracer.inline.hpp @@ -22,6 +22,8 @@ #ifndef SHARE_VM_GC_INTERFACE_ALLOCTRACER_INLINE_HPP #define SHARE_VM_GC_INTERFACE_ALLOCTRACER_INLINE_HPP +#include "utilities/macros.hpp" +#if INCLUDE_TRACE #include "trace/tracing.hpp" #include "gc_implementation/shared/gcId.hpp" #include "runtime/handles.hpp" @@ -117,4 +119,5 @@ inline void AllocTracer::send_opto_fast_allocation_event(KlassHandle klass, oop thread->trace_data()->incr_alloc_count_until_sample(interval); } +#endif // INCLUDE_TRACE #endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_INLINE_HPP */ diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp index 5ee4f88183439f90122e2534bbcb1454ad6eff74..b9398bb2acef5613a1f13326787eedb52b90ef45 100644 --- a/src/share/vm/gc_interface/collectedHeap.hpp +++ b/src/share/vm/gc_interface/collectedHeap.hpp @@ -324,10 +324,12 @@ class CollectedHeap : public CHeapObj { inline static void check_array_size(int size, int length, TRAPS); public: +#if INCLUDE_TRACE // Implicit Jfr inline methods.
static void trace_slow_allocation(KlassHandle klass, oop obj, size_t alloc_size, Thread* thread) { AllocTracer::send_slow_allocation_event(klass, obj, alloc_size, thread); } +#endif static void trace_allocation_outside_tlab(KlassHandle klass, HeapWord* obj, size_t alloc_size, Thread* thread) { AllocTracer::send_allocation_outside_tlab_event(klass, obj, alloc_size, thread); diff --git a/src/share/vm/gc_interface/collectedHeap.inline.hpp b/src/share/vm/gc_interface/collectedHeap.inline.hpp index 7c8ccb7dc4b7795e91349df1233f3f1946f88f1e..433982f9aa173e27a5ee236f3f1e6f2d97ac2982 100644 --- a/src/share/vm/gc_interface/collectedHeap.inline.hpp +++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp @@ -85,8 +85,10 @@ inline void post_allocation_notify(KlassHandle klass, oop obj, int size) { } } +#if INCLUDE_TRACE // support for jfr CollectedHeap::trace_slow_allocation(klass, obj, size * HeapWordSize, Thread::current()); +#endif } void CollectedHeap::post_allocation_setup_obj(KlassHandle klass, diff --git a/src/share/vm/opto/macro.cpp b/src/share/vm/opto/macro.cpp index a147e457f45a3c932ef3eec424cd082088d5ed84..e54c09109c172ed18a9ec4d5bbe704187ecbdaad 100644 --- a/src/share/vm/opto/macro.cpp +++ b/src/share/vm/opto/macro.cpp @@ -1442,9 +1442,11 @@ void PhaseMacroExpand::expand_allocate_common( } } +#if INCLUDE_TRACE if (EnableJFR && JfrOptionSet::sample_object_allocations()) { jfr_sample_fast_object_allocation(alloc, fast_oop, fast_oop_ctrl, fast_oop_rawmem); } +#endif if (C->env()->dtrace_extended_probes()) { // Slow-path call @@ -1643,6 +1645,7 @@ static jint bottom_java_frame_bci(JVMState* state) { return last->bci(); } +#if INCLUDE_TRACE // // Pseudo code: // @@ -1739,6 +1742,7 @@ void PhaseMacroExpand::jfr_sample_fast_object_allocation( fast_oop_rawmem = alloc_sample_enabled_region_phi_mem; } } +#endif // Helper for PhaseMacroExpand::expand_allocate_common. // Initializes the newly-allocated storage.
diff --git a/src/share/vm/opto/macro.hpp b/src/share/vm/opto/macro.hpp index 9268cf524331688e36b18220f302de5d07173dfd..a5524a3acc286ad19c4b6c7384accddf7b74d536 100644 --- a/src/share/vm/opto/macro.hpp +++ b/src/share/vm/opto/macro.hpp @@ -123,9 +123,11 @@ private: Node* old_eden_top, Node* new_eden_top, Node* length); +#if INCLUDE_TRACE //JFR tracing void jfr_sample_fast_object_allocation(AllocateNode* alloc, Node* fast_oop, Node*& fast_oop_ctrl, Node*& fast_oop_rawmem); +#endif public: PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn), _has_locks(false) { diff --git a/src/share/vm/opto/runtime.cpp b/src/share/vm/opto/runtime.cpp index 95fc0c22b14e492931b7023087a002f856065096..4d632bbc38ebccad6816fe3072a11bdec2750587 100644 --- a/src/share/vm/opto/runtime.cpp +++ b/src/share/vm/opto/runtime.cpp @@ -1677,6 +1677,7 @@ JRT_END //----------------------------------------------------------------------------- // JFR support. +#if INCLUDE_TRACE const TypeFunc *OptoRuntime::jfr_fast_object_alloc_Type() { const Type **fields = TypeTuple::fields(3); fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // newly allocated object @@ -1703,3 +1704,4 @@ void OptoRuntime::jfr_fast_object_alloc_C(oopDesc* obj, jint top_frame_bci, Java thread->trace_data()->clear_cached_top_frame_bci(); thread->set_vm_result(obj); } +#endif diff --git a/src/share/vm/opto/runtime.hpp b/src/share/vm/opto/runtime.hpp index 6a0c80daa1656fd329d64eb58f0750e6baff7a38..37ba65a5eb898af1e36dd29eaa5943139915046d 100644 --- a/src/share/vm/opto/runtime.hpp +++ b/src/share/vm/opto/runtime.hpp @@ -179,8 +179,10 @@ public: static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread); static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock); +#if INCLUDE_TRACE // JFR support static void jfr_fast_object_alloc_C(oopDesc* obj, jint bci, JavaThread* thread); +#endif private: // Implicit exception support @@ -339,8 +341,10 @@ private: static const TypeFunc* 
zap_dead_locals_Type(); # endif +#if INCLUDE_TRACE // JFR support static const TypeFunc* jfr_fast_object_alloc_Type(); +#endif private: static NamedCounter * volatile _named_counters;