Commit cee8cf25 authored by anoll

8020151: PSR:PERF Large performance regressions when code cache is filled

Summary: Code cache sweeping based on method hotness; removed speculative disconnect
Reviewed-by: kvn, iveresov
Parent 5fef85db
......@@ -124,7 +124,6 @@ int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;
......@@ -464,96 +463,11 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
}
#endif //PRODUCT
/**
* Remove and return nmethod from the saved code list in order to reanimate it.
*/
nmethod* CodeCache::reanimate_saved_code(Method* m) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* saved = _saved_nmethods;
nmethod* prev = NULL;
while (saved != NULL) {
if (saved->is_in_use() && saved->method() == m) {
if (prev != NULL) {
prev->set_saved_nmethod_link(saved->saved_nmethod_link());
} else {
_saved_nmethods = saved->saved_nmethod_link();
}
assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
saved->set_speculatively_disconnected(false);
saved->set_saved_nmethod_link(NULL);
if (PrintMethodFlushing) {
saved->print_on(tty, " ### nmethod is reconnected");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
xtty->method(m);
xtty->stamp();
xtty->end_elem();
}
return saved;
}
prev = saved;
saved = saved->saved_nmethod_link();
}
return NULL;
}
/**
* Remove nmethod from the saved code list in order to discard it permanently
*/
void CodeCache::remove_saved_code(nmethod* nm) {
// For conc swpr this will be called with CodeCache_lock taken by caller
assert_locked_or_safepoint(CodeCache_lock);
assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
nmethod* saved = _saved_nmethods;
nmethod* prev = NULL;
while (saved != NULL) {
if (saved == nm) {
if (prev != NULL) {
prev->set_saved_nmethod_link(saved->saved_nmethod_link());
} else {
_saved_nmethods = saved->saved_nmethod_link();
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
xtty->stamp();
xtty->end_elem();
}
return;
}
prev = saved;
saved = saved->saved_nmethod_link();
}
ShouldNotReachHere();
}
void CodeCache::speculatively_disconnect(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
nm->set_saved_nmethod_link(_saved_nmethods);
_saved_nmethods = nm;
if (PrintMethodFlushing) {
nm->print_on(tty, " ### nmethod is speculatively disconnected");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
xtty->method(nm->method());
xtty->stamp();
xtty->end_elem();
}
nm->method()->clear_code();
nm->set_speculatively_disconnected(true);
}
void CodeCache::gc_prologue() {
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}
void CodeCache::gc_epilogue() {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALIVE_BLOBS(cb) {
......
......@@ -57,7 +57,6 @@ class CodeCache : AllStatic {
static int _number_of_nmethods_with_dependencies;
static bool _needs_cache_clean;
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods.
static void verify_if_often() PRODUCT_RETURN;
......@@ -167,17 +166,12 @@ class CodeCache : AllStatic {
static size_t capacity() { return _heap->capacity(); }
static size_t max_capacity() { return _heap->max_capacity(); }
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
static double reverse_free_ratio();
static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
static nmethod* reanimate_saved_code(Method* m);
static void remove_saved_code(nmethod* nm);
static void speculatively_disconnect(nmethod* nm);
// Deoptimization
static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
......
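The new reverse_free_ratio() above replaces the fixed CodeCacheFlushingMinimumFreeSpace threshold as the sweeper's measure of code cache pressure. Its definition is not visible in this excerpt, so the following is only a hedged sketch of the assumed semantics (the less free space, the larger the value):

    // Sketch only -- the real body lives in codeCache.cpp, outside this excerpt.
    // E.g., with 25% of the cache unallocated the ratio is about 4; as free
    // space shrinks, the ratio grows and sweeping becomes more aggressive.
    double CodeCache::reverse_free_ratio() {
      double unallocated = (double)unallocated_capacity();
      if (unallocated <= 0) return (double)max_capacity(); // guard (assumption)
      return (double)max_capacity() / unallocated;
    }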
......@@ -462,7 +462,6 @@ void nmethod::init_defaults() {
_state = alive;
_marked_for_reclamation = 0;
_has_flushed_dependencies = 0;
_speculatively_disconnected = 0;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_lazy_critical_native = 0;
......@@ -481,7 +480,6 @@ void nmethod::init_defaults() {
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_saved_nmethod_link = NULL;
_compiler = NULL;
#ifdef HAVE_DTRACE_H
......@@ -686,6 +684,7 @@ nmethod::nmethod(
_osr_entry_point = NULL;
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
code_buffer->copy_values_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
......@@ -770,6 +769,7 @@ nmethod::nmethod(
_osr_entry_point = NULL;
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
code_buffer->copy_values_to(this);
debug_only(verify_scavenge_root_oops());
......@@ -842,6 +842,7 @@ nmethod::nmethod(
_comp_level = comp_level;
_compiler = compiler;
_orig_pc_offset = orig_pc_offset;
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
// Section offsets
_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
......@@ -1176,7 +1177,7 @@ void nmethod::cleanup_inline_caches() {
// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
assert(is_not_entrant(), "must be a non-entrant method");
assert(is_alive(), "Must be an alive method");
// Set the traversal mark to ensure that the sweeper does 2
// cleaning passes before moving to zombie.
set_stack_traversal_mark(NMethodSweeper::traversal_count());
......@@ -1261,7 +1262,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
set_osr_link(NULL);
//set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
NMethodSweeper::notify(this);
NMethodSweeper::notify();
}
void nmethod::invalidate_osr_method() {
......@@ -1351,6 +1352,15 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
nmethod_needs_unregister = true;
}
// Must happen before state change. Otherwise we have a race condition in
// nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
// transition its state from 'not_entrant' to 'zombie' without having to wait
// for stack scanning.
if (state == not_entrant) {
mark_as_seen_on_stack();
OrderAccess::storestore();
}
// Change state
_state = state;
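// Why the barrier above matters -- a minimal sketch of the race, with reader
// and writer simplified (this is not the actual HotSpot code):
//
//   writer (this function)                 reader (sweeper)
//   ----------------------                 ----------------
//   mark_as_seen_on_stack();   // store 1  reads _state first,
//   OrderAccess::storestore();             then reads stack_traversal_mark()
//   _state = not_entrant;      // store 2
//
// Without the storestore barrier, store 2 could become visible before
// store 1, so the sweeper could observe 'not_entrant' together with a stale
// traversal mark and zombify an nmethod that is still active on some stack.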
......@@ -1369,11 +1379,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
HandleMark hm;
method()->clear_code();
}
if (state == not_entrant) {
mark_as_seen_on_stack();
}
} // leave critical region under Patching_lock
// When the nmethod becomes zombie it is no longer alive so the
......@@ -1416,7 +1421,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
}
// Make sweeper aware that there is a zombie method that needs to be removed
NMethodSweeper::notify(this);
NMethodSweeper::notify();
return true;
}
......@@ -1451,10 +1456,6 @@ void nmethod::flush() {
CodeCache::drop_scavenge_root_nmethod(this);
}
if (is_speculatively_disconnected()) {
CodeCache::remove_saved_code(this);
}
#ifdef SHARK
((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
#endif // SHARK
......
......@@ -119,7 +119,6 @@ class nmethod : public CodeBlob {
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect
static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link;
......@@ -165,7 +164,6 @@ class nmethod : public CodeBlob {
// protected by CodeCache_lock
bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
bool _speculatively_disconnected; // Marked for potential unload
bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper)
bool _marked_for_deoptimization; // Used for stack deoptimization
......@@ -180,7 +178,7 @@ class nmethod : public CodeBlob {
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
// Protected by Patching_lock
unsigned char _state; // {alive, not_entrant, zombie, unloaded}
volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}
#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
......@@ -202,11 +200,18 @@ class nmethod : public CodeBlob {
// not_entrant method removal. Each mark_sweep pass will update
// this mark to current sweep invocation count if it is seen on the
// stack. An not_entrant method can be removed when there is no
// stack. A not_entrant method can be removed when there are no
// more activations, i.e., when the _stack_traversal_mark is less than
// current sweep traversal index.
long _stack_traversal_mark;
// The _hotness_counter indicates the hotness of a method. The higher
// the value, the hotter the method. The hotness counter of an nmethod is
// set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
// is active while stack scanning (mark_active_nmethods()). The hotness
// counter is decreased (by 1) while sweeping.
int _hotness_counter;
ExceptionCache *_exception_cache;
PcDescCache _pc_desc_cache;
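// Worked example of the reset/decay arithmetic above (flag value assumed,
// not part of this diff):
//   -XX:ReservedCodeCacheSize=240m
//   value on reset:  (240*M / (1024 * 1024)) * 2 = 240 * 2 = 480
//   value after k sweeps with no stack activity:  480 - k
// A method that stays off all stacks for ~480 sweeps decays to 0 and becomes
// cold; any appearance during stack scanning resets it back to 480.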
......@@ -382,6 +387,10 @@ class nmethod : public CodeBlob {
int total_size () const;
void dec_hotness_counter() { _hotness_counter--; }
void set_hotness_counter(int val) { _hotness_counter = val; }
int hotness_counter() const { return _hotness_counter; }
// Containment
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
......@@ -408,8 +417,8 @@ class nmethod : public CodeBlob {
// alive. It is used when an uncommon trap happens. Returns true
// if this thread changed the state of the nmethod or false if
// another thread performed the transition.
bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
// used by jvmti to track if the unload event has been reported
bool unload_reported() { return _unload_reported; }
......@@ -437,9 +446,6 @@ class nmethod : public CodeBlob {
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_speculatively_disconnected() const { return _speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }
bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
......@@ -499,9 +505,6 @@ public:
nmethod* scavenge_root_link() const { return _scavenge_root_link; }
void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
public:
// Sweeper support
......
......@@ -634,19 +634,36 @@ CompileTask* CompileQueue::get() {
NMethodSweeper::possibly_sweep();
MutexLocker locker(lock());
// Wait for an available CompileTask.
// If _first is NULL we have no more compile jobs. There are two reasons for
// having no compile jobs: First, we compiled everything we wanted. Second,
// we ran out of code cache so compilation has been disabled. In the latter
// case we perform code cache sweeps to free memory such that we can re-enable
// compilation.
while (_first == NULL) {
// There is no work to be done right now. Wait.
if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
// During the emergency sweeping periods, wake up and sweep occasionally
bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
if (timedout) {
if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
// Wait a certain amount of time to possibly do another sweep.
// We must wait until stack scanning has happened so that we can
// transition a method's state from 'not_entrant' to 'zombie'.
long wait_time = NmethodSweepCheckInterval * 1000;
if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
// Only one thread at a time can do sweeping. Scale the
// wait time according to the number of compiler threads.
// As a result, the next sweep is likely to happen every 100ms
// with an arbitrary number of threads that do sweeping.
wait_time = 100 * CICompilerCount;
}
bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
if (timeout) {
MutexUnlocker ul(lock());
// When otherwise not busy, run nmethod sweeping
NMethodSweeper::possibly_sweep();
}
} else {
// During normal operation no need to wake up on timer
// If there are no compilation tasks and we can compile new jobs
// (i.e., there is enough free space in the code cache) there is
// no need to invoke the sweeper. As a result, the hotness of methods
// remains unchanged. This behavior is desired, since we want to keep
// the stable state, i.e., we do not want to evict methods from the
// code cache if it is unnecessary.
lock()->wait();
}
}
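// Worked example of the scaling above (thread count illustrative):
//   CICompilerCount = 4, NmethodSweepCheckInterval at its default
//   wait_time = 100 * 4 = 400 ms per compiler thread
// Each of the 4 threads times out every 400 ms and only one sweeps at a
// time, so a sweep opportunity arises roughly every 100 ms on average,
// independent of the number of compiler threads.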
......@@ -1227,16 +1244,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
return method_code;
}
}
if (method->is_not_compilable(comp_level)) return NULL;
if (UseCodeCacheFlushing) {
nmethod* saved = CodeCache::reanimate_saved_code(method());
if (saved != NULL) {
method->set_code(method, saved);
return saved;
}
if (method->is_not_compilable(comp_level)) {
return NULL;
}
} else {
// osr compilation
#ifndef TIERED
......@@ -1585,9 +1595,6 @@ void CompileBroker::compiler_thread_loop() {
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
// the code cache is really full
handle_full_code_cache();
} else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
// Attempt to start cleaning the code cache while there is still a little headroom
NMethodSweeper::handle_full_code_cache(false);
}
CompileTask* task = queue->get();
......@@ -1943,7 +1950,11 @@ void CompileBroker::handle_full_code_cache() {
}
#endif
if (UseCodeCacheFlushing) {
NMethodSweeper::handle_full_code_cache(true);
// Since code cache is full, immediately stop new compiles
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");
NMethodSweeper::possibly_sweep();
}
} else {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
......
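With VM_HandleFullCodeCache removed (see the vm_operations hunks below), the full-cache response now runs directly on the compiler thread. A hedged reading of the guard above, assuming set_should_compile_new_jobs() flips the flag atomically and returns true only for the thread that performed the transition:

    // Sketch of the intended single-winner behavior (CAS semantics assumed):
    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
      NMethodSweeper::log_sweep("disable_compiler"); // logged once, not per failure
      NMethodSweeper::possibly_sweep();              // start reclaiming space now
    }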
......@@ -901,16 +901,6 @@ address Method::make_adapters(methodHandle mh, TRAPS) {
// This function must not hit a safepoint!
address Method::verified_code_entry() {
debug_only(No_Safepoint_Verifier nsv;)
nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
if (code == NULL && UseCodeCacheFlushing) {
nmethod *saved_code = CodeCache::reanimate_saved_code(this);
if (saved_code != NULL) {
methodHandle method(this);
assert( ! saved_code->is_osr_method(), "should not get here for osr" );
set_code( method, saved_code );
}
}
assert(_from_compiled_entry != NULL, "must be set");
return _from_compiled_entry;
}
......
......@@ -1130,6 +1130,9 @@ void Arguments::set_tiered_flags() {
Tier3InvokeNotifyFreqLog = 0;
Tier4InvocationThreshold = 0;
}
if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
}
}
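// Worked example of the default above (cache size illustrative):
//   -XX:ReservedCodeCacheSize=240m
//   NmethodSweepFraction = 1 + 240*M / (16*M) = 1 + 15 = 16
// A full pass over the code cache is thus split into 16 sweeper invocations,
// each covering roughly 1/16 of the nmethods, bounding the work a single
// invocation adds between compilations.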
#if INCLUDE_ALL_GCS
......@@ -2333,6 +2336,10 @@ bool Arguments::check_vm_args_consistency() {
(2*G)/M);
status = false;
}
status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
return status;
}
......
......@@ -2868,6 +2868,10 @@ class CommandLineFlags {
product(intx, NmethodSweepCheckInterval, 5, \
"Compilers wake up every n seconds to possibly sweep nmethods") \
\
product(intx, NmethodSweepActivity, 10, \
"Removes cold nmethods from code cache if > 0. Higher values " \
"result in more aggressive sweeping") \
\
notproduct(bool, LogSweeper, false, \
"Keep a ring buffer of sweeper activity") \
\
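For reference, a hypothetical launch line exercising the sweeper flags touched by this change (all values illustrative):

    java -XX:+UseCodeCacheFlushing -XX:NmethodSweepActivity=10 \
         -XX:NmethodSweepCheckInterval=5 -XX:ReservedCodeCacheSize=240m -version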
......@@ -3239,15 +3243,6 @@ class CommandLineFlags {
product(bool, UseCodeCacheFlushing, true, \
"Attempt to clean the code cache before shutting off compiler") \
\
product(intx, MinCodeCacheFlushingInterval, 30, \
"Min number of seconds between code cache cleaning sessions") \
\
product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K, \
"When less than X space left, start code cache cleaning") \
\
product(uintx, CodeCacheFlushingFraction, 2, \
"Fraction of the code cache that is flushed when full") \
\
/* interpreter debugging */ \
develop(intx, BinarySwitchThreshold, 5, \
"Minimal number of lookupswitch entries for rewriting to binary " \
......
......@@ -519,8 +519,8 @@ void SafepointSynchronize::do_cleanup_tasks() {
}
{
TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
NMethodSweeper::scan_stacks();
TraceTime t4("mark nmethods", TraceSafepointCleanupTime);
NMethodSweeper::mark_active_nmethods();
}
if (SymbolTable::needs_rehashing()) {
......
This diff has been collapsed.
......@@ -27,8 +27,30 @@
// An NmethodSweeper is an incremental cleaner for:
// - cleanup inline caches
// - reclamation of unreferenced zombie nmethods
//
// - reclamation of nmethods
// Removing nmethods from the code cache involves two operations:
// 1) mark active nmethods
//    This is done in 'mark_active_nmethods()'. The function is called at a
//    safepoint and marks all nmethods that are active on a thread's stack.
// 2) sweep nmethods
//    This is done in sweep_code_cache(). The function is the only place in the
//    sweeper where memory is reclaimed. Note that sweep_code_cache() is not
//    called at a safepoint. However, sweep_code_cache() stops executing if
//    another thread requests a safepoint. Consequently, 'mark_active_nmethods()'
//    and sweep_code_cache() cannot execute at the same time.
// To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can
// be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency
// invalidation, and (iv) being replaced by a different method version (tiered
// compilation). Not-entrant nmethods cannot be called by Java threads, but they
// can still be active on the stack. To ensure that active nmethods are not
// reclaimed, we have to wait until the next marking phase has completed. If a
// not-entrant nmethod was NOT marked as active, it can be converted to the
// 'zombie' state. To safely remove the nmethod, all inline caches (ICs) that
// point to the nmethod must be cleared. After that, the nmethod can be evicted
// from the code cache. Each state change happens during a separate sweep, so it
// takes at least 3 sweeps before an nmethod's space is freed. Sweeping is
// currently done by compiler threads between compilations or at least every
// 5 seconds (NmethodSweepCheckInterval) when the code cache is full.
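// A compact walk-through of the lifecycle described above (sweep numbers are
// illustrative; an nmethod may linger in each state longer):
//   sweep N   : hotness counter has decayed to a cold value
//               -> make_not_entrant()    (alive -> not_entrant)
//   safepoint : mark_active_nmethods() no longer finds it on any stack
//   sweep N+1 : still unseen since the last marking
//               -> make_zombie()         (not_entrant -> zombie)
//   sweep N+2 : inline caches pointing at it have been cleaned
//               -> release_nmethod()     (zombie -> freed)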
class NMethodSweeper : public AllStatic {
static long _traversals; // Stack scan count, also sweep ID.
......@@ -41,46 +63,38 @@ class NMethodSweeper : public AllStatic {
static volatile int _invocations; // No. of invocations left until we are completed with this pass
static volatile int _sweep_started; // Flag to control conc sweeper
//The following are reset in scan_stacks and synchronized by the safepoint
static bool _resweep; // Indicates that a change has happend and we want another sweep,
// always checked and reset at a safepoint so memory will be in sync.
static int _locked_seen; // Number of locked nmethods encountered during the scan
//The following are reset in mark_active_nmethods and synchronized by the safepoint
static bool _request_mark_phase; // Indicates that a change has happened and we need another mark phase,
// always checked and reset at a safepoint so memory will be in sync.
static int _locked_seen; // Number of locked nmethods encountered during the scan
static int _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on stack
static jint _flush_token; // token that guards method flushing, making sure it is executed only once.
// These are set during a flush, a VM-operation
static long _last_flush_traversal_id; // trav number at last flush unloading
static jlong _last_full_flush_time; // timestamp of last emergency unloading
// These are synchronized by the _sweep_started token
static int _highest_marked; // highest compile id dumped at last emergency unloading
static int _dead_compile_ids; // number of compile ids that where not in the cache last flush
// Stat counters
static int _number_of_flushes; // Total of full traversals caused by full cache
static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed
static jlong _total_time_sweeping; // Accumulated time sweeping
static jlong _total_time_this_sweep; // Total time this sweep
static jlong _peak_sweep_time; // Peak time for a full sweep
static jlong _peak_sweep_fraction_time; // Peak time sweeping one fraction
static jlong _total_disconnect_time; // Total time cleaning code mem
static jlong _peak_disconnect_time; // Peak time cleaning code mem
static void process_nmethod(nmethod *nm);
static int process_nmethod(nmethod *nm);
static void release_nmethod(nmethod* nm);
static void log_sweep(const char* msg, const char* format = NULL, ...);
static bool sweep_in_progress();
static void sweep_code_cache();
static void request_nmethod_marking() { _request_mark_phase = true; }
static void reset_nmethod_marking() { _request_mark_phase = false; }
static bool need_marking_phase() { return _request_mark_phase; }
static int _hotness_counter_reset_val;
public:
static long traversal_count() { return _traversals; }
static int number_of_flushes() { return _number_of_flushes; }
static int total_nof_methods_reclaimed() { return _total_nof_methods_reclaimed; }
static jlong total_time_sweeping() { return _total_time_sweeping; }
static jlong peak_sweep_time() { return _peak_sweep_time; }
static jlong peak_sweep_fraction_time() { return _peak_sweep_fraction_time; }
static jlong total_disconnect_time() { return _total_disconnect_time; }
static jlong peak_disconnect_time() { return _peak_disconnect_time; }
static void log_sweep(const char* msg, const char* format = NULL, ...);
#ifdef ASSERT
static bool is_sweeping(nmethod* which) { return _current == which; }
......@@ -90,19 +104,18 @@ class NMethodSweeper : public AllStatic {
static void report_events();
#endif
static void scan_stacks(); // Invoked at the end of each safepoint
static void sweep_code_cache(); // Concurrent part of sweep job
static void possibly_sweep(); // Compiler threads call this to sweep
static void mark_active_nmethods(); // Invoked at the end of each safepoint
static void possibly_sweep(); // Compiler threads call this to sweep
static void notify(nmethod* nm) {
static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2);
static int hotness_counter_reset_val();
static void notify() {
// Request a new sweep of the code cache from the beginning. No
// need to synchronize the setting of this flag since it only
// changes to false at safepoint so we can never overwrite it with false.
_resweep = true;
request_nmethod_marking();
}
static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure
};
#endif // SHARE_VM_RUNTIME_SWEEPER_HPP
......@@ -842,7 +842,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(nmethod, _osr_link, nmethod*) \
nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \
nonstatic_field(nmethod, _scavenge_root_state, jbyte) \
nonstatic_field(nmethod, _state, unsigned char) \
nonstatic_field(nmethod, _state, volatile unsigned char) \
nonstatic_field(nmethod, _exception_offset, int) \
nonstatic_field(nmethod, _deoptimize_offset, int) \
nonstatic_field(nmethod, _deoptimize_mh_offset, int) \
......@@ -1360,6 +1360,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_integer_type(long) \
declare_integer_type(char) \
declare_unsigned_integer_type(unsigned char) \
declare_unsigned_integer_type(volatile unsigned char) \
declare_unsigned_integer_type(u_char) \
declare_unsigned_integer_type(unsigned int) \
declare_unsigned_integer_type(uint) \
......@@ -1382,6 +1383,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_toplevel_type(char**) \
declare_toplevel_type(u_char*) \
declare_toplevel_type(unsigned char*) \
declare_toplevel_type(volatile unsigned char*) \
\
/*******************************************************************/ \
/* Types which it will be handy to have available over in the SA */ \
......
......@@ -173,10 +173,6 @@ void VM_UnlinkSymbols::doit() {
SymbolTable::unlink();
}
void VM_HandleFullCodeCache::doit() {
NMethodSweeper::speculative_disconnect_nmethods(_is_full);
}
void VM_Verify::doit() {
Universe::heap()->prepare_for_verify();
Universe::verify(_silent);
......
......@@ -51,7 +51,6 @@
template(DeoptimizeAll) \
template(ZombieAll) \
template(UnlinkSymbols) \
template(HandleFullCodeCache) \
template(Verify) \
template(PrintJNI) \
template(HeapDumper) \
......@@ -261,16 +260,6 @@ class VM_DeoptimizeFrame: public VM_Operation {
bool allow_nested_vm_operations() const { return true; }
};
class VM_HandleFullCodeCache: public VM_Operation {
private:
bool _is_full;
public:
VM_HandleFullCodeCache(bool is_full) { _is_full = is_full; }
VMOp_Type type() const { return VMOp_HandleFullCodeCache; }
void doit();
bool allow_nested_vm_operations() const { return true; }
};
#ifndef PRODUCT
class VM_DeoptimizeAll: public VM_Operation {
private:
......
......@@ -313,13 +313,6 @@ Declares a structure type that can be used in other events.
<value type="UINT" field="zombifiedCount" label="Methods Zombified"/>
</event>
<event id="CleanCodeCache" path="vm/code_sweeper/clean" label="Clean Code Cache"
description="Clean code cache from oldest methods"
has_thread="true" is_requestable="false" is_constant="false">
<value type="UINT" field="disconnectedCount" label="Methods Disconnected"/>
<value type="UINT" field="madeNonEntrantCount" label="Methods Made Non-Entrant"/>
</event>
<!-- Code cache events -->
<event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"
......