Commit be5b9106 authored by never

6950075: nmethod sweeper should operate concurrently

Reviewed-by: never, kvn
Contributed-by: eric.caspole@amd.com
Parent bbc9d454
src/share/vm/code/codeCache.cpp

@@ -124,6 +124,23 @@ nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
   return (nmethod*)cb;
 }
 
+nmethod* CodeCache::first_nmethod() {
+  assert_locked_or_safepoint(CodeCache_lock);
+  CodeBlob* cb = first();
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
+nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  cb = next(cb);
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
 CodeBlob* CodeCache::allocate(int size) {
   // Do not seize the CodeCache lock here--if the caller has not
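Note: the two iterators above let callers walk only nmethods while tolerating other blob kinds in the cache. A minimal sketch of the intended usage pattern; the for_each_nmethod wrapper and its visit callback are illustrative, not part of this change:

    // Hypothetical walk over every nmethod in the code cache. The whole
    // traversal holds CodeCache_lock, matching the asserts in the new
    // iterators, so visit() must not block or free the nmethod it is given.
    void for_each_nmethod(void (*visit)(nmethod*)) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      for (nmethod* nm = CodeCache::first_nmethod();
           nm != NULL;
           nm = CodeCache::next_nmethod(nm)) {  // nmethod* is-a CodeBlob*
        visit(nm);
      }
    }

The sweeper itself drops and re-takes the lock between nmethods (see sweeper.cpp below), which is why it always fetches next_nmethod before releasing the lock.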
@@ -414,7 +431,7 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
       saved->set_speculatively_disconnected(false);
       saved->set_saved_nmethod_link(NULL);
       if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected");
+        saved->print_on(tty, " ### nmethod is reconnected\n");
       }
       if (LogCompilation && (xtty != NULL)) {
         ttyLocker ttyl;
...@@ -432,7 +449,8 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) { ...@@ -432,7 +449,8 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
} }
void CodeCache::remove_saved_code(nmethod* nm) { void CodeCache::remove_saved_code(nmethod* nm) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); // For conc swpr this will be called with CodeCache_lock taken by caller
assert_locked_or_safepoint(CodeCache_lock);
assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods"); assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
nmethod* saved = _saved_nmethods; nmethod* saved = _saved_nmethods;
nmethod* prev = NULL; nmethod* prev = NULL;
@@ -463,7 +481,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
   nm->set_saved_nmethod_link(_saved_nmethods);
   _saved_nmethods = nm;
   if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
+    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;
src/share/vm/code/codeCache.hpp
@@ -102,6 +102,8 @@ class CodeCache : AllStatic {
   static CodeBlob* next (CodeBlob* cb);
   static CodeBlob* alive(CodeBlob *cb);
   static nmethod* alive_nmethod(CodeBlob *cb);
+  static nmethod* first_nmethod();
+  static nmethod* next_nmethod (CodeBlob* cb);
   static int nof_blobs()                 { return _number_of_blobs; }
 
   // GC support
src/share/vm/code/nmethod.cpp
@@ -1014,9 +1014,7 @@ void nmethod::clear_inline_caches() {
 void nmethod::cleanup_inline_caches() {
-  assert(SafepointSynchronize::is_at_safepoint() &&
-         !CompiledIC_lock->is_locked() &&
-         !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
+  assert_locked_or_safepoint(CompiledIC_lock);
 
   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop
@@ -1071,7 +1069,6 @@ void nmethod::mark_as_seen_on_stack() {
 // Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
-  assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the
...@@ -1127,7 +1124,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) { ...@@ -1127,7 +1124,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
_method = NULL; // Clear the method of this dead nmethod _method = NULL; // Clear the method of this dead nmethod
} }
// Make the class unloaded - i.e., change state and notify sweeper // Make the class unloaded - i.e., change state and notify sweeper
check_safepoint(); assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
if (is_in_use()) { if (is_in_use()) {
// Transitioning directly from live to unloaded -- so // Transitioning directly from live to unloaded -- so
// we need to force a cache clean-up; remember this // we need to force a cache clean-up; remember this
@@ -1220,17 +1217,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
     assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
   }
 
-  // When the nmethod becomes zombie it is no longer alive so the
-  // dependencies must be flushed.  nmethods in the not_entrant
-  // state will be flushed later when the transition to zombie
-  // happens or they get unloaded.
-  if (state == zombie) {
-    assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
-    flush_dependencies(NULL);
-  } else {
-    assert(state == not_entrant, "other cases may need to be handled differently");
-  }
-
   was_alive = is_in_use(); // Read state under lock
 
   // Change state
@@ -1241,6 +1227,17 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   } // leave critical region under Patching_lock
 
+  // When the nmethod becomes zombie it is no longer alive so the
+  // dependencies must be flushed.  nmethods in the not_entrant
+  // state will be flushed later when the transition to zombie
+  // happens or they get unloaded.
+  if (state == zombie) {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    flush_dependencies(NULL);
+  } else {
+    assert(state == not_entrant, "other cases may need to be handled differently");
+  }
+
   if (state == not_entrant) {
     Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
   } else {
@@ -1310,21 +1307,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   return true;
 }
 
-#ifndef PRODUCT
-void nmethod::check_safepoint() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif
-
 void nmethod::flush() {
   // Note that there are no valid oops in the nmethod anymore.
   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
 
   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  check_safepoint();
+  assert_locked_or_safepoint(CodeCache_lock);
 
   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
...@@ -1373,7 +1362,7 @@ void nmethod::flush() { ...@@ -1373,7 +1362,7 @@ void nmethod::flush() {
// notifies instanceKlasses that are reachable // notifies instanceKlasses that are reachable
void nmethod::flush_dependencies(BoolObjectClosure* is_alive) { void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint"); assert_locked_or_safepoint(CodeCache_lock);
assert(Universe::heap()->is_gc_active() == (is_alive != NULL), assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
"is_alive is non-NULL if and only if we are called during GC"); "is_alive is non-NULL if and only if we are called during GC");
if (!has_flushed_dependencies()) { if (!has_flushed_dependencies()) {
...@@ -2266,7 +2255,6 @@ void nmethod::print() const { ...@@ -2266,7 +2255,6 @@ void nmethod::print() const {
tty->print(" for method " INTPTR_FORMAT , (address)method()); tty->print(" for method " INTPTR_FORMAT , (address)method());
tty->print(" { "); tty->print(" { ");
if (version()) tty->print("v%d ", version()); if (version()) tty->print("v%d ", version());
if (level()) tty->print("l%d ", level());
if (is_in_use()) tty->print("in_use "); if (is_in_use()) tty->print("in_use ");
if (is_not_entrant()) tty->print("not_entrant "); if (is_not_entrant()) tty->print("not_entrant ");
if (is_zombie()) tty->print("zombie "); if (is_zombie()) tty->print("zombie ");
src/share/vm/code/nmethod.hpp
...@@ -82,7 +82,6 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC { ...@@ -82,7 +82,6 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
struct nmFlags { struct nmFlags {
friend class VMStructs; friend class VMStructs;
unsigned int version:8; // version number (0 = first version) unsigned int version:8; // version number (0 = first version)
unsigned int level:4; // optimization level
unsigned int age:4; // age (in # of sweep steps) unsigned int age:4; // age (in # of sweep steps)
unsigned int state:2; // {alive, zombie, unloaded) unsigned int state:2; // {alive, zombie, unloaded)
...@@ -410,14 +409,13 @@ class nmethod : public CodeBlob { ...@@ -410,14 +409,13 @@ class nmethod : public CodeBlob {
void flush_dependencies(BoolObjectClosure* is_alive); void flush_dependencies(BoolObjectClosure* is_alive);
bool has_flushed_dependencies() { return flags.hasFlushedDependencies; } bool has_flushed_dependencies() { return flags.hasFlushedDependencies; }
void set_has_flushed_dependencies() { void set_has_flushed_dependencies() {
check_safepoint();
assert(!has_flushed_dependencies(), "should only happen once"); assert(!has_flushed_dependencies(), "should only happen once");
flags.hasFlushedDependencies = 1; flags.hasFlushedDependencies = 1;
} }
bool is_marked_for_reclamation() const { return flags.markedForReclamation; } bool is_marked_for_reclamation() const { return flags.markedForReclamation; }
void mark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 1; } void mark_for_reclamation() { flags.markedForReclamation = 1; }
void unmark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 0; } void unmark_for_reclamation() { flags.markedForReclamation = 0; }
bool has_unsafe_access() const { return flags.has_unsafe_access; } bool has_unsafe_access() const { return flags.has_unsafe_access; }
void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; } void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
...@@ -428,9 +426,6 @@ class nmethod : public CodeBlob { ...@@ -428,9 +426,6 @@ class nmethod : public CodeBlob {
bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; } bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; } void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
int level() const { return flags.level; }
void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }
int comp_level() const { return _comp_level; } int comp_level() const { return _comp_level; }
int version() const { return flags.version; } int version() const { return flags.version; }
src/share/vm/compiler/compileBroker.cpp
@@ -461,12 +461,25 @@ void CompileQueue::add(CompileTask* task) {
 //
 // Get the next CompileTask from a CompileQueue
 CompileTask* CompileQueue::get() {
+  NMethodSweeper::possibly_sweep();
+
   MutexLocker locker(lock());
 
   // Wait for an available CompileTask.
   while (_first == NULL) {
     // There is no work to be done right now.  Wait.
-    lock()->wait();
+    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
+      // During the emergency sweeping periods, wake up and sweep occasionally
+      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
+      if (timedout) {
+        MutexUnlocker ul(lock());
+        // When otherwise not busy, run nmethod sweeping
+        NMethodSweeper::possibly_sweep();
+      }
+    } else {
+      // During normal operation no need to wake up on timer
+      lock()->wait();
+    }
   }
   CompileTask* task = _first;
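Note: the loop above is a timed-wait variant of the usual producer-consumer idiom. During emergency flushing, idle compiler threads wake every NmethodSweepCheckInterval seconds to help sweep instead of blocking indefinitely. A portable C++ sketch of the same shape, with illustrative names (task_queue, under_pressure, housekeeping are stand-ins, not HotSpot APIs):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <queue>

    std::queue<int> task_queue;       // stand-in for the compile queue
    std::mutex queue_mutex;
    std::condition_variable queue_cv;

    int get_task(bool (*under_pressure)(), void (*housekeeping)()) {
      std::unique_lock<std::mutex> lock(queue_mutex);
      while (task_queue.empty()) {
        if (under_pressure()) {
          // Timed wait: on timeout, drop the lock and do housekeeping,
          // mirroring the possibly_sweep() call above.
          if (queue_cv.wait_for(lock, std::chrono::seconds(5)) ==
              std::cv_status::timeout) {
            lock.unlock();
            housekeeping();
            lock.lock();
          }
        } else {
          queue_cv.wait(lock);  // normal operation: no timer wakeups
        }
      }
      int task = task_queue.front();
      task_queue.pop();
      return task;
    }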
src/share/vm/runtime/globals.hpp
@@ -2756,6 +2756,9 @@ class CommandLineFlags {
   product(intx, NmethodSweepFraction, 4,                                    \
           "Number of invocations of sweeper to cover all nmethods")        \
                                                                             \
+  product(intx, NmethodSweepCheckInterval, 5,                               \
+          "Compilers wake up every n seconds to possibly sweep nmethods")   \
+                                                                            \
   notproduct(intx, MemProfilingInterval, 500,                               \
           "Time between each invocation of the MemProfiler")               \
                                                                             \
src/share/vm/runtime/safepoint.cpp
@@ -472,7 +472,7 @@ void SafepointSynchronize::do_cleanup_tasks() {
   }
 
   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
-  NMethodSweeper::sweep();
+  NMethodSweeper::scan_stacks();
 }
src/share/vm/runtime/sweeper.cpp
@@ -33,6 +33,8 @@ int NMethodSweeper::_invocations = 0;   // No. of invocations left until we
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
+bool      NMethodSweeper::_do_sweep = false;
+jint      NMethodSweeper::_sweep_started = 0;
 bool      NMethodSweeper::_was_full = false;
 jint      NMethodSweeper::_advise_to_sweep = 0;
 jlong     NMethodSweeper::_last_was_full = 0;
@@ -50,14 +52,20 @@ public:
 };
 static MarkActivationClosure mark_activation_closure;
 
-void NMethodSweeper::sweep() {
+void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
+  _do_sweep = true;
 
   // No need to synchronize access, since this is always executed at a
-  // safepoint.  If we aren't in the middle of scan and a rescan
-  // hasn't been requested then just return.
-  if (_current == NULL && !_rescan) return;
+  // safepoint.  If we aren't in the middle of scan and a rescan
+  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
+  // code cache flushing is in progress, don't skip sweeping to help make progress
+  // clearing space in the code cache.
+  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
+    _do_sweep = false;
+    return;
+  }
 
   // Make sure CompiledIC_lock in unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.
...@@ -68,7 +76,7 @@ void NMethodSweeper::sweep() { ...@@ -68,7 +76,7 @@ void NMethodSweeper::sweep() {
if (_current == NULL) { if (_current == NULL) {
_seen = 0; _seen = 0;
_invocations = NmethodSweepFraction; _invocations = NmethodSweepFraction;
_current = CodeCache::first(); _current = CodeCache::first_nmethod();
_traversals += 1; _traversals += 1;
if (PrintMethodFlushing) { if (PrintMethodFlushing) {
tty->print_cr("### Sweep: stack traversal %d", _traversals); tty->print_cr("### Sweep: stack traversal %d", _traversals);
@@ -81,48 +89,9 @@ void NMethodSweeper::sweep() {
     _not_entrant_seen_on_stack = 0;
   }
 
-  if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
-  }
-
-  // We want to visit all nmethods after NmethodSweepFraction invocations.
-  // If invocation is 1 we do the rest
-  int todo = CodeCache::nof_blobs();
-  if (_invocations != 1) {
-    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
-    _invocations--;
-  }
-
-  for(int i = 0; i < todo && _current != NULL; i++) {
-    CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
-    if (_current->is_nmethod()) {
-      process_nmethod((nmethod *)_current);
-    }
-    _seen++;
-    _current = next;
-  }
-
-  // Because we could stop on a codeBlob other than an nmethod we skip forward
-  // to the next nmethod (if any). codeBlobs other than nmethods can be freed
-  // async to us and make _current invalid while we sleep.
-  while (_current != NULL && !_current->is_nmethod()) {
-    _current = CodeCache::next(_current);
-  }
-
-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
-    // we've completed a scan without making progress but there were
-    // nmethods we were unable to process either because they were
-    // locked or were still on stack.  We don't have to aggresively
-    // clean them up so just stop scanning.  We could scan once more
-    // but that complicates the control logic and it's unlikely to
-    // matter much.
-    if (PrintMethodFlushing) {
-      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
-    }
-  }
-
   if (UseCodeCacheFlushing) {
     if (!CodeCache::needs_flushing()) {
-      // In a safepoint, no race with setters
+      // scan_stacks() runs during a safepoint, no race with setters
       _advise_to_sweep = 0;
     }
@@ -155,13 +124,99 @@ void NMethodSweeper::sweep() {
     }
   }
 }
 
+void NMethodSweeper::possibly_sweep() {
+  if ((!MethodFlushing) || (!_do_sweep)) return;
+
+  if (_invocations > 0) {
+    // Only one thread at a time will sweep
+    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
+    if (old != 0) {
+      return;
+    }
+    sweep_code_cache();
+  }
+  _sweep_started = 0;
+}
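Note: possibly_sweep() uses a compare-and-swap as a non-blocking try-lock so that at most one compiler thread sweeps at a time; losers simply return and go back to compiling. The same gate in portable C++11 atomics (a sketch; the names are illustrative):

    #include <atomic>

    std::atomic<int> sweep_started{0};  // 0 = idle, 1 = a sweep is running

    void possibly_do_job(void (*job)()) {
      int expected = 0;
      // Equivalent of jint old = Atomic::cmpxchg(1, &_sweep_started, 0);
      // only the thread that observed 0 proceeds.
      if (!sweep_started.compare_exchange_strong(expected, 1)) {
        return;                         // someone else is already sweeping
      }
      job();
      sweep_started.store(0);           // reopen the gate for the next cycle
    }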
+void NMethodSweeper::sweep_code_cache() {
+#ifdef ASSERT
+  jlong sweep_start;
+  if(PrintMethodFlushing) {
+    sweep_start = os::javaTimeMillis();
+  }
+#endif
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+  }
+
+  // We want to visit all nmethods after NmethodSweepFraction invocations.
+  // If invocation is 1 we do the rest
+  int todo = CodeCache::nof_blobs();
+  if (_invocations > 1) {
+    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+  }
+
+  // Compilers may check to sweep more often than stack scans happen,
+  // don't keep trying once it is all scanned
+  _invocations--;
+
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+    for(int i = 0; i < todo && _current != NULL; i++) {
+
+      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
+      // Other blobs can be deleted by other threads
+      // Read next before we potentially delete current
+      CodeBlob* next = CodeCache::next_nmethod(_current);
+
+      // Now ready to process nmethod and give up CodeCache_lock
+      {
+        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        process_nmethod((nmethod *)_current);
+      }
+      _seen++;
+      _current = next;
+    }
+
+    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
+    // can be freed async to us and make _current invalid while we sleep.
+    _current = CodeCache::next_nmethod(_current);
+  }
+
+  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+    // we've completed a scan without making progress but there were
+    // nmethods we were unable to process either because they were
+    // locked or were still on stack.  We don't have to aggresively
+    // clean them up so just stop scanning.  We could scan once more
+    // but that complicates the control logic and it's unlikely to
+    // matter much.
+    if (PrintMethodFlushing) {
+      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
+    }
+  }
+
+#ifdef ASSERT
+  if(PrintMethodFlushing) {
+    jlong sweep_end = os::javaTimeMillis();
+    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+  }
+#endif
+}
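Note: the todo computation spreads one full pass over the code cache across NmethodSweepFraction calls to possibly_sweep(). With a hypothetical steady population of 4000 blobs and the default NmethodSweepFraction of 4, the work divides like this:

    _invocations = 4:  todo = (4000 - 0)    / 4 = 1000   (_seen becomes 1000)
    _invocations = 3:  todo = (4000 - 1000) / 3 = 1000   (_seen becomes 2000)
    _invocations = 2:  todo = (4000 - 2000) / 2 = 1000   (_seen becomes 3000)
    _invocations = 1:  todo = nof_blobs(), i.e. sweep whatever remains

The last invocation takes no quotient and simply finishes the remainder, so every nmethod is visited once per traversal cycle even if blob counts drift between calls.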
 void NMethodSweeper::process_nmethod(nmethod *nm) {
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
       // Clean-up all inline caches that points to zombie/non-reentrant methods
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
     } else {
       _locked_seen++;
@@ -178,6 +233,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       if (PrintMethodFlushing && Verbose) {
@@ -197,10 +253,11 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
       _rescan = true;
     } else {
       // Still alive, clean up its inline caches
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       // we coudn't transition this nmethod so don't immediately
       // request a rescan.  If this method stays on the stack for a
-      // long time we don't want to keep rescanning at every safepoint.
+      // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
     }
   } else if (nm->is_unloaded()) {
@@ -209,6 +266,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       nm->make_zombie();
@@ -227,6 +285,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     }
 
     // Clean-up all inline caches that points to zombie/non-reentrant methods
+    MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
   }
 }
@@ -235,8 +294,8 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
 // they will call a vm op that comes here. This code attempts to speculatively
 // unload the oldest half of the nmethods (based on the compile job id) by
 // saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second
-// safepoint from the current one, the nmethod will be marked non-entrant and
+// execution resumes. If a method so marked is not called by the second sweeper
+// stack traversal after the current one, the nmethod will be marked non-entrant and
 // got rid of by normal sweeping. If the method is called, the methodOop's
 // _code field is restored and the methodOop/nmethod
 // go back to their normal state.
@@ -364,8 +423,8 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
     xtty->end_elem();
   }
 
-  // Shut off compiler. Sweeper will run exiting from this safepoint
-  // and turn it back on if it clears enough space
+  // Shut off compiler. Sweeper will start over with a new stack scan and
+  // traversal cycle and turn it back on if it clears enough space.
   if (was_full()) {
     _last_was_full = os::javaTimeMillis();
     CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
src/share/vm/runtime/sweeper.hpp
@@ -35,6 +35,8 @@ class NMethodSweeper : public AllStatic {
   static bool      _rescan;          // Indicates that we should do a full rescan of the
                                      // of the code cache looking for work to do.
+  static bool      _do_sweep;        // Flag to skip the conc sweep if no stack scan happened
+  static jint      _sweep_started;   // Flag to control conc sweeper
   static int       _locked_seen;     // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
@@ -48,7 +50,9 @@ class NMethodSweeper : public AllStatic {
  public:
   static long traversal_count() { return _traversals; }
 
-  static void sweep();            // Invoked at the end of each safepoint
+  static void scan_stacks();      // Invoked at the end of each safepoint
+  static void sweep_code_cache(); // Concurrent part of sweep job
+  static void possibly_sweep();   // Compiler threads call this to sweep
 
   static void notify(nmethod* nm) {
     // Perform a full scan of the code cache from the beginning.  No