提交 360cba23 编写于 作者: K kvn

4360113: Evict nmethods when code cache gets full

Summary: Speculatively unload the oldest nmethods when code cache gets full.
Reviewed-by: never, kvn
Contributed-by: eric.caspole@amd.com
上级 7dc56cff
...@@ -962,18 +962,10 @@ void ciEnv::register_method(ciMethod* target, ...@@ -962,18 +962,10 @@ void ciEnv::register_method(ciMethod* target,
if (nm == NULL) { if (nm == NULL) {
// The CodeCache is full. Print out warning and disable compilation. // The CodeCache is full. Print out warning and disable compilation.
record_failure("code cache is full"); record_failure("code cache is full");
UseInterpreter = true; {
if (UseCompiler || AlwaysCompileLoopMethods ) { MutexUnlocker ml(Compile_lock);
#ifndef PRODUCT MutexUnlocker locker(MethodCompileQueue_lock);
warning("CodeCache is full. Compiler has been disabled"); CompileBroker::handle_full_code_cache();
if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(JavaThread::current());
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
}
#endif
UseCompiler = false;
AlwaysCompileLoopMethods = false;
} }
} else { } else {
NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); ) NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
......
...@@ -96,6 +96,7 @@ int CodeCache::_number_of_blobs = 0; ...@@ -96,6 +96,7 @@ int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0; int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false; bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL; nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;
CodeBlob* CodeCache::first() { CodeBlob* CodeCache::first() {
...@@ -395,6 +396,85 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) { ...@@ -395,6 +396,85 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
} }
#endif //PRODUCT #endif //PRODUCT
// Scan the saved-nmethod list for a live nmethod compiled from method m.
// On a hit: unlink it from the list, clear its speculatively-disconnected
// state, log the reconnection, and return it so the caller can reinstall
// it instead of recompiling.  Returns NULL when no saved code exists for m.
nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* trailer = NULL;
  for (nmethod* cur = _saved_nmethods; cur != NULL; trailer = cur, cur = cur->saved_nmethod_link()) {
    if (!cur->is_in_use() || cur->method() != m) {
      continue;  // not ours, or already dead/zombie: keep walking
    }
    // Unlink cur from the singly-linked saved list.
    if (trailer == NULL) {
      _saved_nmethods = cur->saved_nmethod_link();
    } else {
      trailer->set_saved_nmethod_link(cur->saved_nmethod_link());
    }
    assert(cur->is_speculatively_disconnected(), "shouldn't call for other nmethods");
    cur->set_speculatively_disconnected(false);
    cur->set_saved_nmethod_link(NULL);
    if (PrintMethodFlushing) {
      cur->print_on(tty, " ### nmethod is reconnected");
    }
    if (LogCompilation && (xtty != NULL)) {
      ttyLocker ttyl;
      xtty->begin_elem("nmethod_reconnected compile_id='%3d'", cur->compile_id());
      xtty->method(methodOop(m));
      xtty->stamp();
      xtty->end_elem();
    }
    return cur;
  }
  return NULL;
}
// Unlink nm from the saved-nmethod list (used when a speculatively
// disconnected nmethod is being flushed for good).  The caller guarantees
// nm was placed on the list via speculatively_disconnect(), so failing to
// find it is a fatal inconsistency.
void CodeCache::remove_saved_code(nmethod* nm) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  // Locate nm, keeping a trailing pointer for the unlink.
  nmethod* trailer = NULL;
  nmethod* cursor  = _saved_nmethods;
  while (cursor != NULL && cursor != nm) {
    trailer = cursor;
    cursor  = cursor->saved_nmethod_link();
  }
  if (cursor == NULL) {
    // nm was supposed to be on the list.
    ShouldNotReachHere();
    return;
  }
  if (trailer == NULL) {
    _saved_nmethods = cursor->saved_nmethod_link();
  } else {
    trailer->set_saved_nmethod_link(cursor->saved_nmethod_link());
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
    xtty->stamp();
    xtty->end_elem();
  }
}
// Speculatively disconnect a live nmethod: push it onto the saved list and
// detach it from its methodOop so future invocations fall back to the
// interpreter/compile path.  If the method turns hot again, the code can be
// reconnected via find_and_remove_saved_code(); otherwise the sweeper can
// eventually flush it to reclaim code cache space.
void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  // Link nm at the head of the saved-nmethod list.
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(methodOop(nm->method()));
    xtty->stamp();
    xtty->end_elem();
  }
  // Clear the method's compiled-code reference, then mark the nmethod;
  // nm stays alive (is_in_use) but is no longer reachable through its method.
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}
void CodeCache::gc_prologue() { void CodeCache::gc_prologue() {
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called"); assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
} }
......
...@@ -46,6 +46,7 @@ class CodeCache : AllStatic { ...@@ -46,6 +46,7 @@ class CodeCache : AllStatic {
static int _number_of_nmethods_with_dependencies; static int _number_of_nmethods_with_dependencies;
static bool _needs_cache_clean; static bool _needs_cache_clean;
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
static nmethod* _saved_nmethods; // linked via nm->saved_nmethod_look()
static void verify_if_often() PRODUCT_RETURN; static void verify_if_often() PRODUCT_RETURN;
...@@ -141,11 +142,16 @@ class CodeCache : AllStatic { ...@@ -141,11 +142,16 @@ class CodeCache : AllStatic {
static size_t capacity() { return _heap->capacity(); } static size_t capacity() { return _heap->capacity(); }
static size_t max_capacity() { return _heap->max_capacity(); } static size_t max_capacity() { return _heap->max_capacity(); }
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
static bool needs_cache_clean() { return _needs_cache_clean; } static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches static void clear_inline_caches(); // clear all inline caches
static nmethod* find_and_remove_saved_code(methodOop m);
static void remove_saved_code(nmethod* nm);
static void speculatively_disconnect(nmethod* nm);
// Deoptimization // Deoptimization
static int mark_for_deoptimization(DepChange& changes); static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP #ifdef HOTSWAP
......
...@@ -587,6 +587,7 @@ nmethod::nmethod( ...@@ -587,6 +587,7 @@ nmethod::nmethod(
_osr_link = NULL; _osr_link = NULL;
_scavenge_root_link = NULL; _scavenge_root_link = NULL;
_scavenge_root_state = 0; _scavenge_root_state = 0;
_saved_nmethod_link = NULL;
_compiler = NULL; _compiler = NULL;
// We have no exception handler or deopt handler make the // We have no exception handler or deopt handler make the
// values something that will never match a pc like the nmethod vtable entry // values something that will never match a pc like the nmethod vtable entry
...@@ -1033,7 +1034,7 @@ void nmethod::cleanup_inline_caches() { ...@@ -1033,7 +1034,7 @@ void nmethod::cleanup_inline_caches() {
if( cb != NULL && cb->is_nmethod() ) { if( cb != NULL && cb->is_nmethod() ) {
nmethod* nm = (nmethod*)cb; nmethod* nm = (nmethod*)cb;
// Clean inline caches pointing to both zombie and not_entrant methods // Clean inline caches pointing to both zombie and not_entrant methods
if (!nm->is_in_use()) ic->set_to_clean(); if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
} }
break; break;
} }
...@@ -1043,7 +1044,7 @@ void nmethod::cleanup_inline_caches() { ...@@ -1043,7 +1044,7 @@ void nmethod::cleanup_inline_caches() {
if( cb != NULL && cb->is_nmethod() ) { if( cb != NULL && cb->is_nmethod() ) {
nmethod* nm = (nmethod*)cb; nmethod* nm = (nmethod*)cb;
// Clean inline caches pointing to both zombie and not_entrant methods // Clean inline caches pointing to both zombie and not_entrant methods
if (!nm->is_in_use()) csc->set_to_clean(); if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
} }
break; break;
} }
...@@ -1312,7 +1313,8 @@ void nmethod::flush() { ...@@ -1312,7 +1313,8 @@ void nmethod::flush() {
// completely deallocate this method // completely deallocate this method
EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, ""); EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
if (PrintMethodFlushing) { if (PrintMethodFlushing) {
tty->print_cr("*flushing nmethod " INTPTR_FORMAT ". Live blobs: %d", this, CodeCache::nof_blobs()); tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
} }
// We need to deallocate any ExceptionCache data. // We need to deallocate any ExceptionCache data.
...@@ -1330,6 +1332,10 @@ void nmethod::flush() { ...@@ -1330,6 +1332,10 @@ void nmethod::flush() {
CodeCache::drop_scavenge_root_nmethod(this); CodeCache::drop_scavenge_root_nmethod(this);
} }
if (is_speculatively_disconnected()) {
CodeCache::remove_saved_code(this);
}
((CodeBlob*)(this))->flush(); ((CodeBlob*)(this))->flush();
CodeCache::free(this); CodeCache::free(this);
......
...@@ -95,6 +95,8 @@ struct nmFlags { ...@@ -95,6 +95,8 @@ struct nmFlags {
unsigned int has_unsafe_access:1; // May fault due to unsafe access. unsigned int has_unsafe_access:1; // May fault due to unsafe access.
unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes? unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int speculatively_disconnected:1; // Marked for potential unload
void clear(); void clear();
}; };
...@@ -137,6 +139,7 @@ class nmethod : public CodeBlob { ...@@ -137,6 +139,7 @@ class nmethod : public CodeBlob {
// To support simple linked-list chaining of nmethods: // To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from instanceKlass::osr_nmethods_head nmethod* _osr_link; // from instanceKlass::osr_nmethods_head
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect
static nmethod* volatile _oops_do_mark_nmethods; static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link; nmethod* volatile _oops_do_mark_link;
...@@ -413,6 +416,9 @@ class nmethod : public CodeBlob { ...@@ -413,6 +416,9 @@ class nmethod : public CodeBlob {
bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; } bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; } void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
int level() const { return flags.level; } int level() const { return flags.level; }
void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; } void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }
...@@ -437,6 +443,9 @@ class nmethod : public CodeBlob { ...@@ -437,6 +443,9 @@ class nmethod : public CodeBlob {
nmethod* scavenge_root_link() const { return _scavenge_root_link; } nmethod* scavenge_root_link() const { return _scavenge_root_link; }
void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; } void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
public: public:
// Sweeper support // Sweeper support
......
...@@ -69,6 +69,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end, ...@@ -69,6 +69,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
bool CompileBroker::_initialized = false; bool CompileBroker::_initialized = false;
volatile bool CompileBroker::_should_block = false; volatile bool CompileBroker::_should_block = false;
volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;
// The installed compiler(s) // The installed compiler(s)
AbstractCompiler* CompileBroker::_compilers[2]; AbstractCompiler* CompileBroker::_compilers[2];
...@@ -986,6 +987,13 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, ...@@ -986,6 +987,13 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
return method_code; return method_code;
} }
if (method->is_not_compilable(comp_level)) return NULL; if (method->is_not_compilable(comp_level)) return NULL;
nmethod* saved = CodeCache::find_and_remove_saved_code(method());
if (saved != NULL) {
method->set_code(method, saved);
return saved;
}
} else { } else {
// osr compilation // osr compilation
#ifndef TIERED #ifndef TIERED
...@@ -1037,6 +1045,14 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, ...@@ -1037,6 +1045,14 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
method->jmethod_id(); method->jmethod_id();
} }
// If the compiler is shut off due to code cache flushing or otherwise,
// fail out now so blocking compiles dont hang the java thread
if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
method->invocation_counter()->decay();
method->backedge_counter()->decay();
return NULL;
}
// do the compilation // do the compilation
if (method->is_native()) { if (method->is_native()) {
if (!PreferInterpreterNativeStubs) { if (!PreferInterpreterNativeStubs) {
...@@ -1325,26 +1341,13 @@ void CompileBroker::compiler_thread_loop() { ...@@ -1325,26 +1341,13 @@ void CompileBroker::compiler_thread_loop() {
{ {
// We need this HandleMark to avoid leaking VM handles. // We need this HandleMark to avoid leaking VM handles.
HandleMark hm(thread); HandleMark hm(thread);
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) { if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
// The CodeCache is full. Print out warning and disable compilation. // the code cache is really full
UseInterpreter = true; handle_full_code_cache();
if (UseCompiler || AlwaysCompileLoopMethods ) { } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
if (log != NULL) { // Attempt to start cleaning the code cache while there is still a little headroom
log->begin_elem("code_cache_full"); NMethodSweeper::handle_full_code_cache(false);
log->stamp();
log->end_elem();
}
#ifndef PRODUCT
warning("CodeCache is full. Compiler has been disabled");
if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(thread);
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
}
#endif
UseCompiler = false;
AlwaysCompileLoopMethods = false;
}
} }
CompileTask* task = queue->get(); CompileTask* task = queue->get();
...@@ -1369,7 +1372,7 @@ void CompileBroker::compiler_thread_loop() { ...@@ -1369,7 +1372,7 @@ void CompileBroker::compiler_thread_loop() {
// Never compile a method if breakpoints are present in it // Never compile a method if breakpoints are present in it
if (method()->number_of_breakpoints() == 0) { if (method()->number_of_breakpoints() == 0) {
// Compile the method. // Compile the method.
if (UseCompiler || AlwaysCompileLoopMethods) { if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
#ifdef COMPILER1 #ifdef COMPILER1
// Allow repeating compilations for the purpose of benchmarking // Allow repeating compilations for the purpose of benchmarking
// compile speed. This is not useful for customers. // compile speed. This is not useful for customers.
...@@ -1613,6 +1616,38 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { ...@@ -1613,6 +1616,38 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
} }
// ------------------------------------------------------------------
// CompileBroker::handle_full_code_cache
//
// The CodeCache is full. Print out warning and disable compilation or
// try code cache cleaning so compilation can continue later.
void CompileBroker::handle_full_code_cache() {
UseInterpreter = true;
if (UseCompiler || AlwaysCompileLoopMethods ) {
CompilerThread* thread = CompilerThread::current();
CompileLog* log = thread->log();
if (log != NULL) {
log->begin_elem("code_cache_full");
log->stamp();
log->end_elem();
}
#ifndef PRODUCT
warning("CodeCache is full. Compiler has been disabled");
if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(JavaThread::current());
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
}
#endif
if (UseCodeCacheFlushing) {
NMethodSweeper::handle_full_code_cache(true);
} else {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
}
}
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::set_last_compile // CompileBroker::set_last_compile
// //
......
...@@ -193,6 +193,9 @@ class CompileBroker: AllStatic { ...@@ -193,6 +193,9 @@ class CompileBroker: AllStatic {
static bool _initialized; static bool _initialized;
static volatile bool _should_block; static volatile bool _should_block;
// This flag can be used to stop compilation or turn it back on
static volatile jint _should_compile_new_jobs;
// The installed compiler(s) // The installed compiler(s)
static AbstractCompiler* _compilers[2]; static AbstractCompiler* _compilers[2];
...@@ -319,6 +322,7 @@ class CompileBroker: AllStatic { ...@@ -319,6 +322,7 @@ class CompileBroker: AllStatic {
static void compiler_thread_loop(); static void compiler_thread_loop();
// Accessor for the id of the most recently assigned compilation.
static uint get_compilation_id() { return _compilation_id; }
static bool is_idle(); static bool is_idle();
// Set _should_block. // Set _should_block.
...@@ -328,6 +332,20 @@ class CompileBroker: AllStatic { ...@@ -328,6 +332,20 @@ class CompileBroker: AllStatic {
// Call this from the compiler at convenient points, to poll for _should_block. // Call this from the compiler at convenient points, to poll for _should_block.
static void maybe_block(); static void maybe_block();
// States for _should_compile_new_jobs; used to turn compilation off while
// the code cache is being cleaned and back on afterwards.
enum {
// Flags for toggling compiler activity
stop_compilation = 0,
run_compilation = 1
};
// True when the broker may accept and run new compile jobs.
static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
// Atomically flip compilation on/off.  Returns true only for the caller
// whose cmpxchg actually performed the transition (old value was the
// opposite state), so concurrent callers race for a single winner.
static bool set_should_compile_new_jobs(jint new_state) {
// Return success if the current caller set it
jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
return (old == (1-new_state));
}
// Central handler for a full code cache: warn, then either start code
// cache flushing or disable the compiler (defined in compileBroker.cpp).
static void handle_full_code_cache();
// Return total compilation ticks // Return total compilation ticks
static jlong total_compilation_ticks() { static jlong total_compilation_ticks() {
return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0; return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0;
......
...@@ -775,6 +775,7 @@ output.cpp allocation.inline.hpp ...@@ -775,6 +775,7 @@ output.cpp allocation.inline.hpp
output.cpp assembler.inline.hpp output.cpp assembler.inline.hpp
output.cpp callnode.hpp output.cpp callnode.hpp
output.cpp cfgnode.hpp output.cpp cfgnode.hpp
output.cpp compileBroker.hpp
output.cpp debugInfo.hpp output.cpp debugInfo.hpp
output.cpp debugInfoRec.hpp output.cpp debugInfoRec.hpp
output.cpp handles.inline.hpp output.cpp handles.inline.hpp
......
...@@ -1032,6 +1032,7 @@ codeCache.cpp objArrayOop.hpp ...@@ -1032,6 +1032,7 @@ codeCache.cpp objArrayOop.hpp
codeCache.cpp oop.inline.hpp codeCache.cpp oop.inline.hpp
codeCache.cpp pcDesc.hpp codeCache.cpp pcDesc.hpp
codeCache.cpp resourceArea.hpp codeCache.cpp resourceArea.hpp
codeCache.cpp xmlstream.hpp
codeCache.hpp allocation.hpp codeCache.hpp allocation.hpp
codeCache.hpp codeBlob.hpp codeCache.hpp codeBlob.hpp
...@@ -1120,6 +1121,7 @@ compileBroker.cpp nativeLookup.hpp ...@@ -1120,6 +1121,7 @@ compileBroker.cpp nativeLookup.hpp
compileBroker.cpp oop.inline.hpp compileBroker.cpp oop.inline.hpp
compileBroker.cpp os.hpp compileBroker.cpp os.hpp
compileBroker.cpp sharedRuntime.hpp compileBroker.cpp sharedRuntime.hpp
compileBroker.cpp sweeper.hpp
compileBroker.cpp systemDictionary.hpp compileBroker.cpp systemDictionary.hpp
compileBroker.cpp vmSymbols.hpp compileBroker.cpp vmSymbols.hpp
...@@ -3719,6 +3721,7 @@ sharedHeap.hpp permGen.hpp ...@@ -3719,6 +3721,7 @@ sharedHeap.hpp permGen.hpp
sharedRuntime.cpp abstractCompiler.hpp sharedRuntime.cpp abstractCompiler.hpp
sharedRuntime.cpp arguments.hpp sharedRuntime.cpp arguments.hpp
sharedRuntime.cpp biasedLocking.hpp sharedRuntime.cpp biasedLocking.hpp
sharedRuntime.cpp compileBroker.hpp
sharedRuntime.cpp compiledIC.hpp sharedRuntime.cpp compiledIC.hpp
sharedRuntime.cpp compilerOracle.hpp sharedRuntime.cpp compilerOracle.hpp
sharedRuntime.cpp copy.hpp sharedRuntime.cpp copy.hpp
...@@ -3973,6 +3976,7 @@ stubs.hpp os_<os_family>.inline.hpp ...@@ -3973,6 +3976,7 @@ stubs.hpp os_<os_family>.inline.hpp
sweeper.cpp atomic.hpp sweeper.cpp atomic.hpp
sweeper.cpp codeCache.hpp sweeper.cpp codeCache.hpp
sweeper.cpp compileBroker.hpp
sweeper.cpp events.hpp sweeper.cpp events.hpp
sweeper.cpp methodOop.hpp sweeper.cpp methodOop.hpp
sweeper.cpp mutexLocker.hpp sweeper.cpp mutexLocker.hpp
...@@ -3980,6 +3984,8 @@ sweeper.cpp nmethod.hpp ...@@ -3980,6 +3984,8 @@ sweeper.cpp nmethod.hpp
sweeper.cpp os.hpp sweeper.cpp os.hpp
sweeper.cpp resourceArea.hpp sweeper.cpp resourceArea.hpp
sweeper.cpp sweeper.hpp sweeper.cpp sweeper.hpp
sweeper.cpp vm_operations.hpp
sweeper.cpp xmlstream.hpp
symbolKlass.cpp gcLocker.hpp symbolKlass.cpp gcLocker.hpp
symbolKlass.cpp handles.inline.hpp symbolKlass.cpp handles.inline.hpp
...@@ -4633,6 +4639,7 @@ vm_operations.cpp deoptimization.hpp ...@@ -4633,6 +4639,7 @@ vm_operations.cpp deoptimization.hpp
vm_operations.cpp interfaceSupport.hpp vm_operations.cpp interfaceSupport.hpp
vm_operations.cpp isGCActiveMark.hpp vm_operations.cpp isGCActiveMark.hpp
vm_operations.cpp resourceArea.hpp vm_operations.cpp resourceArea.hpp
vm_operations.cpp sweeper.hpp
vm_operations.cpp threadService.hpp vm_operations.cpp threadService.hpp
vm_operations.cpp thread_<os_family>.inline.hpp vm_operations.cpp thread_<os_family>.inline.hpp
vm_operations.cpp vmSymbols.hpp vm_operations.cpp vmSymbols.hpp
......
...@@ -705,6 +705,16 @@ address methodOopDesc::make_adapters(methodHandle mh, TRAPS) { ...@@ -705,6 +705,16 @@ address methodOopDesc::make_adapters(methodHandle mh, TRAPS) {
// This function must not hit a safepoint! // This function must not hit a safepoint!
address methodOopDesc::verified_code_entry() { address methodOopDesc::verified_code_entry() {
debug_only(No_Safepoint_Verifier nsv;) debug_only(No_Safepoint_Verifier nsv;)
nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
if (code == NULL && UseCodeCacheFlushing) {
nmethod *saved_code = CodeCache::find_and_remove_saved_code(this);
if (saved_code != NULL) {
methodHandle method(this);
assert( ! saved_code->is_osr_method(), "should not get here for osr" );
set_code( method, saved_code );
}
}
assert(_from_compiled_entry != NULL, "must be set"); assert(_from_compiled_entry != NULL, "must be set");
return _from_compiled_entry; return _from_compiled_entry;
} }
...@@ -733,8 +743,8 @@ void methodOopDesc::set_code(methodHandle mh, nmethod *code) { ...@@ -733,8 +743,8 @@ void methodOopDesc::set_code(methodHandle mh, nmethod *code) {
int comp_level = code->comp_level(); int comp_level = code->comp_level();
// In theory there could be a race here. In practice it is unlikely // In theory there could be a race here. In practice it is unlikely
// and not worth worrying about. // and not worth worrying about.
if (comp_level > highest_tier_compile()) { if (comp_level > mh->highest_tier_compile()) {
set_highest_tier_compile(comp_level); mh->set_highest_tier_compile(comp_level);
} }
OrderAccess::storestore(); OrderAccess::storestore();
......
...@@ -303,7 +303,7 @@ class methodOopDesc : public oopDesc { ...@@ -303,7 +303,7 @@ class methodOopDesc : public oopDesc {
bool check_code() const; // Not inline to avoid circular ref bool check_code() const; // Not inline to avoid circular ref
nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); } nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
void clear_code(); // Clear out any compiled code void clear_code(); // Clear out any compiled code
void set_code(methodHandle mh, nmethod* code); static void set_code(methodHandle mh, nmethod* code);
void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; } void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
address get_i2c_entry(); address get_i2c_entry();
address get_c2i_entry(); address get_c2i_entry();
......
...@@ -1093,7 +1093,7 @@ void Compile::Fill_buffer() { ...@@ -1093,7 +1093,7 @@ void Compile::Fill_buffer() {
cb->initialize(total_req, locs_req); cb->initialize(total_req, locs_req);
// Have we run out of code space? // Have we run out of code space?
if (cb->blob() == NULL) { if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
turn_off_compiler(this); turn_off_compiler(this);
return; return;
} }
...@@ -1314,7 +1314,7 @@ void Compile::Fill_buffer() { ...@@ -1314,7 +1314,7 @@ void Compile::Fill_buffer() {
// Verify that there is sufficient space remaining // Verify that there is sufficient space remaining
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size); cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if (cb->blob() == NULL) { if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
turn_off_compiler(this); turn_off_compiler(this);
return; return;
} }
...@@ -1433,7 +1433,7 @@ void Compile::Fill_buffer() { ...@@ -1433,7 +1433,7 @@ void Compile::Fill_buffer() {
} }
// One last check for failed CodeBuffer::expand: // One last check for failed CodeBuffer::expand:
if (cb->blob() == NULL) { if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
turn_off_compiler(this); turn_off_compiler(this);
return; return;
} }
......
...@@ -66,7 +66,7 @@ bool CompilationPolicy::mustBeCompiled(methodHandle m) { ...@@ -66,7 +66,7 @@ bool CompilationPolicy::mustBeCompiled(methodHandle m) {
if (!canBeCompiled(m)) return false; if (!canBeCompiled(m)) return false;
return !UseInterpreter || // must compile all methods return !UseInterpreter || // must compile all methods
(UseCompiler && AlwaysCompileLoopMethods && m->has_loops()); // eagerly compile loop methods (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
} }
// Returns true if m is allowed to be compiled // Returns true if m is allowed to be compiled
...@@ -137,7 +137,7 @@ void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) { ...@@ -137,7 +137,7 @@ void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
reset_counter_for_invocation_event(m); reset_counter_for_invocation_event(m);
const char* comment = "count"; const char* comment = "count";
if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) { if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
nmethod* nm = m->code(); nmethod* nm = m->code();
if (nm == NULL ) { if (nm == NULL ) {
const char* comment = "count"; const char* comment = "count";
...@@ -162,7 +162,7 @@ void SimpleCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, ...@@ -162,7 +162,7 @@ void SimpleCompPolicy::method_back_branch_event(methodHandle m, int branch_bci,
int hot_count = m->backedge_count(); int hot_count = m->backedge_count();
const char* comment = "backedge_count"; const char* comment = "backedge_count";
if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) { if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK); CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));) NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
...@@ -204,7 +204,7 @@ void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) { ...@@ -204,7 +204,7 @@ void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) {
reset_counter_for_invocation_event(m); reset_counter_for_invocation_event(m);
const char* comment = "count"; const char* comment = "count";
if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) { if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
ResourceMark rm(THREAD); ResourceMark rm(THREAD);
JavaThread *thread = (JavaThread*)THREAD; JavaThread *thread = (JavaThread*)THREAD;
frame fr = thread->last_frame(); frame fr = thread->last_frame();
...@@ -248,7 +248,7 @@ void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int branch_bc ...@@ -248,7 +248,7 @@ void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int branch_bc
int hot_count = m->backedge_count(); int hot_count = m->backedge_count();
const char* comment = "backedge_count"; const char* comment = "backedge_count";
if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) { if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK); CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));) NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
......
...@@ -3117,6 +3117,15 @@ class CommandLineFlags { ...@@ -3117,6 +3117,15 @@ class CommandLineFlags {
notproduct(bool, ExitOnFullCodeCache, false, \ notproduct(bool, ExitOnFullCodeCache, false, \
"Exit the VM if we fill the code cache.") \ "Exit the VM if we fill the code cache.") \
\ \
product(bool, UseCodeCacheFlushing, false, \
"Attempt to clean the code cache before shutting off compiler") \
\
product(intx, MinCodeCacheFlushingInterval, 30, \
"Min number of seconds between code cache cleaning sessions") \
\
product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K, \
"When less than X space left, start code cache cleaning") \
\
/* interpreter debugging */ \ /* interpreter debugging */ \
develop(intx, BinarySwitchThreshold, 5, \ develop(intx, BinarySwitchThreshold, 5, \
"Minimal number of lookupswitch entries for rewriting to binary " \ "Minimal number of lookupswitch entries for rewriting to binary " \
......
...@@ -2146,19 +2146,8 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) { ...@@ -2146,19 +2146,8 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
// CodeCache is full, disable compilation // CodeCache is full, disable compilation
// Ought to log this but compile log is only per compile thread // Ought to log this but compile log is only per compile thread
// and we're some non descript Java thread. // and we're some non descript Java thread.
UseInterpreter = true; MutexUnlocker mu(AdapterHandlerLibrary_lock);
if (UseCompiler || AlwaysCompileLoopMethods ) { CompileBroker::handle_full_code_cache();
#ifndef PRODUCT
warning("CodeCache is full. Compiler has been disabled");
if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(JavaThread::current());
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
}
#endif
UseCompiler = false;
AlwaysCompileLoopMethods = false;
}
return NULL; // Out of CodeCache space return NULL; // Out of CodeCache space
} }
entry->relocate(B->instructions_begin()); entry->relocate(B->instructions_begin());
...@@ -2282,19 +2271,8 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) { ...@@ -2282,19 +2271,8 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
// CodeCache is full, disable compilation // CodeCache is full, disable compilation
// Ought to log this but compile log is only per compile thread // Ought to log this but compile log is only per compile thread
// and we're some non descript Java thread. // and we're some non descript Java thread.
UseInterpreter = true; MutexUnlocker mu(AdapterHandlerLibrary_lock);
if (UseCompiler || AlwaysCompileLoopMethods ) { CompileBroker::handle_full_code_cache();
#ifndef PRODUCT
warning("CodeCache is full. Compiler has been disabled");
if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(JavaThread::current());
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
}
#endif
UseCompiler = false;
AlwaysCompileLoopMethods = false;
}
} }
return nm; return nm;
} }
......
...@@ -33,6 +33,11 @@ int NMethodSweeper::_invocations = 0; // No. of invocations left until we ...@@ -33,6 +33,11 @@ int NMethodSweeper::_invocations = 0; // No. of invocations left until we
jint NMethodSweeper::_locked_seen = 0; jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0; jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_rescan = false; bool NMethodSweeper::_rescan = false;
bool NMethodSweeper::_was_full = false;      // remember if we did emergency unloading
jint NMethodSweeper::_advise_to_sweep = 0;   // flag to indicate code cache getting full
jlong NMethodSweeper::_last_was_full = 0;    // timestamp of last emergency unloading
uint NMethodSweeper::_highest_marked = 0;    // highest compile id dumped at last emergency unloading
long NMethodSweeper::_was_full_traversal = 0; // traversal number at last emergency unloading
class MarkActivationClosure: public CodeBlobClosure { class MarkActivationClosure: public CodeBlobClosure {
public: public:
...@@ -114,6 +119,40 @@ void NMethodSweeper::sweep() { ...@@ -114,6 +119,40 @@ void NMethodSweeper::sweep() {
tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep"); tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
} }
} }
if (UseCodeCacheFlushing) {
if (!CodeCache::needs_flushing()) {
// In a safepoint, no race with setters
_advise_to_sweep = 0;
}
if (was_full()) {
// There was some progress so attempt to restart the compiler
jlong now = os::javaTimeMillis();
jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
jlong curr_interval = now - _last_was_full;
if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
set_was_full(false);
// Update the _last_was_full time so we can tell how fast the
// code cache is filling up
_last_was_full = os::javaTimeMillis();
if (PrintMethodFlushing) {
tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
xtty->stamp();
xtty->end_elem();
}
}
}
}
} }
...@@ -137,12 +176,12 @@ void NMethodSweeper::process_nmethod(nmethod *nm) { ...@@ -137,12 +176,12 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
if (nm->is_marked_for_reclamation()) { if (nm->is_marked_for_reclamation()) {
assert(!nm->is_locked_by_vm(), "must not flush locked nmethods"); assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
if (PrintMethodFlushing && Verbose) { if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod 0x%x (marked for reclamation) being flushed", nm); tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
} }
nm->flush(); nm->flush();
} else { } else {
if (PrintMethodFlushing && Verbose) { if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod 0x%x (zombie) being marked for reclamation", nm); tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
} }
nm->mark_for_reclamation(); nm->mark_for_reclamation();
_rescan = true; _rescan = true;
...@@ -152,7 +191,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) { ...@@ -152,7 +191,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
// stack we can safely convert it to a zombie method // stack we can safely convert it to a zombie method
if (nm->can_not_entrant_be_converted()) { if (nm->can_not_entrant_be_converted()) {
if (PrintMethodFlushing && Verbose) { if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod 0x%x (not entrant) being made zombie", nm); tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
} }
nm->make_zombie(); nm->make_zombie();
_rescan = true; _rescan = true;
...@@ -167,7 +206,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) { ...@@ -167,7 +206,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
} else if (nm->is_unloaded()) { } else if (nm->is_unloaded()) {
// Unloaded code, just make it a zombie // Unloaded code, just make it a zombie
if (PrintMethodFlushing && Verbose) if (PrintMethodFlushing && Verbose)
tty->print_cr("### Nmethod 0x%x (unloaded) being made zombie", nm); tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
if (nm->is_osr_method()) { if (nm->is_osr_method()) {
// No inline caches will ever point to osr methods, so we can just remove it // No inline caches will ever point to osr methods, so we can just remove it
nm->flush(); nm->flush();
...@@ -177,7 +216,167 @@ void NMethodSweeper::process_nmethod(nmethod *nm) { ...@@ -177,7 +216,167 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
} }
} else { } else {
assert(nm->is_alive(), "should be alive"); assert(nm->is_alive(), "should be alive");
if (UseCodeCacheFlushing) {
if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
(_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
CodeCache::needs_flushing()) {
// This method has not been called since the forced cleanup happened
nm->make_not_entrant();
}
}
// Clean-up all inline caches that points to zombie/non-reentrant methods // Clean-up all inline caches that points to zombie/non-reentrant methods
nm->cleanup_inline_caches(); nm->cleanup_inline_caches();
} }
} }
// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on the compile job id) by
// saving the old code in a list in the CodeCache. Then
// execution resumes. If a method so marked is not called by the second
// safepoint from the current one, the nmethod will be marked non-entrant and
// got rid of by normal sweeping. If the method is called, the methodOop's
// _code field is restored and the methodOop/nmethod
// go back to their normal state.
//
// Entry point for compiler threads (and adapter creation) that failed a code
// cache allocation. is_full == true means allocation actually failed;
// is_full == false means the caller merely noticed free space dropped below
// CodeCacheFlushingMinimumFreeSpace and is advising an early sweep.
// May be called concurrently by several threads; atomic flags below ensure
// only one thread triggers the (expensive) VM operation.
void NMethodSweeper::handle_full_code_cache(bool is_full) {
  // Only the first one to notice can advise us to start early cleaning
  if (!is_full){
    // CAS 0 -> 1: losers see old != 0 and return without doing anything.
    // _advise_to_sweep is reset to 0 by the sweeper at a safepoint.
    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
    if (old != 0) {
      return;
    }
  }

  if (is_full) {
    // Since code cache is full, immediately stop new compiles
    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
    if (!did_set) {
      // only the first to notice can start the cleaning,
      // others will go back and block
      return;
    }
    set_was_full(true);

    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
    // (flushing is not keeping up with compilation; leave the compiler disabled
    // rather than thrash with repeated emergency unloads).
    jlong now = os::javaTimeMillis();
    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
    jlong curr_interval = now - _last_was_full;
    if (curr_interval < max_interval) {
      _rescan = true;
      if (PrintMethodFlushing) {
        tty->print_cr("### handle full too often, turning off compiler");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("disable_compiler flushing_interval='" UINT64_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                         curr_interval/1000, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
  }

  // Perform the speculative disconnect at a safepoint via a VM operation;
  // see VM_HandleFullCodeCache::doit() -> speculative_disconnect_nmethods().
  VM_HandleFullCodeCache op(is_full);
  VMThread::execute(&op);

  // rescan again as soon as possible
  _rescan = true;
}
// Body of the VM_HandleFullCodeCache operation: runs in the VMThread at a
// safepoint. Walks the code cache and speculatively disconnects the older
// half of the in-use nmethods (by compile id) so the sweeper can reclaim
// any that are not called again. Restarts or keeps the compiler stopped
// depending on how much space is recovered.
void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  // If there was a race in detecting full code cache, only run
  // one vm op for it or keep the compiler shut off
  debug_only(jlong start = os::javaTimeMillis();)

  if ((!was_full()) && (is_full)) {
    // A previous VM op (from the racing thread) already freed enough space;
    // just restart compilation instead of disconnecting more methods.
    if (!CodeCache::needs_flushing()) {
      if (PrintMethodFlushing) {
        tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
          CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                         CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
        xtty->stamp();
        xtty->end_elem();
      }
      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
      return;
    }
  }

  // Traverse the code cache trying to dump the oldest nmethods
  // flush_target is the midpoint between the last emergency-unload high-water
  // mark and the current max compile id: methods older than it are candidates.
  uint curr_max_comp_id = CompileBroker::get_compilation_id();
  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Cleaning code cache: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes",
        CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("start_cleaning_code_cache live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                      CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
    xtty->stamp();
    xtty->end_elem();
  }

  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
  jint disconnected = 0;
  jint made_not_entrant  = 0;
  while ((nm != NULL)){
    uint curr_comp_id = nm->compile_id();

    // OSR methods cannot be flushed like this. Also, don't flush native methods
    // since they are part of the JDK in most cases
    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {

      if ((nm->method()->code() == nm)) {
        // This method has not been previously considered for
        // unloading or it was restored already
        CodeCache::speculatively_disconnect(nm);
        disconnected++;
      } else if (nm->is_speculatively_disconnected()) {
        // This method was previously considered for preemptive unloading and was not called since then
        // Decay the counters so a restored method does not immediately retrigger
        // compilation at the same hotness.
        nm->method()->invocation_counter()->decay();
        nm->method()->backedge_counter()->decay();
        nm->make_not_entrant();
        made_not_entrant++;
      }

      // Track the highest compile id touched so the next emergency unload
      // targets the half of the cache compiled after this one.
      if (curr_comp_id > _highest_marked) {
        _highest_marked = curr_comp_id;
      }
    }
    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
  }

  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("stop_cleaning_code_cache disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                      disconnected, made_not_entrant, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
    xtty->stamp();
    xtty->end_elem();
  }

  // Shut off compiler. Sweeper will run exiting from this safepoint
  // and turn it back on if it clears enough space
  if (was_full()) {
    _last_was_full = os::javaTimeMillis();
    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
  }

  // After two more traversals the sweeper will get rid of unrestored nmethods
  _was_full_traversal = _traversals;
#ifdef ASSERT
  jlong end = os::javaTimeMillis();
  if(PrintMethodFlushing && Verbose) {
    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
  }
#endif
}
...@@ -38,6 +38,11 @@ class NMethodSweeper : public AllStatic { ...@@ -38,6 +38,11 @@ class NMethodSweeper : public AllStatic {
static int _locked_seen; // Number of locked nmethods encountered during the scan static int _locked_seen; // Number of locked nmethods encountered during the scan
static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
static bool _was_full; // remember if we did emergency unloading
static jint _advise_to_sweep; // flag to indicate code cache getting full
static jlong _last_was_full; // timestamp of last emergency unloading
static uint _highest_marked; // highest compile id dumped at last emergency unloading
static long _was_full_traversal; // trav number at last emergency unloading
static void process_nmethod(nmethod *nm); static void process_nmethod(nmethod *nm);
public: public:
...@@ -51,4 +56,10 @@ class NMethodSweeper : public AllStatic { ...@@ -51,4 +56,10 @@ class NMethodSweeper : public AllStatic {
// changes to false at safepoint so we can never overwrite it with false. // changes to false at safepoint so we can never overwrite it with false.
_rescan = true; _rescan = true;
} }
static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure
static void set_was_full(bool state) { _was_full = state; }
static bool was_full() { return _was_full; }
}; };
...@@ -151,6 +151,10 @@ void VM_ZombieAll::doit() { ...@@ -151,6 +151,10 @@ void VM_ZombieAll::doit() {
#endif // !PRODUCT #endif // !PRODUCT
// Executed by the VMThread at a safepoint: delegate to the sweeper to
// speculatively disconnect old nmethods (see NMethodSweeper for details).
void VM_HandleFullCodeCache::doit() {
  NMethodSweeper::speculative_disconnect_nmethods(_is_full);
}
void VM_Verify::doit() { void VM_Verify::doit() {
Universe::verify(); Universe::verify();
} }
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
template(DeoptimizeFrame) \ template(DeoptimizeFrame) \
template(DeoptimizeAll) \ template(DeoptimizeAll) \
template(ZombieAll) \ template(ZombieAll) \
template(HandleFullCodeCache) \
template(Verify) \ template(Verify) \
template(PrintJNI) \ template(PrintJNI) \
template(HeapDumper) \ template(HeapDumper) \
...@@ -241,6 +242,16 @@ class VM_DeoptimizeFrame: public VM_Operation { ...@@ -241,6 +242,16 @@ class VM_DeoptimizeFrame: public VM_Operation {
bool allow_nested_vm_operations() const { return true; } bool allow_nested_vm_operations() const { return true; }
}; };
// VM operation requested by NMethodSweeper::handle_full_code_cache() when the
// code cache is (or is about to be) full; doit() speculatively disconnects
// old nmethods at a safepoint.
class VM_HandleFullCodeCache: public VM_Operation {
 private:
  bool  _is_full;  // true: allocation actually failed; false: early-warning sweep
 public:
  VM_HandleFullCodeCache(bool is_full) { _is_full = is_full; }
  VMOp_Type type() const { return VMOp_HandleFullCodeCache; }
  void doit();
  // Safe to run while another VM operation is in progress.
  bool allow_nested_vm_operations() const { return true; }
};
#ifndef PRODUCT #ifndef PRODUCT
class VM_DeoptimizeAll: public VM_Operation { class VM_DeoptimizeAll: public VM_Operation {
private: private:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册