Commit f676ecf8 authored by asaha

Merge

@@ -758,3 +758,4 @@ ab64d7ea4f48ea4bdbcc43d4a653be157d9c29e3 jdk8u66-b12
 a6f2a7ba281291f5dab79fa494f7cfaa6232c88b jdk8u66-b17
 b8f426369187c32551f0a3d571d933908988c81c jdk8u72-b00
 c0205eddb31766ece562483595ec28a7506971e9 jdk8u72-b01
+15ef554f2f2e0a8d7c330191432fcd2126d19dab jdk8u72-b02
@@ -521,15 +521,17 @@ void CodeCache::gc_prologue() {
 void CodeCache::gc_epilogue() {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      assert(!nm->is_unloaded(), "Tautology");
-      if (needs_cache_clean()) {
-        nm->cleanup_inline_caches();
+  NOT_DEBUG(if (needs_cache_clean())) {
+    FOR_ALL_ALIVE_BLOBS(cb) {
+      if (cb->is_nmethod()) {
+        nmethod *nm = (nmethod*)cb;
+        assert(!nm->is_unloaded(), "Tautology");
+        DEBUG_ONLY(if (needs_cache_clean())) {
+          nm->cleanup_inline_caches();
+        }
+        DEBUG_ONLY(nm->verify());
+        DEBUG_ONLY(nm->verify_oop_relocations());
       }
-      DEBUG_ONLY(nm->verify());
-      DEBUG_ONLY(nm->verify_oop_relocations());
     }
   }
   set_needs_cache_clean(false);
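Note on the reshaped gc_epilogue: in product builds the whole blob walk is now skipped unless the cache actually needs cleaning, while debug builds still visit every alive blob so the per-nmethod verification keeps running. This relies on HotSpot's DEBUG_ONLY/NOT_DEBUG macro pair; a minimal sketch of their definitions (paraphrasing utilities/macros.hpp, not part of this diff):

#ifdef ASSERT
// Debug build: DEBUG_ONLY(code) emits the code, NOT_DEBUG(code) emits nothing,
// so every alive blob is still walked and nm->verify() runs.
#define DEBUG_ONLY(code) code
#define NOT_DEBUG(code)
#else
// Product build: the roles flip, so the FOR_ALL_ALIVE_BLOBS walk is guarded
// by needs_cache_clean() and the verification calls compile away.
#define DEBUG_ONLY(code)
#define NOT_DEBUG(code) code
#endif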
@@ -734,27 +736,6 @@ int CodeCache::mark_for_deoptimization(Method* dependee) {
   return number_of_marked_CodeBlobs;
 }
 
-void CodeCache::make_marked_nmethods_zombies() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    if (nm->is_marked_for_deoptimization()) {
-
-      // If the nmethod has already been made non-entrant and it can be converted
-      // then zombie it now. Otherwise make it non-entrant and it will eventually
-      // be zombied when it is no longer seen on the stack. Note that the nmethod
-      // might be "entrant" and not on the stack and so could be zombied immediately
-      // but we can't tell because we don't track it on stack until it becomes
-      // non-entrant.
-
-      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
-        nm->make_zombie();
-      } else {
-        nm->make_not_entrant();
-      }
-    }
-  }
-}
-
 void CodeCache::make_marked_nmethods_not_entrant() {
   assert_locked_or_safepoint(CodeCache_lock);
   FOR_ALL_ALIVE_NMETHODS(nm) {
......
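The deleted make_marked_nmethods_zombies() performed two state changes in one pass. After this change the code cache only makes marked nmethods not entrant, and the not_entrant-to-zombie conversion is left to the sweeper once no activations remain (see the NMethodSweeper and VM_Deoptimize hunks below). For orientation, the state machine involved is roughly the enum declared in nmethod.hpp (paraphrased, not quoted from this changeset):

// nmethod states; transitions only move forward. The sweeper performs
// not_entrant -> zombie only when can_convert_to_zombie() reports that
// no activations of the method remain on any stack.
enum {
  in_use      = 0,  // executable nmethod
  not_entrant = 1,  // marked for deoptimization; activations may still exist
  zombie      = 2,  // no activations remain; ready to be purged
  unloaded    = 3   // class loader is gone; must never be entered again
};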
@@ -179,7 +179,6 @@ class CodeCache : AllStatic {
   static void mark_all_nmethods_for_deoptimization();
   static int  mark_for_deoptimization(Method* dependee);
-  static void make_marked_nmethods_zombies();
   static void make_marked_nmethods_not_entrant();
 
   // tells how many nmethods have dependencies
......
@@ -155,6 +155,14 @@ address CompiledIC::stub_address() const {
   return _ic_call->destination();
 }
 
+// Clears the IC stub if the compiled IC is in transition state
+void CompiledIC::clear_ic_stub() {
+  if (is_in_transition_state()) {
+    ICStub* stub = ICStub_from_destination_address(stub_address());
+    stub->clear();
+  }
+}
+
 
 //-----------------------------------------------------------------------------
 // High-level access to an inline cache. Guaranteed to be MT-safe.
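clear_ic_stub() only acts when the inline cache is "in transition", i.e. its call destination still points into the InlineCacheBuffer at a pending ICStub rather than at a real entry point. A minimal sketch of the predicate it relies on, assuming the surrounding CompiledIC implementation is unchanged by this commit:

// An IC is in transition while its destination points at a pending ICStub
// inside the InlineCacheBuffer; clear_ic_stub() discards that pending patch.
bool CompiledIC::is_in_transition_state() const {
  return InlineCacheBuffer::contains(_ic_call->destination());
}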
@@ -279,6 +287,7 @@ bool CompiledIC::is_call_to_compiled() const {
   assert( is_c1_method ||
          !is_monomorphic ||
           is_optimized() ||
+          !caller->is_alive() ||
           (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
 #endif // ASSERT
   return is_monomorphic;
@@ -313,7 +322,7 @@ bool CompiledIC::is_call_to_interpreted() const {
 }
 
-void CompiledIC::set_to_clean() {
+void CompiledIC::set_to_clean(bool in_use) {
   assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
   if (TraceInlineCacheClearing || TraceICs) {
     tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
@@ -329,17 +338,14 @@ void CompiledIC::set_to_clean() {
   // A zombie transition will always be safe, since the metadata has already been set to NULL, so
   // we only need to patch the destination
-  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
+  bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
 
   if (safe_transition) {
     // Kill any leftover stub we might have too
-    if (is_in_transition_state()) {
-      ICStub* old_stub = ICStub_from_destination_address(stub_address());
-      old_stub->clear();
-    }
+    clear_ic_stub();
     if (is_optimized()) {
       set_ic_destination(entry);
     } else {
       set_ic_destination_and_value(entry, (void*)NULL);
     }
   } else {
......
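The new in_use parameter lets a caller assert that the owning nmethod can no longer be executed (for example, because it has been unloaded), which makes patching the inline cache in place safe even outside a safepoint; that is exactly what the added !in_use term contributes to safe_transition. When the transition is not safe, the else branch elided above defers the change through the InlineCacheBuffer; a sketch of that branch, assuming it is untouched by this commit:

  } else {
    // Unsafe transition: queue an ICStub so the IC is patched to the clean
    // entry at the next safepoint instead of being written in place here.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }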
@@ -228,8 +228,9 @@ class CompiledIC: public ResourceObj {
   //
   // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
   //
-  void set_to_clean();  // Can only be called during a safepoint operation
+  void set_to_clean(bool in_use = true);
   void set_to_monomorphic(CompiledICInfo& info);
+  void clear_ic_stub();
 
   // Returns true if successful and false otherwise. The call can fail if memory
   // allocation in the code cache fails.
......
@@ -1148,9 +1148,20 @@ void nmethod::clear_inline_caches() {
   }
 }
 
+// Clear ICStubs of all compiled ICs
+void nmethod::clear_ic_stubs() {
+  assert_locked_or_safepoint(CompiledIC_lock);
+  RelocIterator iter(this);
+  while(iter.next()) {
+    if (iter.type() == relocInfo::virtual_call_type) {
+      CompiledIC* ic = CompiledIC_at(&iter);
+      ic->clear_ic_stub();
+    }
+  }
+}
+
 void nmethod::cleanup_inline_caches() {
-
   assert_locked_or_safepoint(CompiledIC_lock);
@@ -1166,7 +1177,8 @@ void nmethod::cleanup_inline_caches() {
     // In fact, why are we bothering to look at oops in a non-entrant method??
   }
 
-  // Find all calls in an nmethod, and clear the ones that points to zombie methods
+  // Find all calls in an nmethod and clear the ones that point to non-entrant,
+  // zombie and unloaded nmethods.
   ResourceMark rm;
   RelocIterator iter(this, low_boundary);
   while(iter.next()) {
@@ -1178,8 +1190,8 @@ void nmethod::cleanup_inline_caches() {
         CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
         if( cb != NULL && cb->is_nmethod() ) {
           nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
+          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
         }
         break;
       }
@@ -1188,7 +1200,7 @@ void nmethod::cleanup_inline_caches() {
         CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
         if( cb != NULL && cb->is_nmethod() ) {
           nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
+          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
           if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
         }
         break;
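Note the asymmetry between the two call kinds above: virtual-call ICs are now cleaned with ic->set_to_clean(is_alive()), so a dead caller takes the always-safe in-place path, while static call sites keep the parameterless csc->set_to_clean(). The is_alive() query passed as the argument is roughly the following accessor from nmethod.hpp (paraphrased):

// An nmethod counts as alive through the in_use and not_entrant states;
// zombie and unloaded nmethods are dead, so their ICs may be patched freely.
bool nmethod::is_alive() const {
  return _state == in_use || _state == not_entrant;
}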
@@ -1279,7 +1291,7 @@ void nmethod::mark_as_seen_on_stack() {
 // Tell if a non-entrant method can be converted to a zombie (i.e.,
 // there are no activations on the stack, not in use by the VM,
 // and not in use by the ServiceThread)
-bool nmethod::can_not_entrant_be_converted() {
+bool nmethod::can_convert_to_zombie() {
   assert(is_not_entrant(), "must be a non-entrant method");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
@@ -2695,7 +2707,7 @@ void nmethod::verify() {
   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
   // seems odd.
 
-  if( is_zombie() || is_not_entrant() )
+  if (is_zombie() || is_not_entrant() || is_unloaded())
     return;
 
   // Make sure all the entry points are correctly aligned for patching.
......
@@ -577,6 +577,7 @@ public:
 
   // Inline cache support
   void clear_inline_caches();
+  void clear_ic_stubs();
   void cleanup_inline_caches();
   bool inlinecache_check_contains(address addr) const {
     return (addr >= code_begin() && addr < verified_entry_point());
@@ -604,7 +605,7 @@ public:
 
   // See comment at definition of _last_seen_on_stack
   void mark_as_seen_on_stack();
-  bool can_not_entrant_be_converted();
+  bool can_convert_to_zombie();
 
   // Evolution support. We make old (discarded) compiled methods point to new Method*s.
   void set_method(Method* method) { _method = method; }
......
@@ -3751,7 +3751,7 @@ void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) {
     // Deoptimize all activations depending on marked nmethods
     Deoptimization::deoptimize_dependents();
 
-    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
+    // Make the dependent methods not entrant
     CodeCache::make_marked_nmethods_not_entrant();
 
     // From now on we know that the dependency information is complete
......
@@ -538,10 +538,14 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
   } else if (nm->is_not_entrant()) {
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
-    if (nm->can_not_entrant_be_converted()) {
+    if (nm->can_convert_to_zombie()) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
+      // Clear ICStubs to prevent back patching stubs of zombie or unloaded
+      // nmethods during the next safepoint (see ICStub::finalize).
+      MutexLocker cl(CompiledIC_lock);
+      nm->clear_ic_stubs();
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       _zombified_count++;
@@ -567,6 +571,12 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
       release_nmethod(nm);
       _flushed_count++;
     } else {
+      {
+        // Clean ICs of unloaded nmethods as well because they may reference other
+        // unloaded nmethods that may be flushed earlier in the sweeper cycle.
+        MutexLocker cl(CompiledIC_lock);
+        nm->cleanup_inline_caches();
+      }
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       _zombified_count++;
......
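The ICStub::finalize reference in the first sweeper hunk is the crux of the fix: at the next safepoint every non-empty ICStub patches its saved destination back into the owning inline cache, and if that destination's nmethod was zombied or flushed in the meantime, the back patch would reinstall a dead call target. Clearing the stubs before make_zombie() closes that window. A rough sketch of the hazard, paraphrasing the ICStub implementation of this era (not part of this diff):

void ICStub::finalize() {
  if (!is_empty()) {
    ResourceMark rm;
    CompiledIC* ic = CompiledIC_at(CodeCache::find_nmethod(ic_site()), ic_site());
    // Writes the stub's recorded destination back into the IC; if that code
    // has since been zombied or flushed, this resurrects a stale target.
    ic->set_ic_destination_and_value(destination(), cached_value());
  }
}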
@@ -106,8 +106,8 @@ void VM_Deoptimize::doit() {
   // Deoptimize all activations depending on marked nmethods
   Deoptimization::deoptimize_dependents();
 
-  // Make the dependent methods zombies
-  CodeCache::make_marked_nmethods_zombies();
+  // Make the dependent methods not entrant
+  CodeCache::make_marked_nmethods_not_entrant();
 }
......