Commit 5c188e0f authored by iveresov

8032463: VirtualDispatch test timeout with DeoptimizeALot

Summary: Introduce code aging for warm method detection
Reviewed-by: kvn, twisti
Parent commit 44f3fdc0
......@@ -414,6 +414,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ set(_trap_request, G4);
__ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
__ delayed()->nop();
ce->add_call_info_here(_info);
......
......@@ -781,7 +781,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
{
__ set_info("deoptimize", dont_gc_arguments);
OopMap* oop_map = save_live_registers(sasm);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), G4);
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);
restore_live_registers(sasm);
......
......@@ -430,6 +430,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
ce->store_parameter(_trap_request, 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
ce->add_call_info_here(_info);
DEBUG_ONLY(__ should_not_reach_here());
......
......@@ -1468,9 +1468,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
case deoptimize_id:
{
StubFrame f(sasm, "deoptimize", dont_gc_arguments);
const int num_rt_args = 1; // thread
const int num_rt_args = 2; // thread, trap_request
OopMap* oop_map = save_live_registers(sasm, num_rt_args);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
f.load_argument(0, rax);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax);
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);
restore_live_registers(sasm);
......
......@@ -450,9 +450,11 @@ class PatchingStub: public CodeStub {
class DeoptimizeStub : public CodeStub {
private:
CodeEmitInfo* _info;
jint _trap_request;
public:
DeoptimizeStub(CodeEmitInfo* info) : _info(new CodeEmitInfo(info)) {}
DeoptimizeStub(CodeEmitInfo* info, Deoptimization::DeoptReason reason, Deoptimization::DeoptAction action) :
_info(new CodeEmitInfo(info)), _trap_request(Deoptimization::make_trap_request(reason, action)) {}
virtual void emit_code(LIR_Assembler* e);
virtual CodeEmitInfo* info() const { return _info; }
......
......@@ -251,6 +251,10 @@ class Compilation: public StackObj {
return env()->comp_level() == CompLevel_full_profile &&
C1UpdateMethodData && MethodData::profile_return();
}
// True if this method should be compiled with a code-aging counter
// (see ciMethod::profile_aging); the counter is decremented on entry
// by LIRGenerator::decrement_age / Parse::decrement_age.
bool age_code() const {
return _method->profile_aging();
}
// will compilation make optimistic assumptions that might lead to
// deoptimization and that the runtime will account for?
bool is_optimistic() const {
......
......@@ -2782,7 +2782,10 @@ void LIRGenerator::do_Base(Base* x) {
__ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
}
}
if (compilation()->age_code()) {
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
decrement_age(info);
}
// increment invocation counters if needed
if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
profile_parameters(x);
......@@ -3328,6 +3331,27 @@ void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool bac
increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
}
// Emit LIR on method entry that decrements this method's nmethod age
// counter and deoptimizes (Reason_tenured / Action_make_not_entrant)
// once the counter reaches zero or below.  This is the C1 side of
// UseCodeAging warm-method detection; see MethodCounters::_nmethod_age.
void LIRGenerator::decrement_age(CodeEmitInfo* info) {
ciMethod* method = info->scope()->method();
// May allocate; returns NULL on failure, in which case we simply skip aging.
MethodCounters* mc_adr = method->ensure_method_counters();
if (mc_adr != NULL) {
// Materialize the raw MethodCounters* address in a pointer register.
LIR_Opr mc = new_pointer_register();
__ move(LIR_OprFact::intptrConst(mc_adr), mc);
int offset = in_bytes(MethodCounters::nmethod_age_offset());
LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
// load -> decrement by one -> store back (plain, unsynchronized update).
LIR_Opr result = new_register(T_INT);
__ load(counter, result);
__ sub(result, LIR_OprFact::intConst(1), result);
__ store(result, counter);
// DeoptimizeStub will reexecute from the current state in code info.
CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
Deoptimization::Action_make_not_entrant);
// Branch to the deopt stub when the decremented counter is <= 0.
__ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
__ branch(lir_cond_lessEqual, T_INT, deopt);
}
}
void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
ciMethod *method, int frequency,
int bci, bool backedge, bool notify) {
......
......@@ -372,7 +372,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
increment_event_counter(info, bci, true);
}
}
void decrement_age(CodeEmitInfo* info);
CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false);
CodeEmitInfo* state_for(Instruction* x);
......
......@@ -685,19 +685,32 @@ JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
JRT_END
// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread))
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread, jint trap_request))
// Called from within the owner thread, so no need for safepoint
RegisterMap reg_map(thread, false);
frame stub_frame = thread->last_frame();
assert(stub_frame.is_runtime_frame(), "sanity check");
assert(stub_frame.is_runtime_frame(), "Sanity check");
frame caller_frame = stub_frame.sender(&reg_map);
nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
assert(nm != NULL, "Sanity check");
methodHandle method(thread, nm->method());
assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
// We are coming from a compiled method; check this is true.
assert(CodeCache::find_nmethod(caller_frame.pc()) != NULL, "sanity");
if (action == Deoptimization::Action_make_not_entrant) {
if (nm->make_not_entrant()) {
if (reason == Deoptimization::Reason_tenured) {
MethodData* trap_mdo = Deoptimization::get_method_data(thread, method, true /*create_if_missing*/);
if (trap_mdo != NULL) {
trap_mdo->inc_tenure_traps();
}
}
}
}
// Deoptimize the caller frame.
Deoptimization::deoptimize_frame(thread, caller_frame.id());
// Return to the now deoptimized frame.
JRT_END
......
......@@ -156,7 +156,7 @@ class Runtime1: public AllStatic {
static void monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock);
static void monitorexit (JavaThread* thread, BasicObjectLock* lock);
static void deoptimize(JavaThread* thread);
static void deoptimize(JavaThread* thread, jint trap_request);
static int access_field_patching(JavaThread* thread);
static int move_klass_patching(JavaThread* thread);
......
......@@ -129,6 +129,7 @@ ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
constantPoolHandle cpool = h_m()->constants();
_signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
_method_data = NULL;
_nmethod_age = h_m()->nmethod_age();
// Take a snapshot of these values, so they will be commensurate with the MDO.
if (ProfileInterpreter || TieredCompilation) {
int invcnt = h_m()->interpreter_invocation_count();
......@@ -1275,6 +1276,14 @@ bool ciMethod::check_call(int refinfo_index, bool is_static) const {
return false;
}
// ------------------------------------------------------------------
// ciMethod::profile_aging
//
// Should the method be compiled with an age counter?  Only when code
// aging is enabled and the recorded age is armed (not unset) but the
// method has not already been classified as hot.
bool ciMethod::profile_aging() const {
  if (!UseCodeAging) {
    return false;
  }
  const int age = nmethod_age();
  return !MethodCounters::is_nmethod_hot(age) &&
         !MethodCounters::is_nmethod_age_unset(age);
}
// ------------------------------------------------------------------
// ciMethod::print_codes
//
......
......@@ -68,6 +68,7 @@ class ciMethod : public ciMetadata {
int _max_locals;
vmIntrinsics::ID _intrinsic_id;
int _handler_count;
int _nmethod_age;
int _interpreter_invocation_count;
int _interpreter_throwout_count;
int _instructions_size;
......@@ -168,6 +169,10 @@ class ciMethod : public ciMetadata {
int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; }
int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; }
int size_of_parameters() const { check_is_loaded(); return _size_of_parameters; }
int nmethod_age() const { check_is_loaded(); return _nmethod_age; }
// Should the method be compiled with an age counter?
bool profile_aging() const;
// Code size for inlining decisions.
int code_size_for_inlining();
......
......@@ -371,6 +371,13 @@ class Method : public Metadata {
}
}
#endif
int nmethod_age() const {
if (method_counters() == NULL) {
return INT_MAX;
} else {
return method_counters()->nmethod_age();
}
}
int invocation_count();
int backedge_count();
......
......@@ -34,4 +34,5 @@ void MethodCounters::clear_counters() {
backedge_counter()->reset();
set_interpreter_throwout_count(0);
set_interpreter_invocation_count(0);
set_nmethod_age(INT_MAX);
}
......@@ -36,6 +36,15 @@ class MethodCounters: public MetaspaceObj {
u2 _number_of_breakpoints; // fullspeed debugging support
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequencey-based optimizations
// NMethod age is a counter for warm methods detection in the code cache sweeper.
// The counter is reset by the sweeper and is decremented by some of the compiled
// code. The counter values are interpreted as follows:
// 1. (HotMethodDetectionLimit..INT_MAX] - initial value, no counters inserted
// 2. (1..HotMethodDetectionLimit) - the method is warm, the counter is used
// to figure out which methods can be flushed.
// 3. (INT_MIN..0] - method is hot and will deopt and get
// recompiled without the counters
int _nmethod_age;
#ifdef TIERED
float _rate; // Events (invocation and backedge counter increments) per millisecond
......@@ -44,7 +53,8 @@ class MethodCounters: public MetaspaceObj {
MethodCounters() : _interpreter_invocation_count(0),
_interpreter_throwout_count(0),
_number_of_breakpoints(0)
_number_of_breakpoints(0),
_nmethod_age(INT_MAX)
#ifdef TIERED
, _rate(0),
_prev_time(0)
......@@ -52,6 +62,10 @@ class MethodCounters: public MetaspaceObj {
{
invocation_counter()->init();
backedge_counter()->init();
if (StressCodeAging) {
set_nmethod_age(HotMethodDetectionLimit);
}
}
public:
......@@ -104,6 +118,24 @@ class MethodCounters: public MetaspaceObj {
InvocationCounter* invocation_counter() { return &_invocation_counter; }
InvocationCounter* backedge_counter() { return &_backedge_counter; }
// Current nmethod age; see the counter-state comment on _nmethod_age.
int nmethod_age() {
return _nmethod_age;
}
void set_nmethod_age(int age) {
_nmethod_age = age;
}
// Re-arm the counter at the detection limit so compiled code starts
// decrementing it again.
void reset_nmethod_age() {
set_nmethod_age(HotMethodDetectionLimit);
}
// Age classification (the counter counts down from HotMethodDetectionLimit):
// hot = counted down to zero; warm = decremented at least once;
// unset = above the limit, i.e. never armed by the sweeper.
static bool is_nmethod_hot(int age) { return age <= 0; }
static bool is_nmethod_warm(int age) { return age < HotMethodDetectionLimit; }
static bool is_nmethod_age_unset(int age) { return age > HotMethodDetectionLimit; }
// Field offset for compiled code that updates the counter in place.
static ByteSize nmethod_age_offset() {
return byte_offset_of(MethodCounters, _nmethod_age);
}
static ByteSize interpreter_invocation_counter_offset() {
return byte_offset_of(MethodCounters, _interpreter_invocation_count);
}
......
......@@ -1130,6 +1130,7 @@ void MethodData::init() {
_backedge_counter.init();
_invocation_counter_start = 0;
_backedge_counter_start = 0;
_tenure_traps = 0;
_num_loops = 0;
_num_blocks = 0;
_highest_comp_level = 0;
......
......@@ -2059,6 +2059,7 @@ private:
// Counter values at the time profiling started.
int _invocation_counter_start;
int _backedge_counter_start;
uint _tenure_traps;
#if INCLUDE_RTM_OPT
// State of RTM code generation during compilation of the method
......@@ -2398,6 +2399,12 @@ public:
method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff");
}
}
// Number of Reason_tenured (code aging) deopts recorded for this method.
uint tenure_traps() const {
return _tenure_traps;
}
// Plain increment; callers serialize via make_not_entrant() to guard
// against simultaneous deopts in multiple threads.
void inc_tenure_traps() {
_tenure_traps += 1;
}
// Return pointer to area dedicated to parameters in MDO
ParametersTypeData* parameters_type_data() const {
......
......@@ -1089,6 +1089,7 @@ void Compile::Init(int aliaslevel) {
set_do_scheduling(OptoScheduling);
set_do_count_invocations(false);
set_do_method_data_update(false);
set_age_code(has_method() && method()->profile_aging());
set_rtm_state(NoRTM); // No RTM lock eliding by default
#if INCLUDE_RTM_OPT
if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) {
......
......@@ -311,6 +311,7 @@ class Compile : public Phase {
bool _do_freq_based_layout; // True if we intend to do frequency based block layout
bool _do_count_invocations; // True if we generate code to count invocations
bool _do_method_data_update; // True if we generate code to update MethodData*s
bool _age_code; // True if we need to profile code age (decrement the aging counter)
int _AliasLevel; // Locally-adjusted version of AliasLevel flag.
bool _print_assembly; // True if we should dump assembly code for this compilation
bool _print_inlining; // True if we should print inlining for this compilation
......@@ -584,6 +585,8 @@ class Compile : public Phase {
void set_do_count_invocations(bool z){ _do_count_invocations = z; }
bool do_method_data_update() const { return _do_method_data_update; }
void set_do_method_data_update(bool z) { _do_method_data_update = z; }
bool age_code() const { return _age_code; }
void set_age_code(bool z) { _age_code = z; }
int AliasLevel() const { return _AliasLevel; }
bool print_assembly() const { return _print_assembly; }
void set_print_assembly(bool z) { _print_assembly = z; }
......
......@@ -581,6 +581,7 @@ class Parse : public GraphKit {
void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
void decrement_age();
// helper functions for methodData style profiling
void test_counter_against_threshold(Node* cnt, int limit);
void increment_and_test_invocation_counter(int limit);
......
......@@ -568,6 +568,9 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Pars
} else {
set_map(entry_map);
do_method_entry();
if (depth() == 1 && C->age_code()) {
decrement_age();
}
}
if (depth() == 1) {
// Add check to deoptimize the nmethod if RTM state was changed
......@@ -2048,6 +2051,31 @@ void Parse::rtm_deopt() {
#endif
}
// Emit ideal-graph nodes on method entry that decrement this method's
// nmethod age counter and take a Reason_tenured uncommon trap
// (make_not_entrant) once the counter reaches zero.  C2 counterpart of
// LIRGenerator::decrement_age for UseCodeAging warm-method detection.
void Parse::decrement_age() {
MethodCounters* mc = method()->ensure_method_counters();
if (mc == NULL) {
// Could not allocate MethodCounters; abort this compilation attempt.
C->record_failure("Must have MCs");
return;
}
assert(!is_osr_parse(), "Not doing this for OSRs");
// Set starting bci for uncommon trap.
set_parse_bci(0);
// Raw-pointer access to the counter cell inside the MethodCounters.
const TypePtr* adr_type = TypeRawPtr::make((address)mc);
Node* mc_adr = makecon(adr_type);
Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset()));
// load -> subtract one -> store back, all unordered (no synchronization).
Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
Node* decr = _gvn.transform(new (C) SubINode(cnt, makecon(TypeInt::ONE)));
store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
// Test (decr > 0): the failing path (counter exhausted) falls into the trap.
Node *chk = _gvn.transform(new (C) CmpINode(decr, makecon(TypeInt::ZERO)));
Node* tst = _gvn.transform(new (C) BoolNode(chk, BoolTest::gt));
{ BuildCutout unless(this, tst, PROB_ALWAYS);
uncommon_trap(Deoptimization::Reason_tenured,
Deoptimization::Action_make_not_entrant);
}
}
//------------------------------return_current---------------------------------
// Append current _map to _exit_return
void Parse::return_current(Node* value) {
......
......@@ -744,6 +744,8 @@ int Deoptimization::deoptimize_dependents() {
return 0;
}
Deoptimization::DeoptAction Deoptimization::_unloaded_action
= Deoptimization::Action_reinterpret;
#ifdef COMPILER2
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
......@@ -1185,6 +1187,23 @@ JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int by
}
JRT_END
// Return the MethodData* for method m, building an MDO in the interpreter
// first if create_if_missing is set and none exists yet.  May return NULL:
// creation not requested, creation failed (e.g. OutOfMemory), or an
// exception was already pending on entry.
MethodData*
Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
bool create_if_missing) {
// Expose the thread under the conventional THREAD name used by the
// exception-checking macros below.
Thread* THREAD = thread;
MethodData* mdo = m()->method_data();
if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
// Build an MDO. Ignore errors like OutOfMemory;
// that simply means we won't have an MDO to update.
Method::build_interpreter_method_data(m, THREAD);
if (HAS_PENDING_EXCEPTION) {
assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
CLEAR_PENDING_EXCEPTION;
}
// Re-read: another thread may have installed the MDO, or ours succeeded.
mdo = m()->method_data();
}
return mdo;
}
#if defined(COMPILER2) || defined(SHARK)
void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
......@@ -1285,7 +1304,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
// Ensure that we can record deopt. history:
// Need MDO to record RTM code generation state.
bool create_if_missing = ProfileTraps RTM_OPT_ONLY( || UseRTMLocking );
bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking );
MethodData* trap_mdo =
get_method_data(thread, trap_method, create_if_missing);
......@@ -1421,7 +1440,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
//
// The other actions cause immediate removal of the present code.
bool update_trap_state = true;
bool update_trap_state = (reason != Reason_tenured);
bool make_not_entrant = false;
bool make_not_compilable = false;
bool reprofile = false;
......@@ -1548,7 +1567,6 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
reprofile = true;
}
}
// Take requested actions on the method:
......@@ -1577,6 +1595,11 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
trap_mdo->atomic_set_rtm_state(ProfileRTM);
}
#endif
// For code aging we count traps separately here, using make_not_entrant()
// as a guard against simultaneous deopts in multiple threads.
if (reason == Reason_tenured && trap_mdo != NULL) {
trap_mdo->inc_tenure_traps();
}
}
if (inc_recompile_count) {
......@@ -1609,24 +1632,6 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
}
JRT_END
MethodData*
Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
bool create_if_missing) {
Thread* THREAD = thread;
MethodData* mdo = m()->method_data();
if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
// Build an MDO. Ignore errors like OutOfMemory;
// that simply means we won't have an MDO to update.
Method::build_interpreter_method_data(m, THREAD);
if (HAS_PENDING_EXCEPTION) {
assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
CLEAR_PENDING_EXCEPTION;
}
mdo = m()->method_data();
}
return mdo;
}
ProfileData*
Deoptimization::query_update_method_data(MethodData* trap_mdo,
int trap_bci,
......@@ -1813,8 +1818,6 @@ const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
//--------------------------------statics--------------------------------------
Deoptimization::DeoptAction Deoptimization::_unloaded_action
= Deoptimization::Action_reinterpret;
const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
// Note: Keep this in sync. with enum DeoptReason.
"none",
......
......@@ -62,6 +62,7 @@ class Deoptimization : AllStatic {
Reason_speculate_class_check, // saw unexpected object class from type speculation
Reason_speculate_null_check, // saw unexpected null from type speculation
Reason_rtm_state_change, // rtm state change detected
Reason_tenured, // age of the code has reached the limit
Reason_LIMIT,
// Note: Keep this enum in sync. with _trap_reason_name.
Reason_RECORDED_LIMIT = Reason_bimorphic // some are not recorded per bc
......@@ -357,8 +358,8 @@ class Deoptimization : AllStatic {
// returning to a deoptimized caller
static void popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address);
private:
static MethodData* get_method_data(JavaThread* thread, methodHandle m, bool create_if_missing);
private:
// Update the mdo's count and per-BCI reason bits, returning previous state:
static ProfileData* query_update_method_data(MethodData* trap_mdo,
int trap_bci,
......
......@@ -2561,6 +2561,20 @@ class CommandLineFlags {
diagnostic(bool, PrintMethodFlushingStatistics, false, \
"print statistics about method flushing") \
\
diagnostic(intx, HotMethodDetectionLimit, 100000, \
"Number of compiled code invocations after which " \
"the method is considered as hot by the flusher") \
\
diagnostic(intx, MinPassesBeforeFlush, 10, \
"Minimum number of sweeper passes before an nmethod " \
"can be flushed") \
\
product(bool, UseCodeAging, true, \
"Insert counter to detect warm methods") \
\
diagnostic(bool, StressCodeAging, false, \
"Start with counters compiled in") \
\
develop(bool, UseRelocIndex, false, \
"Use an index to speed random access to relocations") \
\
......
......@@ -573,8 +573,21 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
SWEEP(nm);
}
} else {
possibly_flush(nm);
// Clean-up all inline caches that point to zombie/non-reentrant methods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
}
return freed_memory;
}
void NMethodSweeper::possibly_flush(nmethod* nm) {
if (UseCodeCacheFlushing) {
if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
bool make_not_entrant = false;
// Do not make native methods and OSR-methods not-entrant
nm->dec_hotness_counter();
// Get the initial value of the hotness counter. This value depends on the
......@@ -587,15 +600,60 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
// NmethodSweepActivity. If the current hotness counter - which decreases from its initial
// value until it is reset by stack walking - is smaller than the computed threshold, the
// corresponding nmethod is considered for removal.
if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) {
// A method is marked as not-entrant if the method is
// 1) 'old enough': nm->hotness_counter() < threshold
// 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
// 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush)
// The second condition is necessary if we are dealing with very small code cache
// sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
// The second condition ensures that methods are not immediately made not-entrant
// after compilation.
make_not_entrant = true;
}
// The stack-scanning low-cost detection may not see the method was used (which can happen for
// flat profiles). Check the age counter for possible data.
if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) {
MethodCounters* mc = nm->method()->method_counters();
if (mc != NULL) {
// Snapshot the value as it's changed concurrently
int age = mc->nmethod_age();
if (MethodCounters::is_nmethod_hot(age)) {
// The method has gone through flushing, and it became hot enough that it deopted
// before we could take a look at it. Give it more time to appear in the stack traces,
// proportional to the number of deopts.
MethodData* md = nm->method()->method_data();
if (md != NULL && time_since_reset > (int)(MinPassesBeforeFlush * (md->tenure_traps() + 1))) {
// It's been long enough, we still haven't seen it on stack.
// Try to flush it, but enable counters the next time.
mc->reset_nmethod_age();
} else {
make_not_entrant = false;
}
} else if (MethodCounters::is_nmethod_warm(age)) {
// Method has counters enabled, and the method was used within
// previous MinPassesBeforeFlush sweeps. Reset the counter. Stay in the existing
// compiled state.
mc->reset_nmethod_age();
// delay the next check
nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
make_not_entrant = false;
} else if (MethodCounters::is_nmethod_age_unset(age)) {
// No counters were used before. Set the counters to the detection
// limit value. If the method is going to be used again it will be compiled
// with counters that we're going to use for analysis the next time.
mc->reset_nmethod_age();
} else {
// Method was totally idle for 10 sweeps
// The counter already has the initial value, flush it and may be recompile
// later with counters
}
}
}
if (make_not_entrant) {
nm->make_not_entrant();
// Code cache state change is tracked in make_not_entrant()
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
......@@ -604,12 +662,6 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
}
}
}
// Clean-up all inline caches that point to zombie/non-reentrant methods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
}
return freed_memory;
}
// Print out some state information about the current sweep and the
......
......@@ -111,6 +111,7 @@ class NMethodSweeper : public AllStatic {
static int hotness_counter_reset_val();
static void report_state_change(nmethod* nm);
static void possibly_enable_sweeper();
static void possibly_flush(nmethod* nm);
static void print(); // Printing/debugging
};
......
......@@ -361,10 +361,12 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
nonstatic_field(MethodData, _arg_local, intx) \
nonstatic_field(MethodData, _arg_stack, intx) \
nonstatic_field(MethodData, _arg_returned, intx) \
nonstatic_field(MethodData, _tenure_traps, uint) \
nonstatic_field(DataLayout, _header._struct._tag, u1) \
nonstatic_field(DataLayout, _header._struct._flags, u1) \
nonstatic_field(DataLayout, _header._struct._bci, u2) \
nonstatic_field(DataLayout, _cells[0], intptr_t) \
nonstatic_field(MethodCounters, _nmethod_age, int) \
nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \
nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \
nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register