Commit f9119c4d authored by G goetz

8024468: PPC64 (part 201): cppInterpreter: implement bytecode profiling

Summary: Implement profiling for C2 JIT compilation. Also enable new cppInterpreter features.
Reviewed-by: kvn
Parent 7ac26907
@@ -220,7 +220,7 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
    }
    InvocationCounter *counter = mcs->invocation_counter();
    counter->increment();
-    if (counter->reached_InvocationLimit()) {
+    if (counter->reached_InvocationLimit(mcs->backedge_counter())) {
      CALL_VM_NOCHECK(
        InterpreterRuntime::frequency_counter_overflow(thread, NULL));
      if (HAS_PENDING_EXCEPTION)
......
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// This file defines a set of macros which are used by the c++-interpreter
// for updating a method's methodData object.
#ifndef SHARE_VM_INTERPRETER_BYTECODEINTERPRETERPROFILING_HPP
#define SHARE_VM_INTERPRETER_BYTECODEINTERPRETERPROFILING_HPP
// Global settings /////////////////////////////////////////////////////////////
// Enables profiling support.
#if defined(COMPILER2)
#define CC_INTERP_PROFILE
#endif
// Enables assertions for profiling code (also works in product-builds).
// #define CC_INTERP_PROFILE_WITH_ASSERTIONS
#ifdef CC_INTERP
// Empty dummy implementations if profiling code is switched off. //////////////
#ifndef CC_INTERP_PROFILE
#define SET_MDX(mdx)
#define BI_PROFILE_GET_OR_CREATE_METHOD_DATA(exception_handler) \
if (ProfileInterpreter) { \
ShouldNotReachHere(); \
}
#define BI_PROFILE_ALIGN_TO_CURRENT_BCI()
#define BI_PROFILE_UPDATE_JUMP()
#define BI_PROFILE_UPDATE_BRANCH(is_taken)
#define BI_PROFILE_UPDATE_RET(bci)
#define BI_PROFILE_SUBTYPECHECK_FAILED(receiver)
#define BI_PROFILE_UPDATE_CHECKCAST(null_seen, receiver)
#define BI_PROFILE_UPDATE_INSTANCEOF(null_seen, receiver)
#define BI_PROFILE_UPDATE_CALL()
#define BI_PROFILE_UPDATE_FINALCALL()
#define BI_PROFILE_UPDATE_VIRTUALCALL(receiver)
#define BI_PROFILE_UPDATE_SWITCH(switch_index)
#else
// Non-dummy implementations ///////////////////////////////////////////////////
// Accessors for the current method data pointer 'mdx'.
#define MDX() (istate->mdx())
#define SET_MDX(mdx) \
if (TraceProfileInterpreter) { \
/* Let it look like TraceBytecodes' format. */ \
tty->print_cr("[%d] %4d " \
"mdx " PTR_FORMAT "(%d)" \
" " \
" \t-> " PTR_FORMAT "(%d)", \
(int) THREAD->osthread()->thread_id(), \
BCI(), \
MDX(), \
(MDX() == NULL \
? 0 \
: istate->method()->method_data()->dp_to_di((address)MDX())), \
mdx, \
istate->method()->method_data()->dp_to_di((address)mdx) \
); \
}; \
istate->set_mdx(mdx);
// Dumps the profiling method data for the current method.
#ifdef PRODUCT
#define BI_PROFILE_PRINT_METHOD_DATA()
#else // PRODUCT
#define BI_PROFILE_PRINT_METHOD_DATA() \
{ \
ttyLocker ttyl; \
MethodData *md = istate->method()->method_data(); \
tty->cr(); \
tty->print("method data at mdx " PTR_FORMAT "(0) for", \
md->data_layout_at(md->bci_to_di(0))); \
istate->method()->print_short_name(tty); \
tty->cr(); \
if (md != NULL) { \
md->print_data_on(tty); \
address mdx = (address) MDX(); \
if (mdx != NULL) { \
tty->print_cr("current mdx " PTR_FORMAT "(%d)", \
mdx, \
istate->method()->method_data()->dp_to_di(mdx)); \
} \
} else { \
tty->print_cr("no method data"); \
} \
}
#endif // PRODUCT
// Gets or creates the profiling method data and initializes mdx.
#define BI_PROFILE_GET_OR_CREATE_METHOD_DATA(exception_handler) \
if (ProfileInterpreter && MDX() == NULL) { \
/* Mdx is not yet initialized for this activation. */ \
MethodData *md = istate->method()->method_data(); \
if (md == NULL) { \
MethodCounters* mcs; \
GET_METHOD_COUNTERS(mcs); \
/* The profiling method data doesn't exist for this method, */ \
/* create it if the counters have overflowed. */ \
if (mcs->invocation_counter() \
->reached_ProfileLimit(mcs->backedge_counter())) { \
/* Must use CALL_VM, because an async exception may be pending. */ \
CALL_VM((InterpreterRuntime::profile_method(THREAD)), \
exception_handler); \
md = istate->method()->method_data(); \
if (md != NULL) { \
if (TraceProfileInterpreter) { \
BI_PROFILE_PRINT_METHOD_DATA(); \
} \
Method *m = istate->method(); \
int bci = m->bci_from(pc); \
jint di = md->bci_to_di(bci); \
SET_MDX(md->data_layout_at(di)); \
} \
} \
} else { \
/* The profiling method data exists, align the method data pointer */ \
/* mdx to the current bytecode index. */ \
if (TraceProfileInterpreter) { \
BI_PROFILE_PRINT_METHOD_DATA(); \
} \
SET_MDX(md->data_layout_at(md->bci_to_di(BCI()))); \
} \
}
// Asserts that the current method data pointer mdx corresponds
// to the current bytecode.
#if defined(CC_INTERP_PROFILE_WITH_ASSERTIONS)
#define BI_PROFILE_CHECK_MDX() \
{ \
MethodData *md = istate->method()->method_data(); \
address mdx = (address) MDX(); \
address mdx2 = (address) md->data_layout_at(md->bci_to_di(BCI())); \
guarantee(md != NULL, "1"); \
guarantee(mdx != NULL, "2"); \
guarantee(mdx2 != NULL, "3"); \
if (mdx != mdx2) { \
BI_PROFILE_PRINT_METHOD_DATA(); \
fatal3("invalid mdx at bci %d:" \
" was " PTR_FORMAT \
" but expected " PTR_FORMAT, \
BCI(), \
mdx, \
mdx2); \
} \
}
#else
#define BI_PROFILE_CHECK_MDX()
#endif
// Aligns the method data pointer mdx to the current bytecode index.
#define BI_PROFILE_ALIGN_TO_CURRENT_BCI() \
if (ProfileInterpreter && MDX() != NULL) { \
MethodData *md = istate->method()->method_data(); \
SET_MDX(md->data_layout_at(md->bci_to_di(BCI()))); \
}
// Updates profiling data for a jump.
#define BI_PROFILE_UPDATE_JUMP() \
if (ProfileInterpreter && MDX() != NULL) { \
BI_PROFILE_CHECK_MDX(); \
JumpData::increment_taken_count_no_overflow(MDX()); \
/* Remember last branch taken count. */ \
mdo_last_branch_taken_count = JumpData::taken_count(MDX()); \
SET_MDX(JumpData::advance_taken(MDX())); \
}
// Updates profiling data for a taken/not taken branch.
#define BI_PROFILE_UPDATE_BRANCH(is_taken) \
if (ProfileInterpreter && MDX() != NULL) { \
BI_PROFILE_CHECK_MDX(); \
if (is_taken) { \
BranchData::increment_taken_count_no_overflow(MDX()); \
/* Remember last branch taken count. */ \
mdo_last_branch_taken_count = BranchData::taken_count(MDX()); \
SET_MDX(BranchData::advance_taken(MDX())); \
} else { \
BranchData::increment_not_taken_count_no_overflow(MDX()); \
SET_MDX(BranchData::advance_not_taken(MDX())); \
} \
}
// Updates profiling data for a ret with given bci.
#define BI_PROFILE_UPDATE_RET(bci) \
if (ProfileInterpreter && MDX() != NULL) { \
BI_PROFILE_CHECK_MDX(); \
MethodData *md = istate->method()->method_data(); \
/* FIXME: there is more to do here than increment and advance(mdx)! */ \
CounterData::increment_count_no_overflow(MDX()); \
SET_MDX(RetData::advance(md, bci)); \
}
// Decrement counter at checkcast if the subtype check fails (as template
// interpreter does!).
#define BI_PROFILE_SUBTYPECHECK_FAILED(receiver) \
if (ProfileInterpreter && MDX() != NULL) { \
BI_PROFILE_CHECK_MDX(); \
ReceiverTypeData::increment_receiver_count_no_overflow(MDX(), receiver); \
ReceiverTypeData::decrement_count(MDX()); \
}
// Updates profiling data for a checkcast (was a null seen? which receiver?).
#define BI_PROFILE_UPDATE_CHECKCAST(null_seen, receiver) \
if (ProfileInterpreter && MDX() != NULL) { \
BI_PROFILE_CHECK_MDX(); \
if (null_seen) { \
ReceiverTypeData::set_null_seen(MDX()); \
} else { \
/* Template interpreter doesn't increment count. */ \
/* ReceiverTypeData::increment_count_no_overflow(MDX()); */ \
ReceiverTypeData::increment_receiver_count_no_overflow(MDX(), receiver); \
} \
SET_MDX(ReceiverTypeData::advance(MDX())); \
}
// Updates profiling data for an instanceof (was a null seen? which receiver?).
#define BI_PROFILE_UPDATE_INSTANCEOF(null_seen, receiver) \
BI_PROFILE_UPDATE_CHECKCAST(null_seen, receiver)
// Updates profiling data for a call.
#define BI_PROFILE_UPDATE_CALL() \
if (ProfileInterpreter && MDX() != NULL) { \
BI_PROFILE_CHECK_MDX(); \
CounterData::increment_count_no_overflow(MDX()); \
SET_MDX(CounterData::advance(MDX())); \
}
// Updates profiling data for a final call.
#define BI_PROFILE_UPDATE_FINALCALL() \
if (ProfileInterpreter && MDX() != NULL) { \
BI_PROFILE_CHECK_MDX(); \
VirtualCallData::increment_count_no_overflow(MDX()); \
SET_MDX(VirtualCallData::advance(MDX())); \
}
// Updates profiling data for a virtual call with given receiver Klass.
#define BI_PROFILE_UPDATE_VIRTUALCALL(receiver) \
if (ProfileInterpreter && MDX() != NULL) { \
BI_PROFILE_CHECK_MDX(); \
VirtualCallData::increment_receiver_count_no_overflow(MDX(), receiver); \
SET_MDX(VirtualCallData::advance(MDX())); \
}
// Updates profiling data for a switch (tableswitch or lookupswitch) with
// given taken index (-1 means default case was taken).
#define BI_PROFILE_UPDATE_SWITCH(switch_index) \
if (ProfileInterpreter && MDX() != NULL) { \
BI_PROFILE_CHECK_MDX(); \
MultiBranchData::increment_count_no_overflow(MDX(), switch_index); \
SET_MDX(MultiBranchData::advance(MDX(), switch_index)); \
}
// The end /////////////////////////////////////////////////////////////////////
#endif // CC_INTERP_PROFILE
#endif // CC_INTERP
#endif // SHARE_VM_INTERPRETER_BYTECODEINTERPRETERPROFILING_HPP
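For orientation, here is a minimal, hedged sketch of how these macros are meant to be used from the C++ interpreter's dispatch loop; it assumes the usual bytecodeInterpreter.cpp helpers (CASE, STACK_INT, UPDATE_PC_AND_TOS, DO_BACKEDGE_CHECKS, CONTINUE) and a local mdo_last_branch_taken_count, and is not the literal interpreter code:

// Sketch only: profiling a conditional branch bytecode. BI_PROFILE_UPDATE_BRANCH
// bumps the taken/not-taken counter in the current BranchData cell and advances
// mdx in step with the pc (it expands to nothing unless COMPILER2 is built).
CASE(_ifeq): {
  const bool is_taken = (STACK_INT(-1) == 0);        // ifeq: branch if TOS == 0
  BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/is_taken);   // update MDO, move mdx
  const int skip = is_taken ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;
  address branch_pc = pc;
  UPDATE_PC_AND_TOS(skip, -1);                       // pop operand, advance pc
  DO_BACKEDGE_CHECKS(skip, branch_pc);               // backedge counting / OSR
  CONTINUE;
}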
@@ -241,18 +241,15 @@ IRT_END
//------------------------------------------------------------------------------------------------------------------------
// Exceptions
-// Assume the compiler is (or will be) interested in this event.
-// If necessary, create an MDO to hold the information, and record it.
-void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
-  assert(ProfileTraps, "call me only if profiling");
-  methodHandle trap_method(thread, method(thread));
+void InterpreterRuntime::note_trap_inner(JavaThread* thread, int reason,
+                                         methodHandle trap_method, int trap_bci, TRAPS) {
  if (trap_method.not_null()) {
    MethodData* trap_mdo = trap_method->method_data();
    if (trap_mdo == NULL) {
      Method::build_interpreter_method_data(trap_method, THREAD);
      if (HAS_PENDING_EXCEPTION) {
-        assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
+        assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())),
+               "we expect only an OOM error here");
        CLEAR_PENDING_EXCEPTION;
      }
      trap_mdo = trap_method->method_data();
@@ -261,12 +258,42 @@ void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
    if (trap_mdo != NULL) {
      // Update per-method count of trap events. The interpreter
      // is updating the MDO to simulate the effect of compiler traps.
-      int trap_bci = trap_method->bci_from(bcp(thread));
      Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason);
    }
  }
}
// Assume the compiler is (or will be) interested in this event.
// If necessary, create an MDO to hold the information, and record it.
void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
assert(ProfileTraps, "call me only if profiling");
methodHandle trap_method(thread, method(thread));
int trap_bci = trap_method->bci_from(bcp(thread));
note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
}
#ifdef CC_INTERP
// Same as the legacy note_trap, but with more arguments.
IRT_ENTRY(void, InterpreterRuntime::note_trap(JavaThread* thread, int reason, Method *method, int trap_bci))
methodHandle trap_method(method);
note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
IRT_END
// Class Deoptimization is not visible in BytecodeInterpreter, so we need a wrapper
// for each exception.
void InterpreterRuntime::note_nullCheck_trap(JavaThread* thread, Method *method, int trap_bci)
{ if (ProfileTraps) note_trap(thread, Deoptimization::Reason_null_check, method, trap_bci); }
void InterpreterRuntime::note_div0Check_trap(JavaThread* thread, Method *method, int trap_bci)
{ if (ProfileTraps) note_trap(thread, Deoptimization::Reason_div0_check, method, trap_bci); }
void InterpreterRuntime::note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci)
{ if (ProfileTraps) note_trap(thread, Deoptimization::Reason_range_check, method, trap_bci); }
void InterpreterRuntime::note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci)
{ if (ProfileTraps) note_trap(thread, Deoptimization::Reason_class_check, method, trap_bci); }
void InterpreterRuntime::note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci)
{ if (ProfileTraps) note_trap(thread, Deoptimization::Reason_array_check, method, trap_bci); }
#endif // CC_INTERP
static Handle get_preinitialized_exception(Klass* k, TRAPS) {
  // get klass
  InstanceKlass* klass = InstanceKlass::cast(k);
......
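Similarly, a hedged sketch of how the interpreter is expected to use the new trap wrappers, e.g. when a null check fails; the surrounding helpers (STACK_OBJECT, BCI, istate) and the exact throw sequence are assumptions, not the literal bytecodeInterpreter.cpp change:

// Sketch only: record the null_check trap at the current bci before throwing,
// so C2 can later decide between implicit and explicit null checks here.
oop obj = STACK_OBJECT(-1);
if (obj == NULL) {
  // The wrapper checks ProfileTraps itself and forwards to the extended
  // note_trap(thread, reason, method, trap_bci) entry added above.
  InterpreterRuntime::note_nullCheck_trap(THREAD, istate->method(), BCI());
  // ... then raise java.lang.NullPointerException as before ...
}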
@@ -66,9 +66,15 @@ class InterpreterRuntime: AllStatic {
  static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i)  { return method(thread)->constants()->cache()->entry_at(i); }
  static ConstantPoolCacheEntry* cache_entry(JavaThread *thread)            { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); }
static void note_trap_inner(JavaThread* thread, int reason,
methodHandle trap_method, int trap_bci, TRAPS);
  static void note_trap(JavaThread *thread, int reason, TRAPS);
#ifdef CC_INTERP
// Profile traps in C++ interpreter.
static void note_trap(JavaThread* thread, int reason, Method *method, int trap_bci);
#endif // CC_INTERP
-  // Inner work method for Interpreter's frequency counter overflow
+  // Inner work method for Interpreter's frequency counter overflow.
  static nmethod* frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp);
 public:
@@ -100,6 +106,17 @@ class InterpreterRuntime: AllStatic {
#endif
  static void throw_pending_exception(JavaThread* thread);
#ifdef CC_INTERP
// Profile traps in C++ interpreter.
static void note_nullCheck_trap (JavaThread* thread, Method *method, int trap_bci);
static void note_div0Check_trap (JavaThread* thread, Method *method, int trap_bci);
static void note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci);
static void note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci);
static void note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci);
  // A dummy for macros that should not profile traps.
static void note_no_trap(JavaThread* thread, Method *method, int trap_bci) {}
#endif // CC_INTERP
  // Statics & fields
  static void resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode);
......
@@ -99,16 +99,24 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
  int get_BackwardBranchLimit() const { return InterpreterBackwardBranchLimit >> number_of_noncount_bits; }
  int get_ProfileLimit() const { return InterpreterProfileLimit >> number_of_noncount_bits; }
+#ifdef CC_INTERP
  // Test counter using scaled limits like the asm interpreter would do rather than doing
  // the shifts to normalize the counter.
-  bool reached_InvocationLimit() const { return _counter >= (unsigned int) InterpreterInvocationLimit; }
-  bool reached_BackwardBranchLimit() const { return _counter >= (unsigned int) InterpreterBackwardBranchLimit; }
-  // Do this just like asm interpreter does for max speed
-  bool reached_ProfileLimit(InvocationCounter *back_edge_count) const {
-    return (_counter && count_mask) + back_edge_count->_counter >= (unsigned int) InterpreterProfileLimit;
-  }
+  // Checks sum of invocation_counter and backedge_counter as the template interpreter does.
+  bool reached_InvocationLimit(InvocationCounter *back_edge_count) const {
+    return (_counter & count_mask) + (back_edge_count->_counter & count_mask) >=
+           (unsigned int) InterpreterInvocationLimit;
+  }
+  bool reached_BackwardBranchLimit(InvocationCounter *back_edge_count) const {
+    return (_counter & count_mask) + (back_edge_count->_counter & count_mask) >=
+           (unsigned int) InterpreterBackwardBranchLimit;
+  }
+  // Do this just like asm interpreter does for max speed.
+  bool reached_ProfileLimit(InvocationCounter *back_edge_count) const {
+    return (_counter & count_mask) + (back_edge_count->_counter & count_mask) >=
+           (unsigned int) InterpreterProfileLimit;
+  }
+#endif // CC_INTERP
  void increment() { _counter += count_increment; }
......
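To make the arithmetic of the new checks concrete: the low number_of_noncount_bits of _counter are status bits and the real count lives above them, so both counters are masked with count_mask before the sum is compared against the pre-scaled limit. A small standalone illustration with toy values (the shift of 3 and the limits below are assumptions for the example, not the VM's actual defaults):

#include <cstdio>

// Toy model of InvocationCounter's layout: assume 3 non-count (status) bits,
// so the real count is stored shifted left by 3 and count_mask clears them.
static const unsigned int kNoncountBits = 3;
static const unsigned int kCountMask    = ~0u << kNoncountBits;

// Mirrors reached_ProfileLimit(): masked sum of the invocation and backedge
// counters compared against the already-scaled profile limit.
static bool reached_profile_limit(unsigned int inv, unsigned int backedge,
                                  unsigned int scaled_limit) {
  return (inv & kCountMask) + (backedge & kCountMask) >= scaled_limit;
}

int main() {
  unsigned int inv      = (100u << kNoncountBits) | 0x1;  // 100 invocations + status bits
  unsigned int backedge = ( 30u << kNoncountBits) | 0x2;  //  30 backedges   + status bits
  unsigned int limit    = 128u << kNoncountBits;          // profile after 128 events total
  std::printf("profile? %d\n", reached_profile_limit(inv, backedge, limit)); // prints 1
  return 0;
}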
@@ -244,6 +244,11 @@ address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  return mdp;
}
#ifdef CC_INTERP
DataLayout* RetData::advance(MethodData *md, int bci) {
return (DataLayout*) md->bci_to_dp(bci);
}
#endif // CC_INTERP
#ifndef PRODUCT
void RetData::print_data_on(outputStream* st) {
......
@@ -225,6 +225,11 @@ public:
  static ByteSize cell_offset(int index) {
    return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
  }
#ifdef CC_INTERP
static int cell_offset_in_bytes(int index) {
return (int)offset_of(DataLayout, _cells[index]);
}
#endif // CC_INTERP
  // Return a value which, when or-ed as a byte into _flags, sets the flag.
  static int flag_number_to_byte_constant(int flag_number) {
    assert(0 <= flag_number && flag_number < flag_limit, "oob");
@@ -356,6 +361,41 @@ protected:
    _data = data;
  }
#ifdef CC_INTERP
// Static low level accessors for DataLayout with ProfileData's semantics.
static int cell_offset_in_bytes(int index) {
return DataLayout::cell_offset_in_bytes(index);
}
static void increment_uint_at_no_overflow(DataLayout* layout, int index,
int inc = DataLayout::counter_increment) {
uint count = ((uint)layout->cell_at(index)) + inc;
if (count == 0) return;
layout->set_cell_at(index, (intptr_t) count);
}
static int int_at(DataLayout* layout, int index) {
return (int)layout->cell_at(index);
}
static int uint_at(DataLayout* layout, int index) {
return (uint)layout->cell_at(index);
}
static oop oop_at(DataLayout* layout, int index) {
return (oop)layout->cell_at(index);
}
static void set_intptr_at(DataLayout* layout, int index, intptr_t value) {
layout->set_cell_at(index, (intptr_t) value);
}
static void set_flag_at(DataLayout* layout, int flag_number) {
layout->set_flag_at(flag_number);
}
#endif // CC_INTERP
 public:
  // Constructor for invalid ProfileData.
  ProfileData();
@@ -495,6 +535,20 @@ public:
    return cell_offset(bit_cell_count);
  }
#ifdef CC_INTERP
static int bit_data_size_in_bytes() {
return cell_offset_in_bytes(bit_cell_count);
}
static void set_null_seen(DataLayout* layout) {
set_flag_at(layout, null_seen_flag);
}
static DataLayout* advance(DataLayout* layout) {
return (DataLayout*) (((address)layout) + (ssize_t)BitData::bit_data_size_in_bytes());
}
#endif // CC_INTERP
#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
@@ -539,6 +593,25 @@ public:
    set_uint_at(count_off, count);
  }
#ifdef CC_INTERP
static int counter_data_size_in_bytes() {
return cell_offset_in_bytes(counter_cell_count);
}
static void increment_count_no_overflow(DataLayout* layout) {
increment_uint_at_no_overflow(layout, count_off);
}
// Support counter decrementation at checkcast / subtype check failed.
static void decrement_count(DataLayout* layout) {
increment_uint_at_no_overflow(layout, count_off, -1);
}
static DataLayout* advance(DataLayout* layout) {
return (DataLayout*) (((address)layout) + (ssize_t)CounterData::counter_data_size_in_bytes());
}
#endif // CC_INTERP
#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
@@ -609,6 +682,20 @@ public:
    return cell_offset(displacement_off_set);
  }
#ifdef CC_INTERP
static void increment_taken_count_no_overflow(DataLayout* layout) {
increment_uint_at_no_overflow(layout, taken_off_set);
}
static DataLayout* advance_taken(DataLayout* layout) {
return (DataLayout*) (((address)layout) + (ssize_t)int_at(layout, displacement_off_set));
}
static uint taken_count(DataLayout* layout) {
return (uint) uint_at(layout, taken_off_set);
}
#endif // CC_INTERP
  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);
@@ -718,6 +805,43 @@ public:
  // GC support
  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
#ifdef CC_INTERP
static int receiver_type_data_size_in_bytes() {
return cell_offset_in_bytes(static_cell_count());
}
static Klass *receiver_unchecked(DataLayout* layout, uint row) {
oop recv = oop_at(layout, receiver_cell_index(row));
return (Klass *)recv;
}
static void increment_receiver_count_no_overflow(DataLayout* layout, Klass *rcvr) {
const int num_rows = row_limit();
// Receiver already exists?
for (int row = 0; row < num_rows; row++) {
if (receiver_unchecked(layout, row) == rcvr) {
increment_uint_at_no_overflow(layout, receiver_count_cell_index(row));
return;
}
}
// New receiver, find a free slot.
for (int row = 0; row < num_rows; row++) {
if (receiver_unchecked(layout, row) == NULL) {
set_intptr_at(layout, receiver_cell_index(row), (intptr_t)rcvr);
increment_uint_at_no_overflow(layout, receiver_count_cell_index(row));
return;
}
}
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_count_no_overflow(layout);
}
static DataLayout* advance(DataLayout* layout) {
return (DataLayout*) (((address)layout) + (ssize_t)ReceiverTypeData::receiver_type_data_size_in_bytes());
}
#endif // CC_INTERP
#ifndef PRODUCT
  void print_receiver_data_on(outputStream* st);
  void print_data_on(outputStream* st);
@@ -751,6 +875,16 @@ public:
    return cell_offset(static_cell_count());
  }
#ifdef CC_INTERP
static int virtual_call_data_size_in_bytes() {
return cell_offset_in_bytes(static_cell_count());
}
static DataLayout* advance(DataLayout* layout) {
return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes());
}
#endif // CC_INTERP
#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
@@ -847,6 +981,10 @@ public:
    return cell_offset(bci_displacement_cell_index(row));
  }
#ifdef CC_INTERP
static DataLayout* advance(MethodData *md, int bci);
#endif // CC_INTERP
  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);
@@ -911,6 +1049,20 @@ public:
    return cell_offset(branch_cell_count);
  }
#ifdef CC_INTERP
static int branch_data_size_in_bytes() {
return cell_offset_in_bytes(branch_cell_count);
}
static void increment_not_taken_count_no_overflow(DataLayout* layout) {
increment_uint_at_no_overflow(layout, not_taken_off_set);
}
static DataLayout* advance_not_taken(DataLayout* layout) {
return (DataLayout*) (((address)layout) + (ssize_t)BranchData::branch_data_size_in_bytes());
}
#endif // CC_INTERP
  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);
@@ -950,6 +1102,20 @@ protected:
    set_int_at(aindex, value);
  }
#ifdef CC_INTERP
// Static low level accessors for DataLayout with ArrayData's semantics.
static void increment_array_uint_at_no_overflow(DataLayout* layout, int index) {
int aindex = index + array_start_off_set;
increment_uint_at_no_overflow(layout, aindex);
}
static int array_int_at(DataLayout* layout, int index) {
int aindex = index + array_start_off_set;
return int_at(layout, aindex);
}
#endif // CC_INTERP
  // Code generation support for subclasses.
  static ByteSize array_element_offset(int index) {
    return cell_offset(array_start_off_set + index);
@@ -1068,6 +1234,28 @@ public:
    return in_ByteSize(relative_displacement_off_set) * cell_size;
  }
#ifdef CC_INTERP
static void increment_count_no_overflow(DataLayout* layout, int index) {
if (index == -1) {
increment_array_uint_at_no_overflow(layout, default_count_off_set);
} else {
increment_array_uint_at_no_overflow(layout, case_array_start +
index * per_case_cell_count +
relative_count_off_set);
}
}
static DataLayout* advance(DataLayout* layout, int index) {
if (index == -1) {
return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, default_disaplacement_off_set));
} else {
return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, case_array_start +
index * per_case_cell_count +
relative_displacement_off_set));
}
}
#endif // CC_INTERP
  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);
@@ -1146,8 +1334,11 @@ public:
// adjusted in the event of a change in control flow.
//
CC_INTERP_ONLY(class BytecodeInterpreter;)
class MethodData : public Metadata {
  friend class VMStructs;
CC_INTERP_ONLY(friend class BytecodeInterpreter;)
 private:
  friend class ProfileData;
......
@@ -117,10 +117,10 @@ jvmtiCapabilities JvmtiManageCapabilities::init_onload_capabilities() {
  jvmtiCapabilities jc;
  memset(&jc, 0, sizeof(jc));
-#ifndef CC_INTERP
+#ifndef ZERO
  jc.can_pop_frame = 1;
  jc.can_force_early_return = 1;
-#endif // !CC_INTERP
+#endif // !ZERO
  jc.can_get_source_debug_extension = 1;
  jc.can_access_local_variables = 1;
  jc.can_maintain_original_method_order = 1;
......
@@ -3592,8 +3592,8 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
    UseBiasedLocking = false;
  }
-#ifdef CC_INTERP
-  // Clear flags not supported by the C++ interpreter
+#ifdef ZERO
+  // Clear flags not supported on zero.
  FLAG_SET_DEFAULT(ProfileInterpreter, false);
  FLAG_SET_DEFAULT(UseBiasedLocking, false);
  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
......
@@ -2727,6 +2727,11 @@ class CommandLineFlags {
  product_pd(bool, ProfileInterpreter,                                      \
          "Profile at the bytecode level during interpretation")            \
                                                                            \
develop(bool, TraceProfileInterpreter, false, \
"Trace profiling at the bytecode level during interpretation. " \
"This outputs the profiling information collected to improve " \
"jit compilation.") \
\
  develop_pd(bool, ProfileTraps,                                            \
          "Profile deoptimization traps at the bytecode level")             \
                                                                            \
......
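Note on the new flag: TraceProfileInterpreter is declared with develop(...), so it can only be toggled on debug/fastdebug builds of the VM; presumably running such a build with -XX:+ProfileInterpreter -XX:+TraceProfileInterpreter prints the per-bytecode mdx transitions emitted by the SET_MDX macro above (its format is modeled on TraceBytecodes).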