Commit c27343de authored by R roland

8031755: Type speculation should be used to optimize explicit null checks

Summary: feed profiling data about reference nullness to type speculation.
Reviewed-by: kvn, iveresov
Parent 91f9bbf3
......@@ -581,14 +581,14 @@ void ciMethod::assert_call_type_ok(int bci) {
* Check whether profiling provides a type for the argument i to the
* call at bci bci
*
* @param bci bci of the call
* @param i argument number
* @return profiled type
* @param [in]bci bci of the call
* @param [in]i argument number
* @param [out]type profiled type of argument, NULL if none
* @param [out]maybe_null true if null was seen for argument
* @return true if profiling exists
*
* If the profile reports that the argument may be null, return false
* at least for now.
*/
ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
bool ciMethod::argument_profiled_type(int bci, int i, ciKlass*& type, bool& maybe_null) {
if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
ciProfileData* data = method_data()->bci_to_data(bci);
if (data != NULL) {
......@@ -596,82 +596,77 @@ ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
assert_virtual_call_type_ok(bci);
ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
if (i >= call->number_of_arguments()) {
return NULL;
}
ciKlass* type = call->valid_argument_type(i);
if (type != NULL && !call->argument_maybe_null(i)) {
return type;
return false;
}
type = call->valid_argument_type(i);
maybe_null = call->argument_maybe_null(i);
return true;
} else if (data->is_CallTypeData()) {
assert_call_type_ok(bci);
ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
if (i >= call->number_of_arguments()) {
return NULL;
}
ciKlass* type = call->valid_argument_type(i);
if (type != NULL && !call->argument_maybe_null(i)) {
return type;
return false;
}
type = call->valid_argument_type(i);
maybe_null = call->argument_maybe_null(i);
return true;
}
}
}
return NULL;
return false;
}
/**
* Check whether profiling provides a type for the return value from
* the call at bci bci
*
* @param bci bci of the call
* @return profiled type
* @param [in]bci bci of the call
* @param [out]type profiled type of argument, NULL if none
* @param [out]maybe_null true if null was seen for argument
* @return true if profiling exists
*
* If the profile reports that the argument may be null, return false
* at least for now.
*/
ciKlass* ciMethod::return_profiled_type(int bci) {
bool ciMethod::return_profiled_type(int bci, ciKlass*& type, bool& maybe_null) {
if (MethodData::profile_return() && method_data() != NULL && method_data()->is_mature()) {
ciProfileData* data = method_data()->bci_to_data(bci);
if (data != NULL) {
if (data->is_VirtualCallTypeData()) {
assert_virtual_call_type_ok(bci);
ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
ciKlass* type = call->valid_return_type();
if (type != NULL && !call->return_maybe_null()) {
return type;
}
type = call->valid_return_type();
maybe_null = call->return_maybe_null();
return true;
} else if (data->is_CallTypeData()) {
assert_call_type_ok(bci);
ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
ciKlass* type = call->valid_return_type();
if (type != NULL && !call->return_maybe_null()) {
return type;
}
type = call->valid_return_type();
maybe_null = call->return_maybe_null();
return true;
}
}
}
return NULL;
return false;
}
/**
* Check whether profiling provides a type for the parameter i
*
* @param i parameter number
* @return profiled type
* @param [in]i parameter number
* @param [out]type profiled type of parameter, NULL if none
* @param [out]maybe_null true if null was seen for parameter
* @return true if profiling exists
*
* If the profile reports that the argument may be null, return false
* at least for now.
*/
ciKlass* ciMethod::parameter_profiled_type(int i) {
bool ciMethod::parameter_profiled_type(int i, ciKlass*& type, bool& maybe_null) {
if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
ciParametersTypeData* parameters = method_data()->parameters_type_data();
if (parameters != NULL && i < parameters->number_of_parameters()) {
ciKlass* type = parameters->valid_parameter_type(i);
if (type != NULL && !parameters->parameter_maybe_null(i)) {
return type;
}
type = parameters->valid_parameter_type(i);
maybe_null = parameters->parameter_maybe_null(i);
return true;
}
}
return NULL;
return false;
}
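
All three queries above now share the same calling convention: the boolean result only says whether profiling data exists, while the profiled klass and the null-seen bit come back through the reference parameters. Callers seed both with conservative defaults before the call, as the graphKit.cpp hunks further down do. A minimal caller-side sketch (GraphKit context assumed; bci, i and j are placeholders):

bool maybe_null = true;       // conservative default: assume null was seen
ciKlass* better_type = NULL;  // conservative default: no unique profiled type
if (method()->argument_profiled_type(bci, i, better_type, maybe_null)) {
  // Profiling exists; better_type may still be NULL (no single klass observed),
  // and maybe_null reports whether a null reference was ever seen for this argument.
  record_profile_for_speculation(argument(j), better_type, maybe_null);
}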
......
......@@ -234,10 +234,10 @@ class ciMethod : public ciMetadata {
ciCallProfile call_profile_at_bci(int bci);
int interpreter_call_site_count(int bci);
// Does type profiling provide a useful type at this point?
ciKlass* argument_profiled_type(int bci, int i);
ciKlass* parameter_profiled_type(int i);
ciKlass* return_profiled_type(int bci);
// Does type profiling provide any useful information at this point?
bool argument_profiled_type(int bci, int i, ciKlass*& type, bool& maybe_null);
bool parameter_profiled_type(int i, ciKlass*& type, bool& maybe_null);
bool return_profiled_type(int bci, ciKlass*& type, bool& maybe_null);
ciField* get_field_at_bci( int bci, bool &will_link);
ciMethod* get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
......
......@@ -801,6 +801,8 @@ bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
case Bytecodes::_invokeinterface:
case Bytecodes::_if_acmpeq:
case Bytecodes::_if_acmpne:
case Bytecodes::_ifnull:
case Bytecodes::_ifnonnull:
case Bytecodes::_invokestatic:
#ifdef COMPILER2
return UseTypeSpeculation;
......
......@@ -2052,7 +2052,7 @@ public:
// Whole-method sticky bits and flags
enum {
_trap_hist_limit = 19, // decoupled from Deoptimization::Reason_LIMIT
_trap_hist_limit = 20, // decoupled from Deoptimization::Reason_LIMIT
_trap_hist_mask = max_jubyte,
_extra_data_count = 4 // extra DataLayout headers, for trap history
}; // Public flag values
......
......@@ -399,7 +399,7 @@ Node *ConstraintCastNode::Identity( PhaseTransform *phase ) {
// Take 'join' of input and cast-up type
const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
const Type* ft = phase->type(in(1))->filter_speculative(_type);
const Type* ft = phase->type(in(1))->filter_speculative(_type);
#ifdef ASSERT
// Previous versions of this function had some special case logic,
......@@ -493,7 +493,17 @@ const Type *CheckCastPPNode::Value( PhaseTransform *phase ) const {
result = my_type->cast_to_ptr_type( my_type->join_ptr(in_ptr) );
}
}
return result;
// This is the code from TypePtr::xmeet() that prevents us from
// having 2 ways to represent the same type. We have to replicate it
// here because we don't go through meet/join.
if (result->remove_speculative() == result->speculative()) {
result = result->remove_speculative();
}
// Same as above: because we don't go through meet/join, remove the
// speculative type if we know we won't use it.
return result->cleanup_speculative();
// JOIN NOT DONE HERE BECAUSE OF INTERFACE ISSUES.
// FIX THIS (DO THE JOIN) WHEN UNION TYPES APPEAR!
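
The explicit check above mirrors what the new cleanup_speculative() hook (declared in type.hpp below, with its body in the collapsed type.cpp hunk) does after every meet/join/filter: a speculative part that carries no extra information is dropped so the same type keeps a single representation. A rough, hypothetical sketch of that rule, reconstructed from the comments above rather than from the real implementation:

// Hypothetical illustration only; the actual TypePtr::cleanup_speculative()
// lives in the collapsed type.cpp part of this change.
static const Type* cleanup_speculative_sketch(const TypePtr* t) {
  if (t->speculative() == NULL) {
    return t;                            // nothing speculative to clean up
  }
  if (t->remove_speculative() == t->speculative()) {
    return t->remove_speculative();      // speculative part is redundant: drop it
  }
  return t;                              // the real code prunes further cases where
                                         // the speculative part can never be used
}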
......
......@@ -249,8 +249,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
CallGenerator* miss_cg;
Deoptimization::DeoptReason reason = morphism == 2 ?
Deoptimization::Reason_bimorphic :
(speculative_receiver_type == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check);
Deoptimization::Reason_bimorphic : Deoptimization::reason_class_check(speculative_receiver_type != NULL);
if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
!too_many_traps(jvms->method(), jvms->bci(), reason)
) {
......@@ -631,13 +630,7 @@ void Parse::do_call() {
}
BasicType ct = ctype->basic_type();
if (ct == T_OBJECT || ct == T_ARRAY) {
ciKlass* better_type = method()->return_profiled_type(bci());
if (UseTypeSpeculation && better_type != NULL) {
// If profiling reports a single type for the return value,
// feed it to the type system so it can propagate it as a
// speculative type
record_profile_for_speculation(stack(sp()-1), better_type);
}
record_profiled_return_for_speculation();
}
}
......
......@@ -612,10 +612,10 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
// Usual case: Bail to interpreter.
// Reserve the right to recompile if we haven't seen anything yet.
assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
if (treat_throw_as_hot
&& (method()->method_data()->trap_recompiled_at(bci(), NULL)
&& (method()->method_data()->trap_recompiled_at(bci(), m)
|| C->too_many_traps(reason))) {
// We cannot afford to take more traps here. Suffer in the interpreter.
if (C->log() != NULL)
......@@ -1181,7 +1181,8 @@ extern int explicit_null_checks_inserted,
Node* GraphKit::null_check_common(Node* value, BasicType type,
// optional arguments for variations:
bool assert_null,
Node* *null_control) {
Node* *null_control,
bool speculative) {
assert(!assert_null || null_control == NULL, "not both at once");
if (stopped()) return top();
if (!GenerateCompilerNullChecks && !assert_null && null_control == NULL) {
......@@ -1291,13 +1292,13 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
// Branch to failure if null
float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
Deoptimization::DeoptReason reason;
if (assert_null)
if (assert_null) {
reason = Deoptimization::Reason_null_assert;
else if (type == T_OBJECT)
reason = Deoptimization::Reason_null_check;
else
} else if (type == T_OBJECT) {
reason = Deoptimization::reason_null_check(speculative);
} else {
reason = Deoptimization::Reason_div0_check;
}
// %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
// ciMethodData::has_trap_at will return a conservative -1 if any
// must-be-null assertion has failed. This could cause performance
......@@ -2120,21 +2121,36 @@ void GraphKit::round_double_arguments(ciMethod* dest_method) {
*
* @param n node that the type applies to
* @param exact_kls type from profiling
* @param maybe_null did profiling see null?
*
* @return node with improved type
*/
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null) {
const Type* current_type = _gvn.type(n);
assert(UseTypeSpeculation, "type speculation must be on");
const TypeOopPtr* speculative = current_type->speculative();
const TypePtr* speculative = current_type->speculative();
// Should the klass from the profile be recorded in the speculative type?
if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
const TypeOopPtr* xtype = tklass->as_instance_type();
assert(xtype->klass_is_exact(), "Should be exact");
// Any reason to believe n is not null (from this profiling or a previous one)?
const TypePtr* ptr = (maybe_null && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
// record the new speculative type's depth
speculative = xtype->with_inline_depth(jvms()->depth());
speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
speculative = speculative->with_inline_depth(jvms()->depth());
} else if (current_type->would_improve_ptr(maybe_null)) {
// Profiling report that null was never seen so we can change the
// speculative type to non null ptr.
assert(!maybe_null, "nothing to improve");
if (speculative == NULL) {
speculative = TypePtr::NOTNULL;
} else {
const TypePtr* ptr = TypePtr::NOTNULL;
speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
}
}
if (speculative != current_type->speculative()) {
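
The hunk is truncated here, but the pointer kind chosen for the new speculative part above follows a simple rule; a schematic restatement as a toy helper (not HotSpot code):

// Claim not-null for the speculative type unless both the current profile and the
// previously recorded speculation admit that null has been seen.
static TypePtr::PTR speculative_ptr_kind(bool profile_saw_null, bool old_spec_maybe_null) {
  return (profile_saw_null && old_spec_maybe_null) ? TypePtr::BotPTR : TypePtr::NotNull;
}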
......@@ -2167,7 +2183,15 @@ Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
return n;
}
ciKlass* exact_kls = profile_has_unique_klass();
return record_profile_for_speculation(n, exact_kls);
bool maybe_null = true;
if (java_bc() == Bytecodes::_checkcast ||
java_bc() == Bytecodes::_instanceof ||
java_bc() == Bytecodes::_aastore) {
ciProfileData* data = method()->method_data()->bci_to_data(bci());
bool maybe_null = data == NULL ? true : data->as_BitData()->null_seen();
}
return record_profile_for_speculation(n, exact_kls, maybe_null);
return n;
}
/**
......@@ -2187,9 +2211,10 @@ void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method,
for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
ciKlass* better_type = method()->argument_profiled_type(bci(), i);
if (better_type != NULL) {
record_profile_for_speculation(argument(j), better_type);
bool maybe_null = true;
ciKlass* better_type = NULL;
if (method()->argument_profiled_type(bci(), i, better_type, maybe_null)) {
record_profile_for_speculation(argument(j), better_type, maybe_null);
}
i++;
}
......@@ -2206,15 +2231,34 @@ void GraphKit::record_profiled_parameters_for_speculation() {
}
for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
if (_gvn.type(local(i))->isa_oopptr()) {
ciKlass* better_type = method()->parameter_profiled_type(j);
if (better_type != NULL) {
record_profile_for_speculation(local(i), better_type);
bool maybe_null = true;
ciKlass* better_type = NULL;
if (method()->parameter_profiled_type(j, better_type, maybe_null)) {
record_profile_for_speculation(local(i), better_type, maybe_null);
}
j++;
}
}
}
/**
* Record profiling data from return value profiling at an invoke with
* the type system so that it can propagate it (speculation)
*/
void GraphKit::record_profiled_return_for_speculation() {
if (!UseTypeSpeculation) {
return;
}
bool maybe_null = true;
ciKlass* better_type = NULL;
if (method()->return_profiled_type(bci(), better_type, maybe_null)) {
// If profiling reports a single type for the return value,
// feed it to the type system so it can propagate it as a
// speculative type
record_profile_for_speculation(stack(sp()-1), better_type, maybe_null);
}
}
void GraphKit::round_double_result(ciMethod* dest_method) {
// A non-strict method may return a double value which has an extended
// exponent, but this must not be visible in a caller which is 'strict'
......@@ -2294,10 +2338,12 @@ Node* GraphKit::dstore_rounding(Node* n) {
// Null check oop. Set null-path control into Region in slot 3.
// Make a cast-not-nullness use the other not-null control. Return cast.
Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
bool never_see_null, bool safe_for_replace) {
bool never_see_null,
bool safe_for_replace,
bool speculative) {
// Initial NULL check taken path
(*null_control) = top();
Node* cast = null_check_common(value, T_OBJECT, false, null_control);
Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative);
// Generate uncommon_trap:
if (never_see_null && (*null_control) != top()) {
......@@ -2308,7 +2354,8 @@ Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
PreserveJVMState pjvms(this);
set_control(*null_control);
replace_in_map(value, null());
uncommon_trap(Deoptimization::Reason_null_check,
Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative);
uncommon_trap(reason,
Deoptimization::Action_make_not_entrant);
(*null_control) = top(); // NULL path is dead
}
......@@ -2732,11 +2779,16 @@ Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
// recompile; the offending check will be recompiled to handle NULLs.
// If we see several offending BCIs, then all checks in the
// method will be recompiled.
bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
speculating = !_gvn.type(obj)->speculative_maybe_null();
Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
if (UncommonNullCast // Cutout for this technique
&& obj != null() // And not the -Xcomp stupid case?
&& !too_many_traps(Deoptimization::Reason_null_check)
&& !too_many_traps(reason)
) {
if (speculating) {
return true;
}
if (data == NULL)
// Edge case: no mature data. Be optimistic here.
return true;
......@@ -2746,6 +2798,7 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
return !data->as_BitData()->null_seen();
}
speculating = false;
return false;
}
......@@ -2758,7 +2811,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
bool safe_for_replace) {
if (!UseTypeProfile || !TypeProfileCasts) return NULL;
Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);
// Make sure we haven't already deoptimized from this tactic.
if (too_many_traps(reason))
......@@ -2811,7 +2864,7 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
// type == NULL if profiling tells us this object is always null
if (type != NULL) {
Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;
if (!too_many_traps(null_reason) &&
!too_many_traps(class_reason)) {
Node* not_null_obj = NULL;
......@@ -2819,7 +2872,7 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
// there's no need for a null check
if (!not_null) {
Node* null_ctl = top();
not_null_obj = null_check_oop(obj, &null_ctl, true, true);
not_null_obj = null_check_oop(obj, &null_ctl, true, true, true);
assert(null_ctl->is_top(), "no null control here");
} else {
not_null_obj = obj;
......@@ -2867,12 +2920,13 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replac
if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode
data = method()->method_data()->bci_to_data(bci());
}
bool speculative_not_null = false;
bool never_see_null = (ProfileDynamicTypes // aggressive use of profile
&& seems_never_null(obj, data));
&& seems_never_null(obj, data, speculative_not_null));
// Null check; get casted pointer; set region slot 3
Node* null_ctl = top();
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a NULL?
......@@ -2995,12 +3049,13 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
C->set_has_split_ifs(true); // Has chance for split-if optimization
// Use null-cast information if it is available
bool speculative_not_null = false;
bool never_see_null = ((failure_control == NULL) // regular case only
&& seems_never_null(obj, data));
&& seems_never_null(obj, data, speculative_not_null));
// Null check; get casted pointer; set region slot 3
Node* null_ctl = top();
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a NULL?
......
......@@ -351,9 +351,11 @@ class GraphKit : public Phase {
// Return the value cast to not-null.
// Be clever about equivalent dominating null checks.
Node* null_check_common(Node* value, BasicType type,
bool assert_null = false, Node* *null_control = NULL);
bool assert_null = false,
Node* *null_control = NULL,
bool speculative = false);
Node* null_check(Node* value, BasicType type = T_OBJECT) {
return null_check_common(value, type);
return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
}
Node* null_check_receiver() {
assert(argument(0)->bottom_type()->isa_ptr(), "must be");
......@@ -382,10 +384,12 @@ class GraphKit : public Phase {
// If safe_for_replace, then we can replace the value with the cast
// in the parsing map (the cast is guaranteed to dominate the map)
Node* null_check_oop(Node* value, Node* *null_control,
bool never_see_null = false, bool safe_for_replace = false);
bool never_see_null = false,
bool safe_for_replace = false,
bool speculative = false);
// Check the null_seen bit.
bool seems_never_null(Node* obj, ciProfileData* data);
bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);
// Check for unique class for receiver at call
ciKlass* profile_has_unique_klass() {
......@@ -399,10 +403,11 @@ class GraphKit : public Phase {
}
// record type from profiling with the type system
Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
Node* record_profiled_receiver_for_speculation(Node* n);
Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null);
void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
void record_profiled_parameters_for_speculation();
void record_profiled_return_for_speculation();
Node* record_profiled_receiver_for_speculation(Node* n);
// Use the type profile to narrow an object type.
Node* maybe_cast_profiled_receiver(Node* not_null_obj,
......
......@@ -4658,7 +4658,7 @@ bool LibraryCallKit::inline_arraycopy() {
ciKlass* src_k = NULL;
if (!has_src) {
src_k = src_type->speculative_type();
src_k = src_type->speculative_type_not_null();
if (src_k != NULL && src_k->is_array_klass()) {
could_have_src = true;
}
......@@ -4666,7 +4666,7 @@ bool LibraryCallKit::inline_arraycopy() {
ciKlass* dest_k = NULL;
if (!has_dest) {
dest_k = dest_type->speculative_type();
dest_k = dest_type->speculative_type_not_null();
if (dest_k != NULL && dest_k->is_array_klass()) {
could_have_dest = true;
}
......@@ -4738,13 +4738,13 @@ bool LibraryCallKit::inline_arraycopy() {
ciKlass* src_k = top_src->klass();
ciKlass* dest_k = top_dest->klass();
if (!src_spec) {
src_k = src_type->speculative_type();
src_k = src_type->speculative_type_not_null();
if (src_k != NULL && src_k->is_array_klass()) {
could_have_src = true;
}
}
if (!dest_spec) {
dest_k = dest_type->speculative_type();
dest_k = dest_type->speculative_type_not_null();
if (dest_k != NULL && dest_k->is_array_klass()) {
could_have_dest = true;
}
......
......@@ -1288,7 +1288,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
(jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
const Type* tcc = ccast->as_Type()->type();
assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
// Delay transform() call to allow recovery of pre-cast value
// at the control merge.
_gvn.set_type_bottom(ccast);
......@@ -1352,7 +1352,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
if (ccast != NULL) {
const Type* tcc = ccast->as_Type()->type();
assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
assert(tcc != tval && tcc->higher_equal(tval), "must improve");
// Delay transform() call to allow recovery of pre-cast value
// at the control merge.
ccast->set_req(0, control());
......@@ -1393,7 +1393,7 @@ Node* Parse::optimize_cmp_with_klass(Node* c) {
Node* addp = load_klass->in(2);
Node* obj = addp->in(AddPNode::Address);
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
if (obj_type->speculative_type() != NULL) {
if (obj_type->speculative_type_not_null() != NULL) {
ciKlass* k = obj_type->speculative_type();
inc_sp(2);
obj = maybe_cast_profiled_obj(obj, k);
......@@ -2277,6 +2277,14 @@ void Parse::do_one_bytecode() {
maybe_add_safepoint(iter().get_dest());
a = null();
b = pop();
if (!_gvn.type(b)->speculative_maybe_null() &&
!too_many_traps(Deoptimization::Reason_speculate_null_check)) {
inc_sp(1);
Node* null_ctl = top();
b = null_check_oop(b, &null_ctl, true, true, true);
assert(null_ctl->is_top(), "no null control here");
dec_sp(1);
}
c = _gvn.transform( new (C) CmpPNode(b, a) );
do_ifnull(btest, c);
break;
......
......@@ -330,7 +330,7 @@ void NodeHash::check_no_speculative_types() {
Node *sentinel_node = sentinel();
for (uint i = 0; i < max; ++i) {
Node *n = at(i);
if(n != NULL && n != sentinel_node && n->is_Type()) {
if(n != NULL && n != sentinel_node && n->is_Type() && n->outcnt() > 0) {
TypeNode* tn = n->as_Type();
const Type* t = tn->type();
const Type* t_no_spec = t->remove_speculative();
......
This diff has been collapsed.
......@@ -224,7 +224,7 @@ public:
}
// Variant that keeps the speculative part of the types
const Type *meet_speculative(const Type *t) const {
return meet_helper(t, true);
return meet_helper(t, true)->cleanup_speculative();
}
// WIDEN: 'widens' for Ints and other range types
virtual const Type *widen( const Type *old, const Type* limit ) const { return this; }
......@@ -247,7 +247,7 @@ public:
}
// Variant that keeps the speculative part of the types
const Type *join_speculative(const Type *t) const {
return join_helper(t, true);
return join_helper(t, true)->cleanup_speculative();
}
// Modified version of JOIN adapted to the needs Node::Value.
......@@ -259,7 +259,7 @@ public:
}
// Variant that keeps the speculative part of the types
const Type *filter_speculative(const Type *kills) const {
return filter_helper(kills, true);
return filter_helper(kills, true)->cleanup_speculative();
}
#ifdef ASSERT
......@@ -414,15 +414,18 @@ public:
bool require_constant = false,
bool is_autobox_cache = false);
// Speculative type. See TypeInstPtr
virtual const TypeOopPtr* speculative() const { return NULL; }
virtual ciKlass* speculative_type() const { return NULL; }
// Speculative type helper methods. See TypePtr.
virtual const TypePtr* speculative() const { return NULL; }
virtual ciKlass* speculative_type() const { return NULL; }
virtual ciKlass* speculative_type_not_null() const { return NULL; }
virtual bool speculative_maybe_null() const { return true; }
virtual const Type* remove_speculative() const { return this; }
virtual const Type* cleanup_speculative() const { return this; }
virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const { return exact_kls != NULL; }
virtual bool would_improve_ptr(bool maybe_null) const { return !maybe_null; }
const Type* maybe_remove_speculative(bool include_speculative) const;
virtual const Type* remove_speculative() const { return this; }
virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const {
return exact_kls != NULL;
}
virtual bool maybe_null() const { return true; }
private:
// support arrays
......@@ -679,6 +682,7 @@ public:
virtual const Type *xdual() const; // Compute dual right now.
bool ary_must_be_exact() const; // true if arrays of such are never generic
virtual const Type* remove_speculative() const;
virtual const Type* cleanup_speculative() const;
#ifdef ASSERT
// One type is interface, the other is oop
virtual bool interface_vs_oop(const Type *t) const;
......@@ -761,13 +765,48 @@ class TypePtr : public Type {
public:
enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR };
protected:
TypePtr( TYPES t, PTR ptr, int offset ) : Type(t), _ptr(ptr), _offset(offset) {}
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
TypePtr(TYPES t, PTR ptr, int offset,
const TypePtr* speculative = NULL,
int inline_depth = InlineDepthBottom) :
Type(t), _ptr(ptr), _offset(offset), _speculative(speculative),
_inline_depth(inline_depth) {}
static const PTR ptr_meet[lastPTR][lastPTR];
static const PTR ptr_dual[lastPTR];
static const char * const ptr_msg[lastPTR];
enum {
InlineDepthBottom = INT_MAX,
InlineDepthTop = -InlineDepthBottom
};
// Extra type information profiling gave us. We propagate it the
// same way the rest of the type info is propagated. If we want to
// use it, then we have to emit a guard: this part of the type is
// not something we know but something we speculate about the type.
const TypePtr* _speculative;
// For speculative types, we record at what inlining depth the
// profiling point that provided the data is. We want to favor
// profile data coming from outer scopes which are likely better for
// the current compilation.
int _inline_depth;
// utility methods to work on the speculative part of the type
const TypePtr* dual_speculative() const;
const TypePtr* xmeet_speculative(const TypePtr* other) const;
bool eq_speculative(const TypePtr* other) const;
int hash_speculative() const;
const TypePtr* add_offset_speculative(intptr_t offset) const;
#ifndef PRODUCT
void dump_speculative(outputStream *st) const;
#endif
// utility methods to work on the inline depth of the type
int dual_inline_depth() const;
int meet_inline_depth(int depth) const;
#ifndef PRODUCT
void dump_inline_depth(outputStream *st) const;
#endif
public:
const int _offset; // Offset into oop, with TOP & BOT
const PTR _ptr; // Pointer equivalence class
......@@ -775,7 +814,9 @@ public:
const int offset() const { return _offset; }
const PTR ptr() const { return _ptr; }
static const TypePtr *make( TYPES t, PTR ptr, int offset );
static const TypePtr *make(TYPES t, PTR ptr, int offset,
const TypePtr* speculative = NULL,
int inline_depth = InlineDepthBottom);
// Return a 'ptr' version of this type
virtual const Type *cast_to_ptr_type(PTR ptr) const;
......@@ -784,10 +825,13 @@ public:
int xadd_offset( intptr_t offset ) const;
virtual const TypePtr *add_offset( intptr_t offset ) const;
virtual bool eq(const Type *t) const;
virtual int hash() const; // Type specific hashing
virtual bool singleton(void) const; // TRUE if type is a singleton
virtual bool empty(void) const; // TRUE if type is vacuous
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xmeet_helper( const Type *t ) const;
int meet_offset( int offset ) const;
int dual_offset( ) const;
virtual const Type *xdual() const; // Compute dual right now.
......@@ -802,6 +846,20 @@ public:
return ptr_dual[ ptr_meet[ ptr_dual[in_ptr] ] [ dual_ptr() ] ];
}
// Speculative type helper methods.
virtual const TypePtr* speculative() const { return _speculative; }
int inline_depth() const { return _inline_depth; }
virtual ciKlass* speculative_type() const;
virtual ciKlass* speculative_type_not_null() const;
virtual bool speculative_maybe_null() const;
virtual const Type* remove_speculative() const;
virtual const Type* cleanup_speculative() const;
virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const;
virtual bool would_improve_ptr(bool maybe_null) const;
virtual const TypePtr* with_inline_depth(int depth) const;
virtual bool maybe_null() const { return meet_ptr(Null) == ptr(); }
// Tests for relation to centerline of type lattice:
static bool above_centerline(PTR ptr) { return (ptr <= AnyNull); }
static bool below_centerline(PTR ptr) { return (ptr >= NotNull); }
......@@ -850,7 +908,8 @@ public:
// Some kind of oop (Java pointer), either klass or instance or array.
class TypeOopPtr : public TypePtr {
protected:
TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth);
TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id,
const TypePtr* speculative, int inline_depth);
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
......@@ -861,10 +920,6 @@ public:
};
protected:
enum {
InlineDepthBottom = INT_MAX,
InlineDepthTop = -InlineDepthBottom
};
// Oop is NULL, unless this is a constant oop.
ciObject* _const_oop; // Constant oop
// If _klass is NULL, then so is _sig. This is an unloaded klass.
......@@ -880,38 +935,11 @@ protected:
// This is the the node index of the allocation node creating this instance.
int _instance_id;
// Extra type information profiling gave us. We propagate it the
// same way the rest of the type info is propagated. If we want to
// use it, then we have to emit a guard: this part of the type is
// not something we know but something we speculate about the type.
const TypeOopPtr* _speculative;
// For speculative types, we record at what inlining depth the
// profiling point that provided the data is. We want to favor
// profile data coming from outer scopes which are likely better for
// the current compilation.
int _inline_depth;
static const TypeOopPtr* make_from_klass_common(ciKlass* klass, bool klass_change, bool try_for_exact);
int dual_instance_id() const;
int meet_instance_id(int uid) const;
// utility methods to work on the speculative part of the type
const TypeOopPtr* dual_speculative() const;
const TypeOopPtr* xmeet_speculative(const TypeOopPtr* other) const;
bool eq_speculative(const TypeOopPtr* other) const;
int hash_speculative() const;
const TypeOopPtr* add_offset_speculative(intptr_t offset) const;
#ifndef PRODUCT
void dump_speculative(outputStream *st) const;
#endif
// utility methods to work on the inline depth of the type
int dual_inline_depth() const;
int meet_inline_depth(int depth) const;
#ifndef PRODUCT
void dump_inline_depth(outputStream *st) const;
#endif
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
......@@ -941,7 +969,9 @@ public:
bool not_null_elements = false);
// Make a generic (unclassed) pointer to an oop.
static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom);
static const TypeOopPtr* make(PTR ptr, int offset, int instance_id,
const TypePtr* speculative = NULL,
int inline_depth = InlineDepthBottom);
ciObject* const_oop() const { return _const_oop; }
virtual ciKlass* klass() const { return _klass; }
......@@ -955,7 +985,6 @@ public:
bool is_known_instance() const { return _instance_id > 0; }
int instance_id() const { return _instance_id; }
bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; }
virtual const TypeOopPtr* speculative() const { return _speculative; }
virtual intptr_t get_con() const;
......@@ -969,10 +998,13 @@ public:
const TypeKlassPtr* as_klass_type() const;
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
// Speculative type helper methods.
virtual const Type* remove_speculative() const;
virtual const Type* cleanup_speculative() const;
virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const;
virtual const TypePtr* with_inline_depth(int depth) const;
virtual const Type *xmeet(const Type *t) const;
virtual const Type *xdual() const; // Compute dual right now.
// the core of the computation of the meet for TypeOopPtr and for its subclasses
virtual const Type *xmeet_helper(const Type *t) const;
......@@ -982,29 +1014,14 @@ public:
#ifndef PRODUCT
virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
#endif
// Return the speculative type if any
ciKlass* speculative_type() const {
if (_speculative != NULL) {
const TypeOopPtr* speculative = _speculative->join(this)->is_oopptr();
if (speculative->klass_is_exact()) {
return speculative->klass();
}
}
return NULL;
}
int inline_depth() const {
return _inline_depth;
}
virtual const TypeOopPtr* with_inline_depth(int depth) const;
virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const;
};
//------------------------------TypeInstPtr------------------------------------
// Class of Java object pointers, pointing either to non-array Java instances
// or to a Klass* (including array klasses).
class TypeInstPtr : public TypeOopPtr {
TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth);
TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id,
const TypePtr* speculative, int inline_depth);
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
......@@ -1040,7 +1057,10 @@ class TypeInstPtr : public TypeOopPtr {
}
// Make a pointer to an oop.
static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom);
static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset,
int instance_id = InstanceBot,
const TypePtr* speculative = NULL,
int inline_depth = InlineDepthBottom);
/** Create constant type for a constant boxed value */
const Type* get_const_boxed_value() const;
......@@ -1057,9 +1077,10 @@ class TypeInstPtr : public TypeOopPtr {
virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
// Speculative type helper methods.
virtual const Type* remove_speculative() const;
virtual const TypeOopPtr* with_inline_depth(int depth) const;
virtual const TypePtr* with_inline_depth(int depth) const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
......@@ -1081,7 +1102,8 @@ class TypeInstPtr : public TypeOopPtr {
// Class of Java array pointers
class TypeAryPtr : public TypeOopPtr {
TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk,
int offset, int instance_id, bool is_autobox_cache, const TypeOopPtr* speculative, int inline_depth)
int offset, int instance_id, bool is_autobox_cache,
const TypePtr* speculative, int inline_depth)
: TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id, speculative, inline_depth),
_ary(ary),
_is_autobox_cache(is_autobox_cache)
......@@ -1120,9 +1142,15 @@ public:
bool is_autobox_cache() const { return _is_autobox_cache; }
static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom);
static const TypeAryPtr *make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset,
int instance_id = InstanceBot,
const TypePtr* speculative = NULL,
int inline_depth = InlineDepthBottom);
// Constant pointer to array
static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom, bool is_autobox_cache= false);
static const TypeAryPtr *make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset,
int instance_id = InstanceBot,
const TypePtr* speculative = NULL,
int inline_depth = InlineDepthBottom, bool is_autobox_cache = false);
// Return a 'ptr' version of this type
virtual const Type *cast_to_ptr_type(PTR ptr) const;
......@@ -1136,9 +1164,10 @@ public:
virtual bool empty(void) const; // TRUE if type is vacuous
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
// Speculative type helper methods.
virtual const Type* remove_speculative() const;
virtual const TypeOopPtr* with_inline_depth(int depth) const;
virtual const TypePtr* with_inline_depth(int depth) const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
......@@ -1367,9 +1396,8 @@ public:
static const TypeNarrowOop *BOTTOM;
static const TypeNarrowOop *NULL_PTR;
virtual const Type* remove_speculative() const {
return make(_ptrtype->remove_speculative()->is_ptr());
}
virtual const Type* remove_speculative() const;
virtual const Type* cleanup_speculative() const;
#ifndef PRODUCT
virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
......
......@@ -3801,10 +3801,6 @@ jint Arguments::apply_ergo() {
AlwaysIncrementalInline = false;
}
#endif
if (IncrementalInline && FLAG_IS_DEFAULT(MaxNodeLimit)) {
// incremental inlining: bump MaxNodeLimit
FLAG_SET_DEFAULT(MaxNodeLimit, (intx)75000);
}
if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
// nothing to use the profiling, turn if off
FLAG_SET_DEFAULT(TypeProfileLevel, 0);
......
......@@ -1839,6 +1839,7 @@ const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
"predicate",
"loop_limit_check",
"speculate_class_check",
"speculate_null_check",
"rtm_state_change"
};
const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
......
......@@ -60,6 +60,7 @@ class Deoptimization : AllStatic {
Reason_predicate, // compiler generated predicate failed
Reason_loop_limit_check, // compiler generated loop limits check failed
Reason_speculate_class_check, // saw unexpected object class from type speculation
Reason_speculate_null_check, // saw unexpected null from type speculation
Reason_rtm_state_change, // rtm state change detected
Reason_LIMIT,
// Note: Keep this enum in sync. with _trap_reason_name.
......@@ -315,17 +316,27 @@ class Deoptimization : AllStatic {
return Reason_null_check; // recorded per BCI as a null check
else if (reason == Reason_speculate_class_check)
return Reason_class_check;
else if (reason == Reason_speculate_null_check)
return Reason_null_check;
else
return Reason_none;
}
static bool reason_is_speculate(int reason) {
if (reason == Reason_speculate_class_check) {
if (reason == Reason_speculate_class_check || reason == Reason_speculate_null_check) {
return true;
}
return false;
}
static DeoptReason reason_null_check(bool speculative) {
return speculative ? Deoptimization::Reason_speculate_null_check : Deoptimization::Reason_null_check;
}
static DeoptReason reason_class_check(bool speculative) {
return speculative ? Deoptimization::Reason_speculate_class_check : Deoptimization::Reason_class_check;
}
static uint per_method_trap_limit(int reason) {
return reason_is_speculate(reason) ? (uint)PerMethodSpecTrapLimit : (uint)PerMethodTrapLimit;
}
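
A minimal illustration of how the C2 hunks above combine these helpers with the trap bookkeeping (GraphKit context assumed; obj is a placeholder node):

// Speculate only if the speculative part of obj's type claims it is never null.
bool speculating = !_gvn.type(obj)->speculative_maybe_null();
Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
if (!too_many_traps(reason)) {
  // On failure this deoptimizes and recompiles; a speculative reason is charged
  // against the per-method speculative trap budget instead of the regular one.
  uncommon_trap(reason, Deoptimization::Action_make_not_entrant);
}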
......