Commit 0cc413b6 authored by roland

8024070: C2 needs some form of type speculation

Summary: record unused type profile information with the type system, propagate and use it.
Reviewed-by: kvn, twisti
Parent e926cc9f
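What this enables, in a nutshell: even when the static type of a value is loose, the interpreter's type profile often records a single concrete type for it. With this change C2 attaches such a profiled type to the value as a speculative type, propagates it through the type system like any other type information, and emits a class-check guard (deoptimizing on failure) where it actually relies on it. A minimal Java sketch of the pattern this helps, adapted from test2 of the TypeSpeculation regression test included at the end of this commit (class and method names are illustrative):

public class SpeculationSketch {
    static class A { int m() { return 1; } }
    static class B extends A { int m() { return 2; } }
    static class C extends B { int m() { return 3; } }

    static int target(A a) {
        return a.m(); // receiver profile at this call site is polluted
    }

    static int caller(A a, boolean t) {
        A aa;
        if (t) { aa = (B) a; } else { aa = a; }
        // The static type of aa is no better than A, but argument
        // profiling records B, so with -XX:+UseTypeSpeculation C2 can
        // still inline B.m() behind a guard that deoptimizes if the
        // speculative type is ever wrong.
        return target(aa);
    }

    public static void main(String[] args) {
        A a = new A(); B b = new B(); C c = new C();
        // Pollute target()'s call-site profile so plain receiver
        // profiling cannot devirtualize the call.
        for (int i = 0; i < 5000; i++) { target(a); target(b); target(c); }
        int sum = 0;
        for (int i = 0; i < 20000; i++) { sum += caller(b, (i % 2) == 0); }
        System.out.println(sum);
    }
}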
......@@ -565,6 +565,116 @@ void ciCallProfile::add_receiver(ciKlass* receiver, int receiver_count) {
if (_limit < MorphismLimit) _limit++;
}
void ciMethod::assert_virtual_call_type_ok(int bci) {
assert(java_code_at_bci(bci) == Bytecodes::_invokevirtual ||
java_code_at_bci(bci) == Bytecodes::_invokeinterface, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci))));
}
void ciMethod::assert_call_type_ok(int bci) {
assert(java_code_at_bci(bci) == Bytecodes::_invokestatic ||
java_code_at_bci(bci) == Bytecodes::_invokespecial ||
java_code_at_bci(bci) == Bytecodes::_invokedynamic, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci))));
}
/**
* Check whether profiling provides a type for the argument i to the
* call at bci bci
*
* @param bci bci of the call
* @param i argument number
* @return profiled type
*
* If the profile reports that the argument may be null, return NULL,
* at least for now.
*/
ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
ciProfileData* data = method_data()->bci_to_data(bci);
if (data != NULL) {
if (data->is_VirtualCallTypeData()) {
assert_virtual_call_type_ok(bci);
ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
if (i >= call->number_of_arguments()) {
return NULL;
}
ciKlass* type = call->valid_argument_type(i);
if (type != NULL && !call->argument_maybe_null(i)) {
return type;
}
} else if (data->is_CallTypeData()) {
assert_call_type_ok(bci);
ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
if (i >= call->number_of_arguments()) {
return NULL;
}
ciKlass* type = call->valid_argument_type(i);
if (type != NULL && !call->argument_maybe_null(i)) {
return type;
}
}
}
}
return NULL;
}
/**
* Check whether profiling provides a type for the return value from
* the call at bci bci
*
* @param bci bci of the call
* @return profiled type
*
* If the profile reports that the return value may be null, return NULL,
* at least for now.
*/
ciKlass* ciMethod::return_profiled_type(int bci) {
if (MethodData::profile_return() && method_data() != NULL && method_data()->is_mature()) {
ciProfileData* data = method_data()->bci_to_data(bci);
if (data != NULL) {
if (data->is_VirtualCallTypeData()) {
assert_virtual_call_type_ok(bci);
ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
ciKlass* type = call->valid_return_type();
if (type != NULL && !call->return_maybe_null()) {
return type;
}
} else if (data->is_CallTypeData()) {
assert_call_type_ok(bci);
ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
ciKlass* type = call->valid_return_type();
if (type != NULL && !call->return_maybe_null()) {
return type;
}
}
}
}
return NULL;
}
/**
* Check whether profiling provides a type for the parameter i
*
* @param i parameter number
* @return profiled type
*
* If the profile reports that the parameter may be null, return NULL,
* at least for now.
*/
ciKlass* ciMethod::parameter_profiled_type(int i) {
if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
ciParametersTypeData* parameters = method_data()->parameters_type_data();
if (parameters != NULL && i < parameters->number_of_parameters()) {
ciKlass* type = parameters->valid_parameter_type(i);
if (type != NULL && !parameters->parameter_maybe_null(i)) {
return type;
}
}
}
return NULL;
}
// ------------------------------------------------------------------
// ciMethod::find_monomorphic_target
//
......
......@@ -117,6 +117,10 @@ class ciMethod : public ciMetadata {
*bcp = code;
}
// Check bytecode and profile data collected are compatible
void assert_virtual_call_type_ok(int bci);
void assert_call_type_ok(int bci);
public:
// Basic method information.
ciFlags flags() const { check_is_loaded(); return _flags; }
......@@ -230,6 +234,11 @@ class ciMethod : public ciMetadata {
ciCallProfile call_profile_at_bci(int bci);
int interpreter_call_site_count(int bci);
// Does type profiling provide a useful type at this point?
ciKlass* argument_profiled_type(int bci, int i);
ciKlass* parameter_profiled_type(int i);
ciKlass* return_profiled_type(int bci);
ciField* get_field_at_bci( int bci, bool &will_link);
ciMethod* get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
......
......@@ -100,6 +100,10 @@ public:
return valid_ciklass(type(i));
}
bool maybe_null(int i) const {
return was_null_seen(type(i));
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
......@@ -113,6 +117,10 @@ public:
return valid_ciklass(type());
}
bool maybe_null() const {
return was_null_seen(type());
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
......@@ -154,6 +162,14 @@ public:
return ret()->valid_type();
}
bool argument_maybe_null(int i) const {
return args()->maybe_null(i);
}
bool return_maybe_null() const {
return ret()->maybe_null();
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
......@@ -260,6 +276,14 @@ public:
return ret()->valid_type();
}
bool argument_maybe_null(int i) const {
return args()->maybe_null(i);
}
bool return_maybe_null() const {
return ret()->maybe_null();
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
......@@ -305,6 +329,10 @@ public:
return parameters()->valid_type(i);
}
bool parameter_maybe_null(int i) const {
return parameters()->maybe_null(i);
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
......
......@@ -641,7 +641,10 @@
"Enables intrinsification of various java.lang.Math functions") \
\
experimental(bool, ReplaceInParentMaps, false, \
"Propagate type improvements in callers of inlinee if possible")
"Propagate type improvements in callers of inlinee if possible") \
\
experimental(bool, UseTypeSpeculation, false, \
"Speculatively propagate types from profiles")
C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
......
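UseTypeSpeculation is experimental and off by default; because experimental flags must be unlocked first, enabling it looks like this (illustrative command line, reusing the regression test's profiling level):

java -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:TypeProfileLevel=222 TypeSpeculation

The ergonomics added in arguments.cpp below then default ReplaceInParentMaps to true; conversely, when speculation stays off, TypeProfileLevel defaults to 0 so no unused profiling is collected.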
......@@ -486,6 +486,8 @@ class LateInlineStringCallGenerator : public LateInlineCallGenerator {
JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
return new_jvms;
}
virtual bool is_string_late_inline() const { return true; }
};
CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
......@@ -773,7 +775,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove
const int vtable_index = Method::invalid_vtable_index;
CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
if (cg != NULL && cg->is_inline())
return cg;
......@@ -829,6 +831,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
int vtable_index = Method::invalid_vtable_index;
bool call_does_dispatch = false;
ciKlass* speculative_receiver_type = NULL;
if (is_virtual_or_interface) {
ciInstanceKlass* klass = target->holder();
Node* receiver_node = kit.argument(0);
......@@ -837,9 +840,12 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
is_virtual,
call_does_dispatch, vtable_index); // out-parameters
// We lack profiling at this call but type speculation may
// provide us with a type
speculative_receiver_type = receiver_type->speculative_type();
}
CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
if (cg != NULL && cg->is_inline())
return cg;
......
......@@ -74,6 +74,7 @@ class CallGenerator : public ResourceObj {
virtual bool is_late_inline() const { return false; }
// same but for method handle calls
virtual bool is_mh_late_inline() const { return false; }
virtual bool is_string_late_inline() const { return false; }
// for method handle calls: have we tried inlinining the call already?
virtual bool already_attempted() const { ShouldNotReachHere(); return false; }
......
......@@ -1360,7 +1360,7 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
// During the 2nd round of IterGVN, NotNull castings are removed.
// Make sure the Bottom and NotNull variants alias the same.
// Also, make sure exact and non-exact variants alias the same.
if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
}
}
......@@ -1385,6 +1385,9 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
// Also, make sure exact and non-exact variants alias the same.
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
}
if (to->speculative() != NULL) {
tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
}
// Canonicalize the holder of this field
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
// First handle header references such as a LoadKlassNode, even if the
......@@ -2013,6 +2016,12 @@ void Compile::Optimize() {
if (failing()) return;
}
// Remove the speculative part of types and clean up the graph from
// the extra CastPP nodes whose only purpose is to carry them. Do
// that early so that optimizations are not disrupted by the extra
// CastPP nodes.
remove_speculative_types(igvn);
// No more new expensive nodes will be added to the list from here
// so keep only the actual candidates for optimizations.
cleanup_expensive_nodes(igvn);
......@@ -3799,6 +3808,45 @@ void Compile::add_expensive_node(Node * n) {
}
}
/**
* Remove the speculative part of types and clean up the graph
*/
void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
if (UseTypeSpeculation) {
Unique_Node_List worklist;
worklist.push(root());
int modified = 0;
// Go over all type nodes that carry a speculative type, drop the
// speculative part of the type and enqueue the node for an igvn
// which may optimize it out.
for (uint next = 0; next < worklist.size(); ++next) {
Node *n = worklist.at(next);
if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL &&
n->as_Type()->type()->is_oopptr()->speculative() != NULL) {
TypeNode* tn = n->as_Type();
const TypeOopPtr* t = tn->type()->is_oopptr();
bool in_hash = igvn.hash_delete(n);
assert(in_hash, "node should be in igvn hash table");
tn->set_type(t->remove_speculative());
igvn.hash_insert(n);
igvn._worklist.push(n); // give it a chance to go away
modified++;
}
uint max = n->len();
for( uint i = 0; i < max; ++i ) {
Node *m = n->in(i);
if (not_a_node(m)) continue;
worklist.push(m);
}
}
// Drop the speculative part of all types in the igvn's type table
igvn.remove_speculative_types();
if (modified > 0) {
igvn.optimize();
}
}
}
// Auxiliary method to support randomized stressing/fuzzing.
//
// This method can be called an arbitrary number of times, with current count
......
......@@ -424,6 +424,8 @@ class Compile : public Phase {
static int cmp_expensive_nodes(Node** n1, Node** n2);
// Expensive nodes list already sorted?
bool expensive_nodes_sorted() const;
// Remove the speculative part of types and clean up the graph
void remove_speculative_types(PhaseIterGVN &igvn);
// Are we within a PreserveJVMState block?
int _preserve_jvm_state;
......@@ -824,8 +826,8 @@ class Compile : public Phase {
// Decide how to build a call.
// The profile factor is a discount to apply to this site's interp. profile.
CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
bool allow_intrinsics = true, bool delayed_forbidden = false);
bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
return should_delay_string_inlining(call_method, jvms) ||
should_delay_boxing_inlining(call_method, jvms);
......
......@@ -63,7 +63,8 @@ void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMeth
CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
JVMState* jvms, bool allow_inline,
float prof_factor, ciKlass* speculative_receiver_type,
bool allow_intrinsics, bool delayed_forbidden) {
ciMethod* caller = jvms->method();
int bci = jvms->bci();
Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
......@@ -117,7 +118,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
if (cg->is_predicted()) {
// Code without intrinsic but, hopefully, inlined.
CallGenerator* inline_cg = this->call_generator(callee,
vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
if (inline_cg != NULL) {
cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
}
......@@ -212,8 +213,24 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
ciMethod* receiver_method = NULL;
int morphism = profile.morphism();
if (speculative_receiver_type != NULL) {
// We have a speculative type, so we should be able to resolve
// the call. We do that before looking at the profiling at
// this invoke because it may lead to bimorphic inlining which
// a speculative type should help us avoid.
receiver_method = callee->resolve_invoke(jvms->method()->holder(),
speculative_receiver_type);
if (receiver_method == NULL) {
speculative_receiver_type = NULL;
} else {
morphism = 1;
}
}
if (receiver_method == NULL &&
(have_major_receiver || morphism == 1 ||
(morphism == 2 && UseBimorphicInlining))) {
// receiver_method = profile.method();
// Profiles do not suggest methods now. Look it up in the major receiver.
receiver_method = callee->resolve_invoke(jvms->method()->holder(),
......@@ -227,7 +244,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Look up second receiver.
CallGenerator* next_hit_cg = NULL;
ciMethod* next_receiver_method = NULL;
if (morphism == 2 && UseBimorphicInlining) {
next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
profile.receiver(1));
if (next_receiver_method != NULL) {
......@@ -242,11 +259,10 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
}
CallGenerator* miss_cg;
Deoptimization::DeoptReason reason = morphism == 2 ?
Deoptimization::Reason_bimorphic :
Deoptimization::Reason_class_check;
if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
!too_many_traps(jvms->method(), jvms->bci(), reason)
) {
// Generate uncommon trap for class check failure path
......@@ -260,6 +276,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
if (miss_cg != NULL) {
if (next_hit_cg != NULL) {
assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
// We don't need to record dependency on a receiver here and below.
// Whenever we inline, the dependency is added by Parse::Parse().
......@@ -267,7 +284,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
if (miss_cg != NULL) {
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
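// If we are using a speculative type, the profile's receiver
// probability refers to the profiled receiver, not to the speculative
// klass, so treat the speculative guard as always hitting.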
ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0);
float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
if (cg != NULL) return cg;
}
}
......@@ -446,13 +465,16 @@ void Parse::do_call() {
int vtable_index = Method::invalid_vtable_index;
bool call_does_dispatch = false;
// Speculative type of the receiver if any
ciKlass* speculative_receiver_type = NULL;
if (is_virtual_or_interface) {
Node* receiver_node = stack(sp() - nargs);
const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
// call_does_dispatch and vtable_index are out-parameters. They might be changed.
callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type,
is_virtual,
call_does_dispatch, vtable_index); // out-parameters
speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
}
// Note: It's OK to try to inline a virtual call.
......@@ -468,7 +490,7 @@ void Parse::do_call() {
// Decide call tactic.
// This call checks with CHA, the interpreter profile, intrinsics table, etc.
// It decides whether inlining is desirable or not.
CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);
// NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead.
orig_callee = callee = NULL;
......@@ -477,6 +499,10 @@ void Parse::do_call() {
// Round double arguments before call
round_double_arguments(cg->method());
// Feed profiling data for arguments to the type system so it can
// propagate it as speculative types
record_profiled_arguments_for_speculation(cg->method(), bc());
#ifndef PRODUCT
// bump global counters for calls
count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());
......@@ -491,6 +517,13 @@ void Parse::do_call() {
// save across call, for a subsequent cast_not_null.
Node* receiver = has_receiver ? argument(0) : NULL;
// The extra CheckCastPP nodes for speculative types mess with PhaseStringOpts
if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) {
// Feed profiling data for a single receiver to the type system so
// it can propagate it as a speculative type
receiver = record_profiled_receiver_for_speculation(receiver);
}
// Bump method data counters (We profile *before* the call is made
// because exceptions don't return to the call site.)
profile_call(receiver);
......@@ -508,7 +541,7 @@ void Parse::do_call() {
// the call site, perhaps because it did not match a pattern the
// intrinsic was expecting to optimize. Should always be possible to
// get a normal java call that may inline in that case
cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
if ((new_jvms = cg->generate(jvms, this)) == NULL) {
guarantee(failing(), "call failed to generate: calls should work");
return;
......@@ -607,6 +640,16 @@ void Parse::do_call() {
null_assert(peek());
set_bci(iter().cur_bci()); // put it back
}
BasicType ct = ctype->basic_type();
if (ct == T_OBJECT || ct == T_ARRAY) {
ciKlass* better_type = method()->return_profiled_type(bci());
if (UseTypeSpeculation && better_type != NULL) {
// If profiling reports a single type for the return value,
// feed it to the type system so it can propagate it as a
// speculative type
record_profile_for_speculation(stack(sp()-1), better_type);
}
}
}
// Restart record of parsing work after possible inlining of call
......
......@@ -2098,6 +2098,104 @@ void GraphKit::round_double_arguments(ciMethod* dest_method) {
}
}
/**
* Record profiling data exact_kls for Node n with the type system so
* that it can propagate it (speculation)
*
* @param n node that the type applies to
* @param exact_kls type from profiling
*
* @return node with improved type
*/
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
const TypeOopPtr* current_type = _gvn.type(n)->isa_oopptr();
assert(UseTypeSpeculation, "type speculation must be on");
if (exact_kls != NULL &&
// nothing to improve if type is already exact
(current_type == NULL ||
(!current_type->klass_is_exact() &&
(current_type->speculative() == NULL ||
!current_type->speculative()->klass_is_exact())))) {
const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
const TypeOopPtr* xtype = tklass->as_instance_type();
assert(xtype->klass_is_exact(), "Should be exact");
// Build a type with a speculative type (what we think we know
// about the type but will need a guard when we use it)
const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, xtype);
// We're changing the type, we need a new cast node to carry the
// new type. The new type depends on the control: what profiling
// tells us is only valid from here as far as we can tell.
Node* cast = new(C) CastPPNode(n, spec_type);
cast->init_req(0, control());
cast = _gvn.transform(cast);
replace_in_map(n, cast);
n = cast;
}
return n;
}
/**
* Record profiling data from receiver profiling at an invoke with the
* type system so that it can propagate it (speculation)
*
* @param n receiver node
*
* @return node with improved type
*/
Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
if (!UseTypeSpeculation) {
return n;
}
ciKlass* exact_kls = profile_has_unique_klass();
return record_profile_for_speculation(n, exact_kls);
}
/**
* Record profiling data from argument profiling at an invoke with the
* type system so that it can propagate it (speculation)
*
* @param dest_method target method for the call
* @param bc what invoke bytecode is this?
*/
void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
if (!UseTypeSpeculation) {
return;
}
const TypeFunc* tf = TypeFunc::make(dest_method);
int nargs = tf->_domain->_cnt - TypeFunc::Parms;
int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
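// j walks the signature's arguments while i counts only the object
// arguments: those are the only ones the profiling records, and only
// the first TypeProfileArgsLimit of them.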
for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
ciKlass* better_type = method()->argument_profiled_type(bci(), i);
if (better_type != NULL) {
record_profile_for_speculation(argument(j), better_type);
}
i++;
}
}
}
/**
* Record profiling data from parameter profiling at method entry with
* the type system so that it can propagate it (speculation)
*/
void GraphKit::record_profiled_parameters_for_speculation() {
if (!UseTypeSpeculation) {
return;
}
for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
if (_gvn.type(local(i))->isa_oopptr()) {
ciKlass* better_type = method()->parameter_profiled_type(j);
if (better_type != NULL) {
record_profile_for_speculation(local(i), better_type);
}
j++;
}
}
}
void GraphKit::round_double_result(ciMethod* dest_method) {
// A non-strict method may return a double value which has an extended
// exponent, but this must not be visible in a caller which is 'strict'
......@@ -2635,10 +2733,10 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
// If the profile has seen exactly one type, narrow to exactly that type.
// Subsequent type checks will always fold up.
Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
ciKlass* require_klass,
ciKlass* spec_klass,
bool safe_for_replace) {
if (!UseTypeProfile || !TypeProfileCasts) return NULL;
// Make sure we haven't already deoptimized from this tactic.
if (too_many_traps(Deoptimization::Reason_class_check))
......@@ -2646,15 +2744,15 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
// (No, this isn't a call, but it's enough like a virtual call
// to use the same ciMethod accessor to get the profile info...)
// If we have a speculative type use it instead of profiling (which
// may not help us)
ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
if (exact_kls != NULL) { // no cast failures here
if (require_klass == NULL ||
static_subtype_check(require_klass, exact_kls) == SSC_always_true) {
// If we narrow the type to match what the type profile sees or
// the speculative type, we can then remove the rest of the
// cast.
// This is a win, even if the exact_kls is very specific,
// because downstream operations, such as method calls,
// will often benefit from the sharper type.
......@@ -2666,7 +2764,9 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
uncommon_trap(Deoptimization::Reason_class_check,
Deoptimization::Action_maybe_recompile);
}
if (safe_for_replace) {
replace_in_map(not_null_obj, exact_obj);
}
return exact_obj;
}
// assert(ssc == SSC_always_true)... except maybe the profile lied to us.
......@@ -2675,11 +2775,59 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
return NULL;
}
/**
* Cast obj to type and emit guard unless we had too many traps here
* already
*
* @param obj node being cast
* @param type type to cast the node to
* @param not_null true if we know node cannot be null
*/
Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
ciKlass* type,
bool not_null) {
// type == NULL if profiling tells us this object is always null
if (type != NULL) {
if (!too_many_traps(Deoptimization::Reason_null_check) &&
!too_many_traps(Deoptimization::Reason_class_check)) {
Node* not_null_obj = NULL;
// not_null is true if we know the object is not null and
// there's no need for a null check
if (!not_null) {
Node* null_ctl = top();
not_null_obj = null_check_oop(obj, &null_ctl, true, true);
assert(null_ctl->is_top(), "no null control here");
} else {
not_null_obj = obj;
}
Node* exact_obj = not_null_obj;
ciKlass* exact_kls = type;
Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
&exact_obj);
{
PreserveJVMState pjvms(this);
set_control(slow_ctl);
uncommon_trap(Deoptimization::Reason_class_check,
Deoptimization::Action_maybe_recompile);
}
replace_in_map(not_null_obj, exact_obj);
obj = exact_obj;
}
} else {
if (!too_many_traps(Deoptimization::Reason_null_assert)) {
Node* exact_obj = null_assert(obj);
replace_in_map(obj, exact_obj);
obj = exact_obj;
}
}
return obj;
}
//-------------------------------gen_instanceof--------------------------------
// Generate an instance-of idiom. Used by both the instance-of bytecode
// and the reflective instance-of call.
Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
kill_dead_locals(); // Benefit all the uncommon traps
assert( !stopped(), "dead parse path should be checked in callers" );
assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
......@@ -2692,10 +2840,8 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
C->set_has_split_ifs(true); // Has chance for split-if optimization
ciProfileData* data = NULL;
if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode
data = method()->method_data()->bci_to_data(bci());
}
bool never_see_null = (ProfileDynamicTypes // aggressive use of profile
&& seems_never_null(obj, data));
......@@ -2719,14 +2865,37 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
phi ->del_req(_null_path);
}
// Do we know the type check always succeeds?
bool known_statically = false;
if (_gvn.type(superklass)->singleton()) {
ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
if (subk != NULL && subk->is_loaded()) {
int static_res = static_subtype_check(superk, subk);
known_statically = (static_res == SSC_always_true || static_res == SSC_always_false);
}
}
if (known_statically && UseTypeSpeculation) {
// If we know the type check always succeeds then we don't use the
// profiling data at this bytecode. Don't lose it, feed it to the
// type system as a speculative type.
not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
} else {
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
// We may not have profiling here or it may not help us. If we
// have a speculative type use it to perform an exact cast.
ciKlass* spec_obj_type = obj_type->speculative_type();
if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
if (stopped()) { // Profile disagrees with this path.
set_control(null_ctl); // Null is the only remaining possibility.
return intcon(0);
}
if (cast_obj != NULL) {
not_null_obj = cast_obj;
}
}
}
// Load the object's klass
......@@ -2773,7 +2942,10 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
if (objtp != NULL && objtp->klass() != NULL) {
switch (static_subtype_check(tk->klass(), objtp->klass())) {
case SSC_always_true:
// If we know the type check always succeeds then we don't use
// the profiling data at this bytecode. Don't lose it, feed it
// to the type system as a speculative type.
return record_profiled_receiver_for_speculation(obj);
case SSC_always_false:
// It needs a null check because a null will *pass* the cast check.
// A non-null value will always produce an exception.
......@@ -2822,12 +2994,17 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
}
Node* cast_obj = NULL;
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
// We may not have profiling here or it may not help us. If we have
// a speculative type use it to perform an exact cast.
ciKlass* spec_obj_type = obj_type->speculative_type();
if (spec_obj_type != NULL ||
(data != NULL &&
// Counter has never been decremented (due to cast failure).
// ...This is a reasonable thing to expect. It is true of
// all casts inserted by javac to implement generic types.
data->as_CounterData()->count() >= 0)) {
cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
if (cast_obj != NULL) {
if (failure_control != NULL) // failure is now impossible
(*failure_control) = top();
......
......@@ -386,10 +386,33 @@ class GraphKit : public Phase {
// Check the null_seen bit.
bool seems_never_null(Node* obj, ciProfileData* data);
// Check for unique class for receiver at call
ciKlass* profile_has_unique_klass() {
ciCallProfile profile = method()->call_profile_at_bci(bci());
if (profile.count() >= 0 && // no cast failures here
profile.has_receiver(0) &&
profile.morphism() == 1) {
return profile.receiver(0);
}
return NULL;
}
// record type from profiling with the type system
Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
Node* record_profiled_receiver_for_speculation(Node* n);
void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
void record_profiled_parameters_for_speculation();
// Use the type profile to narrow an object type.
Node* maybe_cast_profiled_receiver(Node* not_null_obj,
ciKlass* require_klass,
ciKlass* spec,
bool safe_for_replace);
// Cast obj to type and emit guard unless we had too many traps here already
Node* maybe_cast_profiled_obj(Node* obj,
ciKlass* type,
bool not_null = false);
// Cast obj to not-null on this path
Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
......@@ -775,7 +798,7 @@ class GraphKit : public Phase {
// Generate an instance-of idiom. Used by both the instance-of bytecode
// and the reflective instance-of call.
Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
// Generate a check-cast idiom. Used by both the check-cast bytecode
// and the array-store bytecode
......
......@@ -3353,6 +3353,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
// If kls is null, we have a primitive mirror.
phi->init_req(_prim_path, prim_return_value);
if (stopped()) { set_result(region, phi); return true; }
bool safe_for_replace = (region->in(_prim_path) == top());
Node* p; // handy temp
Node* null_ctl;
......@@ -3363,7 +3364,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
switch (id) {
case vmIntrinsics::_isInstance:
// nothing is an instance of a primitive type
query_value = gen_instanceof(obj, kls, safe_for_replace);
break;
case vmIntrinsics::_getModifiers:
......@@ -4553,8 +4554,62 @@ bool LibraryCallKit::inline_arraycopy() {
const Type* dest_type = dest->Value(&_gvn);
const TypeAryPtr* top_src = src_type->isa_aryptr();
const TypeAryPtr* top_dest = dest_type->isa_aryptr();
// Do we have the type of src?
bool has_src = (top_src != NULL && top_src->klass() != NULL);
// Do we have the type of dest?
bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
// Is the type for src from speculation?
bool src_spec = false;
// Is the type for dest from speculation?
bool dest_spec = false;
if (!has_src || !has_dest) {
// We don't have sufficient type information, let's see if
// speculative types can help. We need to have types for both src
// and dest so that it pays off.
// Do we already have or could we have type information for src
bool could_have_src = has_src;
// Do we already have or could we have type information for dest
bool could_have_dest = has_dest;
ciKlass* src_k = NULL;
if (!has_src) {
src_k = src_type->speculative_type();
if (src_k != NULL && src_k->is_array_klass()) {
could_have_src = true;
}
}
ciKlass* dest_k = NULL;
if (!has_dest) {
dest_k = dest_type->speculative_type();
if (dest_k != NULL && dest_k->is_array_klass()) {
could_have_dest = true;
}
}
if (could_have_src && could_have_dest) {
// This is going to pay off so emit the required guards
if (!has_src) {
src = maybe_cast_profiled_obj(src, src_k);
src_type = _gvn.type(src);
top_src = src_type->isa_aryptr();
has_src = (top_src != NULL && top_src->klass() != NULL);
src_spec = true;
}
if (!has_dest) {
dest = maybe_cast_profiled_obj(dest, dest_k);
dest_type = _gvn.type(dest);
top_dest = dest_type->isa_aryptr();
has_dest = (top_dest != NULL && top_dest->klass() != NULL);
dest_spec = true;
}
}
}
if (!has_src || !has_dest) {
// Conservatively insert a memory barrier on all memory slices.
// Do not let writes into the source float below the arraycopy.
insert_mem_bar(Op_MemBarCPUOrder);
......@@ -4589,6 +4644,40 @@ bool LibraryCallKit::inline_arraycopy() {
return true;
}
if (src_elem == T_OBJECT) {
// If both arrays are object arrays then having the exact types
// for both will remove the need for a subtype check at runtime
// before the call and may make it possible to pick a faster copy
// routine (without a subtype check on every element)
// Do we have the exact type of src?
bool could_have_src = src_spec;
// Do we have the exact type of dest?
bool could_have_dest = dest_spec;
ciKlass* src_k = top_src->klass();
ciKlass* dest_k = top_dest->klass();
if (!src_spec) {
src_k = src_type->speculative_type();
if (src_k != NULL && src_k->is_array_klass()) {
could_have_src = true;
}
}
if (!dest_spec) {
dest_k = dest_type->speculative_type();
if (dest_k != NULL && dest_k->is_array_klass()) {
could_have_dest = true;
}
}
if (could_have_src && could_have_dest) {
// If we can have both exact types, emit the missing guards
if (could_have_src && !src_spec) {
src = maybe_cast_profiled_obj(src, src_k);
}
if (could_have_dest && !dest_spec) {
dest = maybe_cast_profiled_obj(dest, dest_k);
}
}
}
//---------------------------------------------------------------------------
// We will make a fast path for this call to arraycopy.
......
......@@ -607,6 +607,9 @@ class Parse : public GraphKit {
// Assumes that there is no applicable local handler.
void throw_to_exit(SafePointNode* ex_map);
// Use speculative type to optimize CmpP node
Node* optimize_cmp_with_klass(Node* c);
public:
#ifndef PRODUCT
// Handle PrintOpto, etc.
......
......@@ -1102,6 +1102,10 @@ void Parse::do_method_entry() {
_synch_lock = shared_lock(lock_obj);
}
// Feed profiling data for parameters to the type system so it can
// propagate it as speculative types
record_profiled_parameters_for_speculation();
if (depth() == 1) {
increment_and_test_invocation_counter(Tier2CompileThreshold);
}
......
......@@ -1366,6 +1366,56 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
}
}
/**
* Use speculative type to optimize CmpP node: if comparison is
* against the low level class, cast the object to the speculative
* type if any. CmpP should then go away.
*
* @param c expected CmpP node
* @return result of CmpP on object casted to speculative type
*
*/
Node* Parse::optimize_cmp_with_klass(Node* c) {
// If this is transformed by the _gvn to a comparison with the low
// level klass then we may be able to use speculation
if (c->Opcode() == Op_CmpP &&
(c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
c->in(2)->is_Con()) {
Node* load_klass = NULL;
Node* decode = NULL;
if (c->in(1)->Opcode() == Op_DecodeNKlass) {
decode = c->in(1);
load_klass = c->in(1)->in(1);
} else {
load_klass = c->in(1);
}
if (load_klass->in(2)->is_AddP()) {
Node* addp = load_klass->in(2);
Node* obj = addp->in(AddPNode::Address);
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
if (obj_type->speculative_type() != NULL) {
ciKlass* k = obj_type->speculative_type();
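// Grow the expression stack by two slots so the uncommon trap's JVM
// state accounts for the two operands of the comparison, which were
// popped before the CmpP was built (see do_one_bytecode below).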
inc_sp(2);
obj = maybe_cast_profiled_obj(obj, k);
dec_sp(2);
// Make the CmpP use the casted obj
addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
load_klass = load_klass->clone();
load_klass->set_req(2, addp);
load_klass = _gvn.transform(load_klass);
if (decode != NULL) {
decode = decode->clone();
decode->set_req(1, load_klass);
load_klass = _gvn.transform(decode);
}
c = c->clone();
c->set_req(1, load_klass);
c = _gvn.transform(c);
}
}
}
return c;
}
//------------------------------do_one_bytecode--------------------------------
// Parse this bytecode, and alter the Parser's JVM->Node mapping
......@@ -2239,6 +2289,7 @@ void Parse::do_one_bytecode() {
a = pop();
b = pop();
c = _gvn.transform( new (C) CmpPNode(b, a) );
c = optimize_cmp_with_klass(c);
do_if(btest, c);
break;
......
......@@ -128,7 +128,7 @@ void Parse::do_instanceof() {
}
// Push the bool result back on stack
Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);
// Pop from stack AFTER gen_instanceof because it can uncommon trap.
pop();
......
......@@ -1385,6 +1385,20 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
}
}
/**
* Remove the speculative part of all types that we know of
*/
void PhaseIterGVN::remove_speculative_types() {
assert(UseTypeSpeculation, "speculation is off");
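// Compile::remove_speculative_types() (compile.cpp above) strips the
// speculative part from the type nodes themselves; this pass also
// scrubs the types cached in the phase's type table.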
for (uint i = 0; i < _types.Size(); i++) {
const Type* t = _types.fast_lookup(i);
if (t != NULL && t->isa_oopptr()) {
const TypeOopPtr* to = t->is_oopptr();
_types.map(i, to->remove_speculative());
}
}
}
//=============================================================================
#ifndef PRODUCT
uint PhaseCCP::_total_invokes = 0;
......
......@@ -500,6 +500,8 @@ public:
ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
Deoptimization::DeoptReason reason);
void remove_speculative_types();
#ifndef PRODUCT
protected:
// Sub-quadratic implementation of VerifyIterativeGVN.
......
This diff is collapsed.
......@@ -159,6 +159,11 @@ private:
// Table for efficient dualing of base types
static const TYPES dual_type[lastype];
#ifdef ASSERT
// One type is interface, the other is oop
virtual bool interface_vs_oop_helper(const Type *t) const;
#endif
protected:
// Each class of type is also identified by its base.
const TYPES _base; // Enum of Types type
......@@ -376,6 +381,9 @@ public:
bool require_constant = false,
bool is_autobox_cache = false);
// Speculative type. See TypeInstPtr
virtual ciKlass* speculative_type() const { return NULL; }
private:
// support arrays
static const BasicType _basic_type[];
......@@ -784,7 +792,7 @@ public:
// Some kind of oop (Java pointer), either klass or instance or array.
class TypeOopPtr : public TypePtr {
protected:
TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative);
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
......@@ -810,11 +818,27 @@ protected:
// This is the node index of the allocation node creating this instance.
int _instance_id;
// Extra type information profiling gave us. We propagate it the
// same way the rest of the type info is propagated. If we want to
// use it, then we have to emit a guard: this part of the type is
// not something we know but something we speculate about the type.
const TypeOopPtr* _speculative;
static const TypeOopPtr* make_from_klass_common(ciKlass* klass, bool klass_change, bool try_for_exact);
int dual_instance_id() const;
int meet_instance_id(int uid) const;
// utility methods to work on the speculative part of the type
const TypeOopPtr* dual_speculative() const;
const TypeOopPtr* meet_speculative(const TypeOopPtr* other) const;
bool eq_speculative(const TypeOopPtr* other) const;
int hash_speculative() const;
const TypeOopPtr* add_offset_speculative(intptr_t offset) const;
#ifndef PRODUCT
void dump_speculative(outputStream *st) const;
#endif
public:
// Creates a type given a klass. Correctly handles multi-dimensional arrays
// Respects UseUniqueSubclasses.
......@@ -841,7 +865,7 @@ public:
bool not_null_elements = false);
// Make a generic (unclassed) pointer to an oop.
static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, const TypeOopPtr* speculative);
ciObject* const_oop() const { return _const_oop; }
virtual ciKlass* klass() const { return _klass; }
......@@ -855,6 +879,7 @@ public:
bool is_known_instance() const { return _instance_id > 0; }
int instance_id() const { return _instance_id; }
bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; }
const TypeOopPtr* speculative() const { return _speculative; }
virtual intptr_t get_con() const;
......@@ -868,9 +893,13 @@ public:
const TypeKlassPtr* as_klass_type() const;
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
virtual const TypeOopPtr* remove_speculative() const;
virtual const Type *xmeet(const Type *t) const;
virtual const Type *xdual() const; // Compute dual right now.
// the core of the computation of the meet for TypeOopPtr and for its subclasses
virtual const Type *xmeet_helper(const Type *t) const;
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter( const Type *kills ) const;
......@@ -880,13 +909,24 @@ public:
#ifndef PRODUCT
virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
#endif
// Return the speculative type if any
ciKlass* speculative_type() const {
if (_speculative != NULL) {
const TypeOopPtr* speculative = _speculative->join(this)->is_oopptr();
if (speculative->klass_is_exact()) {
return speculative->klass();
}
}
return NULL;
}
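// Example consumer: Parse::do_call() (doCall.cpp above) reads the
// receiver's speculative_type() and passes it to
// Compile::call_generator() as speculative_receiver_type.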
};
//------------------------------TypeInstPtr------------------------------------
// Class of Java object pointers, pointing either to non-array Java instances
// or to a Klass* (including array klasses).
class TypeInstPtr : public TypeOopPtr {
TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative);
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
......@@ -899,30 +939,30 @@ class TypeInstPtr : public TypeOopPtr {
// Make a pointer to a constant oop.
static const TypeInstPtr *make(ciObject* o) {
return make(TypePtr::Constant, o->klass(), true, o, 0, InstanceBot);
}
// Make a pointer to a constant oop with offset.
static const TypeInstPtr *make(ciObject* o, int offset) {
return make(TypePtr::Constant, o->klass(), true, o, offset, InstanceBot);
}
// Make a pointer to some value of type klass.
static const TypeInstPtr *make(PTR ptr, ciKlass* klass) {
return make(ptr, klass, false, NULL, 0, InstanceBot);
}
// Make a pointer to some non-polymorphic value of exactly type klass.
static const TypeInstPtr *make_exact(PTR ptr, ciKlass* klass) {
return make(ptr, klass, true, NULL, 0, InstanceBot);
}
// Make a pointer to some value of type klass with offset.
static const TypeInstPtr *make(PTR ptr, ciKlass* klass, int offset) {
return make(ptr, klass, false, NULL, offset, InstanceBot);
}
// Make a pointer to an oop.
static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL);
/** Create constant type for a constant boxed value */
const Type* get_const_boxed_value() const;
......@@ -939,8 +979,11 @@ class TypeInstPtr : public TypeOopPtr {
virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
virtual const TypeOopPtr* remove_speculative() const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
virtual const TypeInstPtr *xmeet_unloaded( const TypeInstPtr *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
......@@ -959,8 +1002,8 @@ class TypeInstPtr : public TypeOopPtr {
// Class of Java array pointers
class TypeAryPtr : public TypeOopPtr {
TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk,
int offset, int instance_id, bool is_autobox_cache, const TypeOopPtr* speculative)
: TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id, speculative),
_ary(ary),
_is_autobox_cache(is_autobox_cache)
{
......@@ -998,9 +1041,9 @@ public:
bool is_autobox_cache() const { return _is_autobox_cache; }
static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL);
// Constant pointer to array
static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, bool is_autobox_cache = false);
// Return a 'ptr' version of this type
virtual const Type *cast_to_ptr_type(PTR ptr) const;
......@@ -1014,8 +1057,11 @@ public:
virtual bool empty(void) const; // TRUE if type is vacuous
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
virtual const TypeOopPtr* remove_speculative() const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
virtual const Type *xdual() const; // Compute dual right now.
const TypeAryPtr* cast_to_stable(bool stable, int stable_dimension = 1) const;
......
......@@ -3721,6 +3721,14 @@ jint Arguments::apply_ergo() {
// incremental inlining: bump MaxNodeLimit
FLAG_SET_DEFAULT(MaxNodeLimit, (intx)75000);
}
if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
// nothing will use the profiling, turn it off
FLAG_SET_DEFAULT(TypeProfileLevel, 0);
}
if (UseTypeSpeculation && FLAG_IS_DEFAULT(ReplaceInParentMaps)) {
// Doing the replace in parent maps helps speculation
FLAG_SET_DEFAULT(ReplaceInParentMaps, true);
}
#endif
if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8024070
* @summary Test that type speculation doesn't cause incorrect execution
* @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TypeProfileLevel=222 TypeSpeculation
*
*/
public class TypeSpeculation {
interface I {
}
static class A {
int m() {
return 1;
}
}
static class B extends A implements I {
int m() {
return 2;
}
}
static class C extends B {
int m() {
return 3;
}
}
static int test1_invokevirtual(A a) {
return a.m();
}
static int test1_1(A a) {
return test1_invokevirtual(a);
}
static boolean test1() {
A a = new A();
B b = new B();
C c = new C();
// pollute profile at test1_invokevirtual to make sure the
// compiler cannot rely on it
for (int i = 0; i < 5000; i++) {
test1_invokevirtual(a);
test1_invokevirtual(b);
test1_invokevirtual(c);
}
// profiling + speculation should make test1_invokevirtual
// inline A.m() with a guard
for (int i = 0; i < 20000; i++) {
int res = test1_1(b);
if (res != b.m()) {
System.out.println("test1 failed with class B");
return false;
}
}
// check that the guard works as expected by passing a
// different type
int res = test1_1(a);
if (res != a.m()) {
System.out.println("test1 failed with class A");
return false;
}
return true;
}
static int test2_invokevirtual(A a) {
return a.m();
}
static int test2_1(A a, boolean t) {
A aa;
if (t) {
aa = (B)a;
} else {
aa = a;
}
// if a of type B is passed to test2_1, the static type of aa
// here is no better than A but the profiled type is B so this
// should inline
return test2_invokevirtual(aa);
}
static boolean test2() {
A a = new A();
B b = new B();
C c = new C();
// pollute profile at test2_invokevirtual to make sure the
// compiler cannot rely on it
for (int i = 0; i < 5000; i++) {
test2_invokevirtual(a);
test2_invokevirtual(b);
test2_invokevirtual(c);
}
// profiling + speculation should make test2_invokevirtual
// inline A.m() with a guard
for (int i = 0; i < 20000; i++) {
int res = test2_1(b, (i % 2) == 0);
if (res != b.m()) {
System.out.println("test2 failed with class B");
return false;
}
}
// check that the guard works as expected by passing a
// different type
int res = test2_1(a, false);
if (res != a.m()) {
System.out.println("test2 failed with class A");
return false;
}
return true;
}
static int test3_invokevirtual(A a) {
return a.m();
}
static void test3_2(A a) {
}
static int test3_1(A a, int i) {
if (i == 0) {
return 0;
}
// If we come here and a is of type B but the parameter profiling
// is polluted, both branches of the if below should have argument
// profiling that tells us a is a B, and inlining of the virtual
// call should happen
if (i == 1) {
test3_2(a);
} else {
test3_2(a);
}
return test3_invokevirtual(a);
}
static boolean test3() {
A a = new A();
B b = new B();
C c = new C();
// pollute profile at test3_invokevirtual and test3_1 to make
// sure the compiler cannot rely on it
for (int i = 0; i < 3000; i++) {
test3_invokevirtual(a);
test3_invokevirtual(b);
test3_invokevirtual(c);
test3_1(a, 0);
test3_1(b, 0);
}
// profiling + speculation should make test3_invokevirtual
// inline A.m() with a guard
for (int i = 0; i < 20000; i++) {
int res = test3_1(b, (i % 2) + 1);
if (res != b.m()) {
System.out.println("test3 failed with class B");
return false;
}
}
// check that the guard works as expected by passing a
// different type
int res = test3_1(a, 1);
if (res != a.m()) {
System.out.println("test3 failed with class A");
return false;
}
return true;
}
// Mix 2 incompatible profiled types
static int test4_invokevirtual(A a) {
return a.m();
}
static void test4_2(A a) {
}
static int test4_1(A a, boolean b) {
if (b) {
test4_2(a);
} else {
test4_2(a);
}
// shouldn't inline
return test4_invokevirtual(a);
}
static boolean test4() {
A a = new A();
B b = new B();
C c = new C();
// pollute profile at test4_invokevirtual to make sure the
// compiler cannot rely on it
for (int i = 0; i < 3000; i++) {
test4_invokevirtual(a);
test4_invokevirtual(b);
test4_invokevirtual(c);
}
for (int i = 0; i < 20000; i++) {
if ((i % 2) == 0) {
int res = test4_1(a, true);
if (res != a.m()) {
System.out.println("test4 failed with class A");
return false;
}
} else {
int res = test4_1(b, false);
if (res != b.m()) {
System.out.println("test4 failed with class B");
return false;
}
}
}
return true;
}
// Mix one profiled type with an incompatible type
static int test5_invokevirtual(A a) {
return a.m();
}
static void test5_2(A a) {
}
static int test5_1(A a, boolean b) {
if (b) {
test5_2(a);
} else {
A aa = (B)a;
}
// shouldn't inline
return test5_invokevirtual(a);
}
static boolean test5() {
A a = new A();
B b = new B();
C c = new C();
// pollute profile at test5_invokevirtual to make sure the
// compiler cannot rely on it
for (int i = 0; i < 3000; i++) {
test5_invokevirtual(a);
test5_invokevirtual(b);
test5_invokevirtual(c);
}
for (int i = 0; i < 20000; i++) {
if ((i % 2) == 0) {
int res = test5_1(a, true);
if (res != a.m()) {
System.out.println("test5 failed with class A");
return false;
}
} else {
int res = test5_1(b, false);
if (res != b.m()) {
System.out.println("test5 failed with class B");
return false;
}
}
}
return true;
}
// Mix incompatible profiled types
static void test6_2(Object o) {
}
static Object test6_1(Object o, boolean b) {
if (b) {
test6_2(o);
} else {
test6_2(o);
}
return o;
}
static boolean test6() {
A a = new A();
A[] aa = new A[10];
for (int i = 0; i < 20000; i++) {
if ((i % 2) == 0) {
test6_1(a, true);
} else {
test6_1(aa, false);
}
}
return true;
}
// Mix a profiled type with an incompatible type
static void test7_2(Object o) {
}
static Object test7_1(Object o, boolean b) {
if (b) {
test7_2(o);
} else {
Object oo = (A[])o;
}
return o;
}
static boolean test7() {
A a = new A();
A[] aa = new A[10];
for (int i = 0; i < 20000; i++) {
if ((i % 2) == 0) {
test7_1(a, true);
} else {
test7_1(aa, false);
}
}
return true;
}
// Mix a profiled type with an interface
static void test8_2(Object o) {
}
static I test8_1(Object o) {
test8_2(o);
return (I)o;
}
static boolean test8() {
A a = new A();
B b = new B();
C c = new C();
for (int i = 0; i < 20000; i++) {
test8_1(b);
}
return true;
}
// Mix a profiled type with a constant
static void test9_2(Object o) {
}
static Object test9_1(Object o, boolean b) {
Object oo;
if (b) {
test9_2(o);
oo = o;
} else {
oo = "some string";
}
return oo;
}
static boolean test9() {
A a = new A();
for (int i = 0; i < 20000; i++) {
if ((i % 2) == 0) {
test9_1(a, true);
} else {
test9_1(a, false);
}
}
return true;
}
public static void main(String[] args) {
boolean success = true;
success = test1() && success;
success = test2() && success;
success = test3() && success;
success = test4() && success;
success = test5() && success;
success = test6() && success;
success = test7() && success;
success = test8() && success;
success = test9() && success;
if (success) {
System.out.println("TEST PASSED");
} else {
throw new RuntimeException("TEST FAILED: erroneous bound check elimination");
}
}
}