Commit 644a4c64 authored by kvn

8035605: Expand functionality of PredictedIntrinsicGenerator

Summary: Allow several predicates and separate intrinsic methods per one intrinsified (virtual) method.
Reviewed-by: roland
Parent b9b96976
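The change generalizes the single-predicate PredictedIntrinsicGenerator into PredicatedIntrinsicGenerator, which emits a chain of predicates, each guarding its own intrinsic variant, with normal compiled code as the final else. A minimal standalone C++ model of the dispatch shape the new generator builds (toy types and names, not HotSpot code):

    #include <cstdio>
    #include <functional>
    #include <vector>

    // Toy stand-in for one predicate/intrinsic pair (hypothetical names).
    struct PredicatedIntrinsic {
        std::function<bool()> predicate;  // models generate_predicate(jvms, i)
        std::function<int()>  intrinsic;  // models _intrinsic->generate(...)
    };

    // Models the control flow the new generator emits:
    //   if (p0) i0 else if (p1) i1 ... else fallback
    int dispatch(const std::vector<PredicatedIntrinsic>& variants,
                 const std::function<int()>& fallback) {
        for (const auto& v : variants)
            if (v.predicate()) return v.intrinsic();
        return fallback();  // the slow_region path: normal compilation
    }

    int main() {
        bool aes_supported = true;
        std::vector<PredicatedIntrinsic> cbc = {
            { [&] { return aes_supported; }, [] { return 1; } }
        };
        std::printf("path taken: %d\n", dispatch(cbc, [] { return 0; }));
    }

The diff below wires this shape into the ideal graph: every failing predicate edge feeds slow_region, and the surviving intrinsic paths are merged afterwards.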
src/share/vm/opto/callGenerator.cpp
@@ -710,7 +710,15 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser)
Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
iophi->set_req(2, slow_map->i_o());
kit.set_i_o(gvn.transform(iophi));
// Merge memory
kit.merge_memory(slow_map->merged_memory(), region, 2);
// Transform new memory Phis.
for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
Node* phi = mms.memory();
if (phi->is_Phi() && phi->in(0) == region) {
mms.set_memory(gvn.transform(phi));
}
}
uint tos = kit.jvms()->stkoff() + kit.sp();
uint limit = slow_map->req();
for (uint i = TypeFunc::Parms; i < limit; i++) {
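The added loop above GVN-transforms the memory Phis that merge_memory() now leaves untransformed for its callers (see the graphKit.cpp hunk below). A toy model of the per-slice idea: a merged node (here a fresh id standing in for a transformed Phi) is created only for slices whose values differ across the two paths (illustrative, not HotSpot code):

    #include <cstdio>
    #include <map>
    #include <string>

    // Toy per-slice memory state keyed by alias category (names illustrative).
    using MemState = std::map<std::string, int>;

    // Merge the slow path's memory into the fast path's: identical slices
    // pass through untouched; differing slices get a fresh merged node.
    MemState merge_memory(const MemState& fast, const MemState& slow, int* next_id) {
        MemState merged;
        for (const auto& [slice, fast_val] : fast) {
            const int slow_val = slow.at(slice);
            merged[slice] = (fast_val == slow_val) ? fast_val : (*next_id)++;
        }
        return merged;
    }

    int main() {
        int next_id = 100;  // ids >= 100 stand in for newly transformed Phis
        MemState fast = {{"raw", 1}, {"oop", 2}}, slow = {{"raw", 1}, {"oop", 3}};
        for (const auto& [slice, val] : merge_memory(fast, slow, &next_id))
            std::printf("%s -> %d\n", slice.c_str(), val);
    }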
@@ -864,15 +872,15 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
}
//------------------------PredictedIntrinsicGenerator------------------------------
// Internal class which handles all predicted Intrinsic calls.
class PredictedIntrinsicGenerator : public CallGenerator {
//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
CallGenerator* _intrinsic;
CallGenerator* _cg;
public:
PredictedIntrinsicGenerator(CallGenerator* intrinsic,
CallGenerator* cg)
PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
CallGenerator* cg)
: CallGenerator(cg->method())
{
_intrinsic = intrinsic;
@@ -887,103 +895,182 @@ public:
};
CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
CallGenerator* cg) {
return new PredictedIntrinsicGenerator(intrinsic, cg);
CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
CallGenerator* cg) {
return new PredicatedIntrinsicGenerator(intrinsic, cg);
}
JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
// The code we want to generate here is:
// if (receiver == NULL)
// uncommon_Trap
// if (predicate(0))
// do_intrinsic(0)
// else
// if (predicate(1))
// do_intrinsic(1)
// ...
// else
// do_java_comp
GraphKit kit(jvms);
PhaseGVN& gvn = kit.gvn();
CompileLog* log = kit.C->log();
if (log != NULL) {
log->elem("predicted_intrinsic bci='%d' method='%d'",
log->elem("predicated_intrinsic bci='%d' method='%d'",
jvms->bci(), log->identify(method()));
}
Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
if (!method()->is_static()) {
// We need an explicit receiver null_check before checking its type in predicate.
// We share a map with the caller, so his JVMS gets adjusted.
Node* receiver = kit.null_check_receiver_before_call(method());
if (kit.stopped()) {
return kit.transfer_exceptions_into_jvms();
}
}
SafePointNode* slow_map = NULL;
JVMState* slow_jvms;
if (slow_ctl != NULL) {
PreserveJVMState pjvms(&kit);
kit.set_control(slow_ctl);
int n_predicates = _intrinsic->predicates_count();
assert(n_predicates > 0, "sanity");
JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
// Region for normal compilation code if intrinsic failed.
Node* slow_region = new (kit.C) RegionNode(1);
int results = 0;
for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
JVMState* old_jvms = kit.jvms();
SafePointNode* old_map = kit.map();
Node* old_io = old_map->i_o();
Node* old_mem = old_map->memory();
Node* old_exc = old_map->next_exception();
#endif
Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
// Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
SafePointNode* new_map = kit.map();
assert(old_io == new_map->i_o(), "generate_predicate should not change i_o");
assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
if (!kit.stopped()) {
slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(slow_jvms != NULL, "must be");
kit.add_exception_states_from(slow_jvms);
kit.set_map(slow_jvms->map());
if (!kit.stopped())
slow_map = kit.stop();
PreserveJVMState pjvms(&kit);
// Generate intrinsic code:
JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
if (new_jvms == NULL) {
// Intrinsic failed, use normal compilation path for this predicate.
slow_region->add_req(kit.control());
} else {
kit.add_exception_states_from(new_jvms);
kit.set_jvms(new_jvms);
if (!kit.stopped()) {
result_jvms[results++] = kit.jvms();
}
}
}
if (else_ctrl == NULL) {
else_ctrl = kit.C->top();
}
kit.set_control(else_ctrl);
}
if (kit.stopped()) {
// Predicate is always false.
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
if (!kit.stopped()) {
// Final 'else' after predicates.
slow_region->add_req(kit.control());
}
// Generate intrinsic code:
JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
if (new_jvms == NULL) {
// Intrinsic failed, so use slow code or make a direct call.
if (slow_map == NULL) {
CallGenerator* cg = CallGenerator::for_direct_call(method());
new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
} else {
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
if (slow_region->req() > 1) {
PreserveJVMState pjvms(&kit);
// Generate normal compilation code:
kit.set_control(gvn.transform(slow_region));
JVMState* new_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(new_jvms != NULL, "must be");
kit.add_exception_states_from(new_jvms);
kit.set_jvms(new_jvms);
if (!kit.stopped()) {
result_jvms[results++] = kit.jvms();
}
}
kit.add_exception_states_from(new_jvms);
kit.set_jvms(new_jvms);
// Need to merge slow and fast?
if (slow_map == NULL) {
// The fast path is the only path remaining.
if (results == 0) {
// All paths ended in uncommon traps.
(void) kit.stop();
return kit.transfer_exceptions_into_jvms();
}
if (kit.stopped()) {
// Intrinsic method threw an exception, so it's just the slow path after all.
kit.set_jvms(slow_jvms);
if (results == 1) { // Only one path
kit.set_jvms(result_jvms[0]);
return kit.transfer_exceptions_into_jvms();
}
// Finish the diamond.
// Merge all paths.
kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
RegionNode* region = new (kit.C) RegionNode(3);
region->init_req(1, kit.control());
region->init_req(2, slow_map->control());
kit.set_control(gvn.transform(region));
RegionNode* region = new (kit.C) RegionNode(results + 1);
Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
iophi->set_req(2, slow_map->i_o());
for (int i = 0; i < results; i++) {
JVMState* jvms = result_jvms[i];
int path = i + 1;
SafePointNode* map = jvms->map();
region->init_req(path, map->control());
iophi->set_req(path, map->i_o());
if (i == 0) {
kit.set_jvms(jvms);
} else {
kit.merge_memory(map->merged_memory(), region, path);
}
}
kit.set_control(gvn.transform(region));
kit.set_i_o(gvn.transform(iophi));
kit.merge_memory(slow_map->merged_memory(), region, 2);
// Transform new memory Phis.
for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
Node* phi = mms.memory();
if (phi->is_Phi() && phi->in(0) == region) {
mms.set_memory(gvn.transform(phi));
}
}
// Merge debug info.
Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
uint tos = kit.jvms()->stkoff() + kit.sp();
uint limit = slow_map->req();
Node* map = kit.map();
uint limit = map->req();
for (uint i = TypeFunc::Parms; i < limit; i++) {
// Skip unused stack slots; fast forward to monoff();
if (i == tos) {
i = kit.jvms()->monoff();
if( i >= limit ) break;
}
Node* m = kit.map()->in(i);
Node* n = slow_map->in(i);
if (m != n) {
const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
Node* phi = PhiNode::make(region, m, t);
phi->set_req(2, n);
kit.map()->set_req(i, gvn.transform(phi));
Node* n = map->in(i);
ins[0] = n;
const Type* t = gvn.type(n);
bool needs_phi = false;
for (int j = 1; j < results; j++) {
JVMState* jvms = result_jvms[j];
Node* jmap = jvms->map();
Node* m = NULL;
if (jmap->req() > i) {
m = jmap->in(i);
if (m != n) {
needs_phi = true;
t = t->meet_speculative(gvn.type(m));
}
}
ins[j] = m;
}
if (needs_phi) {
Node* phi = PhiNode::make(region, n, t);
for (int j = 1; j < results; j++) {
phi->set_req(j + 1, ins[j]);
}
map->set_req(i, gvn.transform(phi));
}
}
return kit.transfer_exceptions_into_jvms();
}
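The rewritten generate() above collects up to predicates_count()+1 surviving JVM states (one per intrinsic variant plus the normal-compilation path) and merges them through a single region. Its debug-info merge creates a Phi for a slot only when the paths actually disagree; a standalone sketch of that needs_phi test (toy values, not HotSpot code):

    #include <cstdio>
    #include <optional>
    #include <vector>

    // One value per surviving path for a single debug-info slot (toy model).
    struct Slot { std::vector<int> per_path; };

    // Mirrors the needs_phi loop: if all paths agree, keep the value; if any
    // path disagrees, a merge node (Phi) is required at the region.
    std::optional<int> merged_value_or_phi(const Slot& s, bool* needs_phi) {
        *needs_phi = false;
        for (int v : s.per_path)
            if (v != s.per_path[0]) { *needs_phi = true; return std::nullopt; }
        return s.per_path[0];
    }

    int main() {
        bool needs_phi;
        Slot same{{7, 7, 7}}, differ{{7, 8, 7}};
        merged_value_or_phi(same, &needs_phi);
        std::printf("same slot needs phi: %d\n", needs_phi);    // 0
        merged_value_or_phi(differ, &needs_phi);
        std::printf("differ slot needs phi: %d\n", needs_phi);  // 1
    }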
src/share/vm/opto/callGenerator.hpp
@@ -63,8 +63,9 @@ class CallGenerator : public ResourceObj {
virtual bool is_virtual() const { return false; }
// is_deferred: The decision whether to inline or not is deferred.
virtual bool is_deferred() const { return false; }
// is_predicted: Uses an explicit check against a predicted type.
virtual bool is_predicted() const { return false; }
// is_predicated: Uses an explicit check (predicate).
virtual bool is_predicated() const { return false; }
virtual int predicates_count() const { return 0; }
// is_trap: Does not return to the caller. (E.g., uncommon trap.)
virtual bool is_trap() const { return false; }
// does_virtual_dispatch: Should try inlining as normal method first.
@@ -157,9 +158,9 @@ class CallGenerator : public ResourceObj {
// Registry for intrinsics:
static CallGenerator* for_intrinsic(ciMethod* m);
static void register_intrinsic(ciMethod* m, CallGenerator* cg);
static CallGenerator* for_predicted_intrinsic(CallGenerator* intrinsic,
CallGenerator* cg);
virtual Node* generate_predicate(JVMState* jvms) { return NULL; };
static CallGenerator* for_predicated_intrinsic(CallGenerator* intrinsic,
CallGenerator* cg);
virtual Node* generate_predicate(JVMState* jvms, int predicate) { return NULL; };
virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
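This header hunk widens the CallGenerator contract: the boolean is_predicted() hook becomes is_predicated() plus predicates_count(), and generate_predicate() now takes the predicate's index. A compilable toy sketch of the new shape (the real methods take a JVMState* and return a control Node*):

    #include <cstdio>

    // Toy sketch of the widened contract: a predicate count replaces the old
    // single flag, and generate_predicate() is indexed 0..count-1.
    struct ToyCallGenerator {
        virtual ~ToyCallGenerator() = default;
        virtual bool is_predicated() const { return predicates_count() > 0; }
        virtual int  predicates_count() const { return 0; }
        virtual bool generate_predicate(int predicate) { (void)predicate; return false; }
    };

    struct TwoPredicateIntrinsic : ToyCallGenerator {
        int  predicates_count() const override { return 2; }
        bool generate_predicate(int predicate) override { return predicate == 0; }
    };

    int main() {
        TwoPredicateIntrinsic g;
        for (int p = 0; p < g.predicates_count(); p++)
            std::printf("predicate %d holds: %d\n", p, g.generate_predicate(p));
    }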
src/share/vm/opto/doCall.cpp
@@ -115,12 +115,12 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
if (allow_inline && allow_intrinsics) {
CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
if (cg != NULL) {
if (cg->is_predicted()) {
if (cg->is_predicated()) {
// Code without intrinsic but, hopefully, inlined.
CallGenerator* inline_cg = this->call_generator(callee,
vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
if (inline_cg != NULL) {
cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
}
}
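In Compile::call_generator, a predicated intrinsic is paired with an ordinary (hopefully inlined) generator for the same callee, and for_predicated_intrinsic() wraps the two. Conceptually this is a decorator; a toy version (illustrative names, not HotSpot's types):

    #include <cstdio>
    #include <memory>

    // Toy composition mirroring for_predicated_intrinsic(cg, inline_cg):
    // the wrapper owns both the intrinsic generator and the fallback.
    struct Gen { virtual ~Gen() = default; virtual const char* generate() = 0; };
    struct IntrinsicGen : Gen { const char* generate() override { return "intrinsic"; } };
    struct InlineGen    : Gen { const char* generate() override { return "inlined java"; } };

    struct PredicatedGen : Gen {
        std::unique_ptr<Gen> intrinsic, fallback;
        bool predicate_holds;
        PredicatedGen(std::unique_ptr<Gen> i, std::unique_ptr<Gen> f, bool p)
            : intrinsic(std::move(i)), fallback(std::move(f)), predicate_holds(p) {}
        const char* generate() override {
            return predicate_holds ? intrinsic->generate() : fallback->generate();
        }
    };

    int main() {
        PredicatedGen g(std::make_unique<IntrinsicGen>(),
                        std::make_unique<InlineGen>(), /*predicate_holds=*/false);
        std::printf("%s\n", g.generate());  // falls back to the inlined code
    }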
src/share/vm/opto/graphKit.cpp
@@ -2435,23 +2435,24 @@ void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
Node* new_slice = mms.memory2();
if (old_slice != new_slice) {
PhiNode* phi;
if (new_slice->is_Phi() && new_slice->as_Phi()->region() == region) {
phi = new_slice->as_Phi();
#ifdef ASSERT
if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region)
old_slice = old_slice->in(new_path);
// Caller is responsible for ensuring that any pre-existing
// phis are already aware of old memory.
int old_path = (new_path > 1) ? 1 : 2; // choose old_path != new_path
assert(phi->in(old_path) == old_slice, "pre-existing phis OK");
#endif
mms.set_memory(phi);
if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
if (mms.is_empty()) {
// clone base memory Phi's inputs for this memory slice
assert(old_slice == mms.base_memory(), "sanity");
phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C));
_gvn.set_type(phi, Type::MEMORY);
for (uint i = 1; i < phi->req(); i++) {
phi->init_req(i, old_slice->in(i));
}
} else {
phi = old_slice->as_Phi(); // Phi was generated already
}
} else {
phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C));
_gvn.set_type(phi, Type::MEMORY);
phi->set_req(new_path, new_slice);
mms.set_memory(_gvn.transform(phi)); // assume it is complete
}
phi->set_req(new_path, new_slice);
mms.set_memory(phi);
}
}
}
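The rewritten merge_memory() above now materializes a per-slice Phi by cloning the base-memory Phi's inputs when the slice is still empty, and it leaves new Phis untransformed for the caller (hence the transform loops added in callGenerator.cpp). A toy model of the lazy cloning (illustrative, not HotSpot code):

    #include <cstdio>
    #include <vector>

    // Toy Phi: one input per incoming path of the region (illustrative).
    struct ToyPhi { std::vector<int> in; };

    // Models the new branch: if this slice has no Phi yet, clone the base
    // memory Phi's inputs for it before wiring in the new path's value.
    ToyPhi slice_phi_for(const ToyPhi& base, const ToyPhi* existing,
                         int new_path, int new_val) {
        ToyPhi phi = existing ? *existing : base;  // clone base inputs if empty
        phi.in[new_path] = new_val;
        return phi;
    }

    int main() {
        ToyPhi base{{0, 10, 20, 30}};  // in[0] unused; paths 1..3
        ToyPhi p = slice_phi_for(base, nullptr, 2, 99);
        std::printf("path2 = %d\n", p.in[2]);  // 99; other paths cloned from base
    }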
src/share/vm/opto/library_call.cpp
@@ -46,25 +46,28 @@ class LibraryIntrinsic : public InlineCallGenerator {
public:
private:
bool _is_virtual;
bool _is_predicted;
bool _does_virtual_dispatch;
int8_t _predicates_count; // Intrinsic is predicated by several conditions
int8_t _last_predicate; // Last generated predicate
vmIntrinsics::ID _intrinsic_id;
public:
LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id)
LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
: InlineCallGenerator(m),
_is_virtual(is_virtual),
_is_predicted(is_predicted),
_does_virtual_dispatch(does_virtual_dispatch),
_predicates_count((int8_t)predicates_count),
_last_predicate((int8_t)-1),
_intrinsic_id(id)
{
}
virtual bool is_intrinsic() const { return true; }
virtual bool is_virtual() const { return _is_virtual; }
virtual bool is_predicted() const { return _is_predicted; }
virtual bool is_predicated() const { return _predicates_count > 0; }
virtual int predicates_count() const { return _predicates_count; }
virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
virtual Node* generate_predicate(JVMState* jvms);
virtual Node* generate_predicate(JVMState* jvms, int predicate);
vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};
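The new _last_predicate field records which predicate generate_predicate() emitted last, so the subsequent generate() call can pass that index to try_to_inline() and inline the matching variant. A toy model of the handshake (illustrative, not HotSpot code):

    #include <cstdio>
    #include <cstdint>

    // Toy model of the _last_predicate handshake: generate_predicate(jvms, p)
    // records p, and the following generate() inlines that variant.
    struct ToyLibraryIntrinsic {
        int8_t predicates_count = 2;
        int8_t last_predicate   = -1;
        void generate_predicate(int p) { last_predicate = (int8_t)p; }
        int  generate() const { return last_predicate; }  // try_to_inline(p)
    };

    int main() {
        ToyLibraryIntrinsic li;
        li.generate_predicate(1);
        std::printf("inlining variant %d of %d\n", li.generate(), li.predicates_count);
    }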
@@ -107,8 +110,8 @@ class LibraryCallKit : public GraphKit {
vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); }
ciMethod* callee() const { return _intrinsic->method(); }
bool try_to_inline();
Node* try_to_predicate();
bool try_to_inline(int predicate);
Node* try_to_predicate(int predicate);
void push_result() {
// Push the result onto the stack.
@@ -367,7 +370,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
}
}
bool is_predicted = false;
int predicates = 0;
bool does_virtual_dispatch = false;
switch (id) {
@@ -508,7 +511,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
if (!UseAESIntrinsics) return NULL;
// these two require the predicated logic
is_predicted = true;
predicates = 1;
break;
case vmIntrinsics::_updateCRC32:
@@ -577,7 +580,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
if (!InlineUnsafeOps) return NULL;
}
return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id);
return new LibraryIntrinsic(m, is_virtual, predicates, does_virtual_dispatch, (vmIntrinsics::ID) id);
}
//----------------------register_library_intrinsics-----------------------
@@ -601,7 +604,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
const int bci = kit.bci();
// Try to inline the intrinsic.
if (kit.try_to_inline()) {
if (kit.try_to_inline(_last_predicate)) {
if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
@@ -634,12 +637,13 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
return NULL;
}
Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
LibraryCallKit kit(jvms, this);
Compile* C = kit.C;
int nodes = C->unique();
_last_predicate = predicate;
#ifndef PRODUCT
assert(is_predicted(), "sanity");
assert(is_predicated() && predicate < predicates_count(), "sanity");
if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
char buf[1000];
const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
@@ -649,10 +653,10 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
ciMethod* callee = kit.callee();
const int bci = kit.bci();
Node* slow_ctl = kit.try_to_predicate();
Node* slow_ctl = kit.try_to_predicate(predicate);
if (!kit.failing()) {
if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual, predicate)" : "(intrinsic, predicate)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
if (C->log()) {
@@ -681,7 +685,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
return NULL;
}
bool LibraryCallKit::try_to_inline() {
bool LibraryCallKit::try_to_inline(int predicate) {
// Handle symbolic names for otherwise undistinguished boolean switches:
const bool is_store = true;
const bool is_native_ptr = true;
@@ -898,7 +902,7 @@ bool LibraryCallKit::try_to_inline() {
}
}
Node* LibraryCallKit::try_to_predicate() {
Node* LibraryCallKit::try_to_predicate(int predicate) {
if (!jvms()->has_method()) {
// Root JVMState has a null method.
assert(map()->memory()->Opcode() == Op_Parm, "");
@@ -5866,7 +5870,12 @@ Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * field
BasicType bt = field->layout_type();
// Build the resultant type of the load
const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass());
const Type *type;
if (bt == T_OBJECT) {
type = TypeOopPtr::make_from_klass(field_klass->as_klass());
} else {
type = Type::get_const_basic_type(bt);
}
// Build the load.
Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol);
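This hunk also fixes a latent bug in load_field_from_object(): the loaded field's type was always built with TypeOopPtr::make_from_klass(), even for primitive fields; it now takes that path only for T_OBJECT and uses the basic type otherwise. A trivial mirror of the corrected selection (toy enums, not HotSpot's):

    #include <cassert>

    // Only object fields get a klass-derived oop pointer type; primitive
    // fields now get their basic type (the old code used the oop path for both).
    enum ToyBasicType { TOY_T_INT, TOY_T_LONG, TOY_T_OBJECT };
    enum ToyTypeKind  { OOP_PTR_FROM_KLASS, CONST_BASIC_TYPE };

    ToyTypeKind field_load_type(ToyBasicType bt) {
        return (bt == TOY_T_OBJECT) ? OOP_PTR_FROM_KLASS : CONST_BASIC_TYPE;
    }

    int main() {
        assert(field_load_type(TOY_T_OBJECT) == OOP_PTR_FROM_KLASS);
        assert(field_load_type(TOY_T_INT)    == CONST_BASIC_TYPE);
        return 0;
    }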
@@ -5996,7 +6005,7 @@ bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
assert(tinst != NULL, "CBC obj is null");
assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
if (!klass_AESCrypt->is_loaded()) return false;
assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
@@ -6071,11 +6080,8 @@ Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescryp
// note cipher==plain is more conservative than the original java code but that's OK
//
Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
// First, check receiver for NULL since it is virtual method.
// The receiver was checked for NULL already.
Node* objCBC = argument(0);
objCBC = null_check(objCBC);
if (stopped()) return NULL; // Always NULL
// Load embeddedCipher field of CipherBlockChaining object.
Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);