Commit 4e427252, authored by roland

8077504: Unsafe load can loose control dependency and cause crash

Summary: Node::depends_only_on_test() should return false for Unsafe loads
Reviewed-by: kvn, adinn
Parent commit: 283c2515
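For orientation before the per-file diff: the failure mode is an Unsafe load that is only valid while the condition guarding it holds. The sketch below is a simplified, hypothetical variant of the regression test added by this change (TestUnsafeLoadControl.java, reproduced at the end of the diff); it is not the actual test and is not guaranteed to reproduce the crash — the real test additionally loops over a flags[] array, apparently so that the guarding checks are subject to loop predication (compare the Invariance change below). Run with the same VM flags as the test's @run line (-XX:-BackgroundCompilation -XX:-UseOnStackReplacement).

import java.lang.reflect.Field;
import sun.misc.Unsafe;

// Hypothetical, simplified sketch of the pattern fixed by this commit.
public class UnsafeLoadGuardSketch {
    private static final Unsafe UNSAFE = initUnsafe();
    static int val;

    private static Unsafe initUnsafe() {
        try {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            return (Unsafe) f.get(null);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    static void guardedLoad(int[] a, boolean flag, long j) {
        if (flag) {
            // With a huge j this address is far outside the array, so the load
            // is only safe because it is guarded by "flag". If C2 treats the
            // load as depending only on a dominating test and lets it float
            // above the guard, it reads a wild address and the VM crashes.
            long address = (j << 2) + Unsafe.ARRAY_INT_BASE_OFFSET;
            val = UNSAFE.getInt(a, address);
        }
    }

    public static void main(String[] args) {
        int[] a = new int[10];
        for (int i = 0; i < 20_000; i++) {
            guardedLoad(a, true, 0);               // warm up and compile with a valid address
        }
        guardedLoad(a, false, Long.MAX_VALUE / 4); // guard is false: the load must not execute
    }
}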
@@ -1452,18 +1452,18 @@ void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
 // factory methods in "int adr_idx"
 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                           int adr_idx,
-                          MemNode::MemOrd mo, bool require_atomic_access) {
+                          MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) {
   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory");
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
+    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
   } else if (require_atomic_access && bt == T_DOUBLE) {
-    ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
+    ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
   } else {
-    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
+    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
   }
   ld = _gvn.transform(ld);
   if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
...
@@ -516,21 +516,24 @@ class GraphKit : public Phase {
   // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
   // of volatile fields.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
-                  MemNode::MemOrd mo, bool require_atomic_access = false) {
+                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
+                  bool require_atomic_access = false) {
     // This version computes alias_index from bottom_type
     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
-                     mo, require_atomic_access);
+                     mo, control_dependency, require_atomic_access);
   }
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
-                  MemNode::MemOrd mo, bool require_atomic_access = false) {
+                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
+                  bool require_atomic_access = false) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other make_load factory");
     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
-                     mo, require_atomic_access);
+                     mo, control_dependency, require_atomic_access);
   }
   // This is the base version which is given an alias index.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
-                  MemNode::MemOrd mo, bool require_atomic_access = false);
+                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
+                  bool require_atomic_access = false);
   // Create & transform a StoreNode and store the effect into the
   // parser's memory state.
...
@@ -2669,7 +2669,9 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
   if (!is_store) {
     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
-    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, is_volatile);
+    // To be valid, unsafe loads may depend on other conditions than
+    // the one that guards them: pin the Load node
+    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile);
     // load value
     switch (type) {
     case T_BOOLEAN:
@@ -6038,7 +6040,7 @@ Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * field
   }
   // Build the load.
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
-  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
   // If reference is volatile, prevent following memory ops from
   // floating up past the volatile read. Also prevents commoning
   // another volatile read.
...
@@ -438,7 +438,13 @@ class Invariance : public StackObj {
         }
       }
       if (all_inputs_invariant) {
-        _invariant.set(n->_idx); // I am a invariant too
+        // If n's control is a predicate that was moved out of the
+        // loop, it was marked invariant but n is only invariant if
+        // it depends only on that test. Otherwise, unless that test
+        // is out of the loop, it's not invariant.
+        if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == NULL || !_phase->is_member(_lpt, n->in(0))) {
+          _invariant.set(n->_idx); // I am a invariant too
+        }
       }
     } else { // process next input
       _stack.set_index(idx + 1);
...
@@ -840,7 +840,7 @@ void Matcher::init_spill_mask( Node *ret ) {
   MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
 #endif
   MachNode *spillI = match_tree(new (C) LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
-  MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered,false));
+  MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered, LoadNode::DependsOnlyOnTest,false));
   MachNode *spillF = match_tree(new (C) LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
   MachNode *spillD = match_tree(new (C) LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
   MachNode *spillP = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
...
@@ -878,6 +878,9 @@ void LoadNode::dump_spec(outputStream *st) const {
     // standard dump does this in Verbose and WizardMode
     st->print(" #"); _type->dump_on(st);
   }
+  if (!_depends_only_on_test) {
+    st->print(" (does not depend only on test)");
+  }
 }
 #endif
@@ -894,7 +897,7 @@ bool LoadNode::is_immutable_value(Node* adr) {
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
-Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) {
+Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo, ControlDependency control_dependency) {
   Compile* C = gvn.C;
   // sanity check the alias category against the created node type
@@ -910,39 +913,39 @@ Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypeP
          rt->isa_oopptr() || is_immutable_value(adr),
          "raw memory operations should have control edge");
   switch (bt) {
-  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
-  case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
-  case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
-  case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
-  case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
-  case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo);
+  case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency);
-  case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt, mo);
+  case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt, mo, control_dependency);
-  case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt, mo);
+  case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt, mo, control_dependency);
-  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo);
+  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
-      Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo));
+      Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency));
       return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
     } else
 #endif
     {
       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
-      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo);
+      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo, control_dependency);
     }
   }
   ShouldNotReachHere();
   return (LoadNode*)NULL;
 }
-LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
   bool require_atomic = true;
-  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
+  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
 }
-LoadDNode* LoadDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadDNode* LoadDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
   bool require_atomic = true;
-  return new (C) LoadDNode(ctl, mem, adr, adr_type, rt, mo, require_atomic);
+  return new (C) LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
 }
...
@@ -138,7 +138,33 @@ public:
 //------------------------------LoadNode---------------------------------------
 // Load value; requires Memory and Address
 class LoadNode : public MemNode {
+public:
+  // Some loads (from unsafe) should be pinned: they don't depend only
+  // on the dominating test. The boolean field _depends_only_on_test
+  // below records whether that node depends only on the dominating
+  // test.
+  // Methods used to build LoadNodes pass an argument of type enum
+  // ControlDependency instead of a boolean because those methods
+  // typically have multiple boolean parameters with default values:
+  // passing the wrong boolean to one of these parameters by mistake
+  // goes easily unnoticed. Using an enum, the compiler can check that
+  // the type of a value and the type of the parameter match.
+  enum ControlDependency {
+    Pinned,
+    DependsOnlyOnTest
+  };
 private:
+  // LoadNode::hash() doesn't take the _depends_only_on_test field
+  // into account: If the graph already has a non-pinned LoadNode and
+  // we add a pinned LoadNode with the same inputs, it's safe for GVN
+  // to replace the pinned LoadNode with the non-pinned LoadNode,
+  // otherwise it wouldn't be safe to have a non pinned LoadNode with
+  // those inputs in the first place. If the graph already has a
+  // pinned LoadNode and we add a non pinned LoadNode with the same
+  // inputs, it's safe (but suboptimal) for GVN to replace the
+  // non-pinned LoadNode by the pinned LoadNode.
+  bool _depends_only_on_test;
   // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
   // loads that can be reordered, and such requiring acquire semantics to
   // adhere to the Java specification. The required behaviour is stored in
@@ -153,8 +179,8 @@ protected:
   const Type* const _type;      // What kind of value is loaded?
 public:
-  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo)
-    : MemNode(c,mem,adr,at), _type(rt), _mo(mo) {
+  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
+    : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
     init_class_id(Class_Load);
   }
   inline bool is_unordered() const { return !is_acquire(); }
@@ -165,7 +191,8 @@ public:
   // Polymorphic factory method:
   static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
-                    const TypePtr* at, const Type *rt, BasicType bt, MemOrd mo);
+                    const TypePtr* at, const Type *rt, BasicType bt,
+                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
   virtual uint hash() const;    // Check the type
@@ -233,16 +260,15 @@ protected:
   // which produce results (new raw memory state) inside of loops preventing all
   // manner of other optimizations). Basically, it's ugly but so is the alternative.
   // See comment in macro.cpp, around line 125 expand_allocate_common().
-  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
+  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
 };
 //------------------------------LoadBNode--------------------------------------
 // Load a byte (8bits signed) from memory
 class LoadBNode : public LoadNode {
 public:
-  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -255,8 +281,8 @@ public:
 // Load a unsigned byte (8bits unsigned) from memory
 class LoadUBNode : public LoadNode {
 public:
-  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
@@ -269,8 +295,8 @@ public:
 // Load an unsigned short/char (16bits unsigned) from memory
 class LoadUSNode : public LoadNode {
 public:
-  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -283,8 +309,8 @@ public:
 // Load a short (16bits signed) from memory
 class LoadSNode : public LoadNode {
 public:
-  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -297,8 +323,8 @@ public:
 // Load an integer from memory
 class LoadINode : public LoadNode {
 public:
-  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
-    : LoadNode(c, mem, adr, at, ti, mo) {}
+  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual int store_Opcode() const { return Op_StoreI; }
@@ -330,15 +356,15 @@ class LoadLNode : public LoadNode {
 public:
   LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
-            MemOrd mo, bool require_atomic_access = false)
-    : LoadNode(c, mem, adr, at, tl, mo), _require_atomic_access(require_atomic_access) {}
+            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
+    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegL; }
   virtual int store_Opcode() const { return Op_StoreL; }
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() const { return _require_atomic_access; }
   static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
-                                const Type* rt, MemOrd mo);
+                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     LoadNode::dump_spec(st);
@@ -351,8 +377,8 @@ public:
 // Load a long from unaligned memory
 class LoadL_unalignedNode : public LoadLNode {
 public:
-  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
-    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo) {}
+  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
   virtual int Opcode() const;
 };
@@ -360,8 +386,8 @@ public:
 // Load a float (64 bits) from memory
 class LoadFNode : public LoadNode {
 public:
-  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo)
-    : LoadNode(c, mem, adr, at, t, mo) {}
+  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegF; }
   virtual int store_Opcode() const { return Op_StoreF; }
@@ -381,15 +407,15 @@ class LoadDNode : public LoadNode {
 public:
   LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
-            MemOrd mo, bool require_atomic_access = false)
-    : LoadNode(c, mem, adr, at, t, mo), _require_atomic_access(require_atomic_access) {}
+            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
+    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegD; }
   virtual int store_Opcode() const { return Op_StoreD; }
   virtual BasicType memory_type() const { return T_DOUBLE; }
   bool require_atomic_access() const { return _require_atomic_access; }
   static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
-                                const Type* rt, MemOrd mo);
+                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     LoadNode::dump_spec(st);
@@ -402,8 +428,8 @@ public:
 // Load a double from unaligned memory
 class LoadD_unalignedNode : public LoadDNode {
 public:
-  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
-    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo) {}
+  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
   virtual int Opcode() const;
 };
@@ -411,8 +437,8 @@ public:
 // Load a pointer from memory (either object or array)
 class LoadPNode : public LoadNode {
 public:
-  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo)
-    : LoadNode(c, mem, adr, at, t, mo) {}
+  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegP; }
   virtual int store_Opcode() const { return Op_StoreP; }
@@ -424,8 +450,8 @@ public:
 // Load a narrow oop from memory (either object or array)
 class LoadNNode : public LoadNode {
 public:
-  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo)
-    : LoadNode(c, mem, adr, at, t, mo) {}
+  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegN; }
   virtual int store_Opcode() const { return Op_StoreN; }
...
@@ -233,7 +233,7 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
   // Build the load.
   //
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
-  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
   // Adjust Java stack
   if (type2size[bt] == 1)
...
@@ -1431,7 +1431,7 @@ void SuperWord::output() {
         }
         Node* adr = low_adr->in(MemNode::Address);
         const TypePtr* atyp = n->adr_type();
-        vn = LoadVectorNode::make(C, opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n));
+        vn = LoadVectorNode::make(C, opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n), control_dependency(p));
         vlen_in_bytes = vn->as_LoadVector()->memory_size();
       } else if (n->is_Store()) {
         // Promote value to be stored to vector
@@ -2029,6 +2029,19 @@ Node* SuperWord::executed_last(Node_List* p) {
   return n;
 }
+LoadNode::ControlDependency SuperWord::control_dependency(Node_List* p) {
+  LoadNode::ControlDependency dep = LoadNode::DependsOnlyOnTest;
+  for (uint i = 0; i < p->size(); i++) {
+    Node* n = p->at(i);
+    assert(n->is_Load(), "only meaningful for loads");
+    if (!n->depends_only_on_test()) {
+      dep = LoadNode::Pinned;
+    }
+  }
+  return dep;
+}
 //----------------------------align_initial_loop_index---------------------------
 // Adjust pre-loop limit so that in main loop, a load/store reference
 // to align_to_ref will be a position zero in the vector.
...
@@ -399,6 +399,7 @@ class SuperWord : public ResourceObj {
   Node* executed_first(Node_List* p);
   // Return the node executed last in pack p.
   Node* executed_last(Node_List* p);
+  static LoadNode::ControlDependency control_dependency(Node_List* p);
   // Alignment within a vector memory reference
   int memory_alignment(MemNode* s, int iv_adjust);
   // (Start, end] half-open range defining which operands are vector
...
@@ -403,9 +403,10 @@ PackNode* PackNode::binary_tree_pack(Compile* C, int lo, int hi) {
 // Return the vector version of a scalar load node.
 LoadVectorNode* LoadVectorNode::make(Compile* C, int opc, Node* ctl, Node* mem,
-                                     Node* adr, const TypePtr* atyp, uint vlen, BasicType bt) {
+                                     Node* adr, const TypePtr* atyp, uint vlen, BasicType bt,
+                                     ControlDependency control_dependency) {
   const TypeVect* vt = TypeVect::make(bt, vlen);
-  return new (C) LoadVectorNode(ctl, mem, adr, atyp, vt);
+  return new (C) LoadVectorNode(ctl, mem, adr, atyp, vt, control_dependency);
 }
 // Return the vector version of a scalar store node.
...
@@ -355,8 +355,8 @@ class XorVNode : public VectorNode {
 // Load Vector from memory
 class LoadVectorNode : public LoadNode {
 public:
-  LoadVectorNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeVect* vt)
-    : LoadNode(c, mem, adr, at, vt, MemNode::unordered) {
+  LoadVectorNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeVect* vt, ControlDependency control_dependency = LoadNode::DependsOnlyOnTest)
+    : LoadNode(c, mem, adr, at, vt, MemNode::unordered, control_dependency) {
     init_class_id(Class_LoadVector);
   }
@@ -372,7 +372,8 @@ class LoadVectorNode : public LoadNode {
   virtual int store_Opcode() const { return Op_StoreVector; }
   static LoadVectorNode* make(Compile* C, int opc, Node* ctl, Node* mem,
-                              Node* adr, const TypePtr* atyp, uint vlen, BasicType bt);
+                              Node* adr, const TypePtr* atyp, uint vlen, BasicType bt,
+                              ControlDependency control_dependency = LoadNode::DependsOnlyOnTest);
 };
 //------------------------------StoreVectorNode--------------------------------
...
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8077504
* @summary Unsafe load can loose control dependency and cause crash
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestUnsafeLoadControl
*
*/
import java.lang.reflect.Field;
import sun.misc.Unsafe;
public class TestUnsafeLoadControl {

    private static final Unsafe UNSAFE;

    static {
        try {
            Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
            unsafeField.setAccessible(true);
            UNSAFE = (Unsafe) unsafeField.get(null);
        } catch(Exception e) {
            throw new RuntimeException(e);
        }
    }

    static int val;

    static void test1(int[] a, boolean[] flags, boolean flag, long j) {
        for (int i = 0; i < 10; i++) {
            if (flags[i]) {
                if (flag) {
                    long address = (j << 2) + UNSAFE.ARRAY_INT_BASE_OFFSET;
                    int v = UNSAFE.getInt(a, address);
                    val = v;
                }
            }
        }
    }

    static int test2(int[] a, boolean[] flags, boolean flag, long j) {
        int sum = 0;
        for (int i = 0; i < 10; i++) {
            if (flags[i]) {
                if (flag) {
                    long address = (j << 2) + UNSAFE.ARRAY_INT_BASE_OFFSET;
                    int v = UNSAFE.getInt(a, address);
                    if (v == 0) {
                        sum++;
                    }
                }
            }
        }
        return sum;
    }

    static public void main(String[] args) {
        boolean[] flags = new boolean[10];
        for (int i = 0; i < flags.length; i++) {
            flags[i] = true;
        }
        int[] array = new int[10];
        for (int i = 0; i < 20000; i++) {
            test1(array, flags, true, 0);
        }
        for (int i = 0; i < flags.length; i++) {
            flags[i] = false;
        }
        test1(array, flags, true, Long.MAX_VALUE/4);
        for (int i = 0; i < flags.length; i++) {
            flags[i] = true;
        }
        for (int i = 0; i < 20000; i++) {
            test2(array, flags, true, 0);
        }
        for (int i = 0; i < flags.length; i++) {
            flags[i] = false;
        }
        test2(array, flags, true, Long.MAX_VALUE/4);
    }
}