Commit b4e0131b authored by kvn

6726999: nsk/stress/jck12a/jck12a010 assert(n != null,"Bad immediate dominator info.")

Summary: Escape Analysis fixes.
Reviewed-by: never, rasbold
Parent 98d190c3
......@@ -388,6 +388,9 @@
product(intx, EliminateAllocationArraySizeLimit, 64, \
"Array size (number of elements) limit for scalar replacement") \
\
product(intx, ValueSearchLimit, 1000, \
"Recursion limit in PhaseMacroExpand::value_from_mem_phi") \
\
product(intx, MaxLabelRootDepth, 1100, \
"Maximum times call Label_Root to prevent stack overflow") \
\
......
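The new ValueSearchLimit flag replaces the hard-coded depth of 8 that value_from_mem used to pass to value_from_mem_phi (see the PhaseMacroExpand hunks further down), so the phi search gives up gracefully instead of risking a stack overflow on very deep phi trees. A minimal standalone sketch of this kind of budgeted recursion; PhiLike and find_value are illustrative only and do not model HotSpot's node classes.

// Hypothetical sketch of a depth-limited recursive search over a phi-like tree.
#include <cstdio>
#include <vector>

struct PhiLike {
  int value;                      // concrete value if this is a leaf
  std::vector<PhiLike*> inputs;   // merged inputs if this is a phi
};

// Return a pointer to a value found within 'budget' recursive visits,
// or nullptr once the budget is exhausted ("phi tree too deep").
const int* find_value(const PhiLike* n, int& budget) {
  if (n == nullptr || --budget <= 0) return nullptr;
  if (n->inputs.empty()) return &n->value;
  for (const PhiLike* in : n->inputs) {
    if (const int* v = find_value(in, budget)) return v;
  }
  return nullptr;
}

int main() {
  PhiLike leaf{42, {}};
  PhiLike root{0, {&leaf}};
  int budget = 1000;   // plays the role of -XX:ValueSearchLimit=1000
  const int* v = find_value(&root, budget);
  std::printf("%s\n", v ? "found a value" : "gave up: tree too deep");
  return 0;
}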
......@@ -585,6 +585,10 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
// Perform escape analysis
if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
// Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction.
PhaseGVN* igvn = initial_gvn();
Node* oop_null = igvn->zerocon(T_OBJECT);
Node* noop_null = igvn->zerocon(T_NARROWOOP);
_congraph = new(comp_arena()) ConnectionGraph(this);
bool has_non_escaping_obj = _congraph->compute_escape();
......@@ -594,6 +598,12 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_congraph->dump();
}
#endif
// Cleanup.
if (oop_null->outcnt() == 0)
igvn->hash_delete(oop_null);
if (noop_null->outcnt() == 0)
igvn->hash_delete(noop_null);
if (!has_non_escaping_obj) {
_congraph = NULL;
}
......
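The ConP#NULL and ConN#NULL constants are materialized here, before the ConnectionGraph is constructed, because the graph's constructor looks them up and asserts they were "created already" (see the ConnectionGraph constructor hunk below); if escape analysis ends up attaching no users to them, they are removed from the GVN hash table again. A minimal sketch of the same create-eagerly, discard-if-unused pattern, using hypothetical ConstNode and ValueTable types in place of the real Node and PhaseGVN classes:

// Hypothetical types; zerocon() and hash_delete() only mimic the shape of the
// PhaseGVN calls used above.
#include <memory>
#include <string>
#include <unordered_map>

struct ConstNode {
  std::string kind;
  int outcnt;        // users attached to this constant during the analysis
};

struct ValueTable {
  std::unordered_map<std::string, std::unique_ptr<ConstNode>> table;

  ConstNode* zerocon(const std::string& kind) {          // create or reuse
    auto& slot = table[kind];
    if (!slot) slot.reset(new ConstNode{kind, 0});
    return slot.get();
  }
  void hash_delete(const ConstNode* n) { table.erase(n->kind); }
};

void run_analysis_with_null_constants(ValueTable& gvn) {
  ConstNode* oop_null  = gvn.zerocon("ConP#NULL");
  ConstNode* noop_null = gvn.zerocon("ConN#NULL");
  // ... the analysis runs here and may bump the constants' outcnt ...
  if (oop_null->outcnt == 0)  gvn.hash_delete(oop_null);   // unused: drop it
  if (noop_null->outcnt == 0) gvn.hash_delete(noop_null);
}

int main() {
  ValueTable gvn;
  run_analysis_with_null_constants(gvn);
  return 0;   // both constants stayed unused, so the table is empty again
}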
......@@ -62,10 +62,14 @@ static const char *edge_type_suffix[] = {
"F" // FieldEdge
};
void PointsToNode::dump() const {
void PointsToNode::dump(bool print_state) const {
NodeType nt = node_type();
EscapeState es = escape_state();
tty->print("%s %s %s [[", node_type_names[(int) nt], esc_names[(int) es], _scalar_replaceable ? "" : "NSR");
tty->print("%s ", node_type_names[(int) nt]);
if (print_state) {
EscapeState es = escape_state();
tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
}
tty->print("[[");
for (uint i = 0; i < edge_count(); i++) {
tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
}
......@@ -84,11 +88,22 @@ ConnectionGraph::ConnectionGraph(Compile * C) :
_compile(C),
_node_map(C->comp_arena()) {
_phantom_object = C->top()->_idx;
PointsToNode *phn = ptnode_adr(_phantom_object);
phn->_node = C->top();
phn->set_node_type(PointsToNode::JavaObject);
phn->set_escape_state(PointsToNode::GlobalEscape);
_phantom_object = C->top()->_idx,
add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true);
// Add ConP(#NULL) and ConN(#NULL) nodes.
PhaseGVN* igvn = C->initial_gvn();
Node* oop_null = igvn->zerocon(T_OBJECT);
_oop_null = oop_null->_idx;
assert(_oop_null < C->unique(), "should be created already");
add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
if (UseCompressedOops) {
Node* noop_null = igvn->zerocon(T_NARROWOOP);
_noop_null = noop_null->_idx;
assert(_noop_null < C->unique(), "should be created already");
add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
}
}
void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
......@@ -500,29 +515,30 @@ void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
igvn->set_type(addp, tinst);
// record the allocation in the node map
set_map(addp->_idx, get_map(base->_idx));
// if the Address input is not the appropriate instance type
// (due to intervening casts,) insert a cast
Node *adr = addp->in(AddPNode::Address);
const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
if (atype != NULL && atype->instance_id() != inst_id) {
assert(!atype->is_known_instance(), "no conflicting instances");
const TypeOopPtr *new_atype = base_t->add_offset(atype->offset())->isa_oopptr();
Node *acast = new (_compile, 2) CastPPNode(adr, new_atype);
acast->set_req(0, adr->in(0));
igvn->set_type(acast, new_atype);
record_for_optimizer(acast);
Node *bcast = acast;
Node *abase = addp->in(AddPNode::Base);
if (abase != adr) {
bcast = new (_compile, 2) CastPPNode(abase, base_t);
bcast->set_req(0, abase->in(0));
igvn->set_type(bcast, base_t);
record_for_optimizer(bcast);
}
igvn->hash_delete(addp);
addp->set_req(AddPNode::Base, bcast);
addp->set_req(AddPNode::Address, acast);
igvn->hash_insert(addp);
// Set addp's Base and Address to 'base'.
Node *abase = addp->in(AddPNode::Base);
Node *adr = addp->in(AddPNode::Address);
if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
adr->in(0)->_idx == (uint)inst_id) {
// Skip AddP cases #3 and #5.
} else {
assert(!abase->is_top(), "sanity"); // AddP case #3
if (abase != base) {
igvn->hash_delete(addp);
addp->set_req(AddPNode::Base, base);
if (abase == adr) {
addp->set_req(AddPNode::Address, base);
} else {
// AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
assert(adr->is_AddP() && atype != NULL &&
atype->instance_id() == inst_id, "array's element offset should be processed first");
#endif
}
igvn->hash_insert(addp);
}
}
// Put on IGVN worklist since at least addp's type was changed above.
record_for_optimizer(addp);
......@@ -660,27 +676,31 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
Compile* C = phase->C;
const TypeOopPtr *tinst = C->get_adr_type(alias_idx)->isa_oopptr();
bool is_instance = (tinst != NULL) && tinst->is_known_instance();
Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
Node *prev = NULL;
Node *result = orig_mem;
while (prev != result) {
prev = result;
if (result == start_mem)
break; // hit one of our sentinels
if (result->is_Mem()) {
MemNode *mem = result->as_Mem();
const Type *at = phase->type(mem->in(MemNode::Address));
const Type *at = phase->type(result->in(MemNode::Address));
if (at != Type::TOP) {
assert (at->isa_ptr() != NULL, "pointer type required.");
int idx = C->get_alias_index(at->is_ptr());
if (idx == alias_idx)
break;
}
result = mem->in(MemNode::Memory);
result = result->in(MemNode::Memory);
}
if (!is_instance)
continue; // don't search further for non-instance types
// skip over a call which does not affect this memory slice
if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
Node *proj_in = result->in(0);
if (proj_in->is_Call()) {
if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) {
break; // hit one of our sentinels
} else if (proj_in->is_Call()) {
CallNode *call = proj_in->as_Call();
if (!call->may_modify(tinst, phase)) {
result = call->in(TypeFunc::Memory);
......@@ -1006,12 +1026,12 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
memnode_worklist.append_if_missing(use);
} else if (use->is_MergeMem()) {
mergemem_worklist.append_if_missing(use);
} else if (use->is_Call() && tinst != NULL) {
} else if (use->is_SafePoint() && tinst != NULL) {
// Look for MergeMem nodes for calls which reference unique allocation
// (through CheckCastPP nodes) even for debug info.
Node* m = use->in(TypeFunc::Memory);
uint iid = tinst->instance_id();
while (m->is_Proj() && m->in(0)->is_Call() &&
while (m->is_Proj() && m->in(0)->is_SafePoint() &&
m->in(0) != use && !m->in(0)->_idx != iid) {
m = m->in(0)->in(TypeFunc::Memory);
}
......@@ -1348,15 +1368,95 @@ bool ConnectionGraph::compute_escape() {
remove_deferred(ni, &deferred_edges, &visited);
Node *n = ptn->_node;
if (n->is_AddP()) {
// If this AddP computes an address which may point to more than one
// object or more than one field (array element), nothing the address
// points to can be scalar replaceable.
// Search for objects which are not scalar replaceable.
// Mark their escape state as ArgEscape to propagate the state
// to referenced objects.
// Note: currently there is no difference in compiler optimizations
// for ArgEscape objects and NoEscape objects which are not
// scalar replaceable.
int offset = ptn->offset();
Node *base = get_addp_base(n);
ptset.Clear();
PointsTo(ptset, base, igvn);
if (ptset.Size() > 1 ||
(ptset.Size() != 0 && ptn->offset() == Type::OffsetBot)) {
int ptset_size = ptset.Size();
// Check if the field's initializing value is recorded and add
// a corresponding NULL value if it is not recorded.
// The Connection Graph does not record a default initialization by NULL
// captured by an Initialize node.
//
// Note: it will disable scalar replacement in some cases:
//
// Point p[] = new Point[1];
// p[0] = new Point(); // Will not be scalar replaced
//
// but it will save us from incorrect optimizations in cases like the next one:
//
// Point p[] = new Point[1];
// if ( x ) p[0] = new Point(); // Will not be scalar replaced
//
// Without a control flow analysis we can't distinguish the above cases.
//
if (offset != Type::OffsetBot && ptset_size == 1) {
uint elem = ptset.getelem(); // Allocation node's index
// It does not matter if it is not an Allocation node since
// only non-escaping allocations are scalar replaced.
if (ptnode_adr(elem)->_node->is_Allocate() &&
ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
InitializeNode* ini = alloc->initialization();
Node* value = NULL;
if (ini != NULL) {
BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
Node* store = ini->find_captured_store(offset, type2aelembytes(ft), igvn);
if (store != NULL && store->is_Store())
value = store->in(MemNode::ValueIn);
}
if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
// A field's initializing value was not recorded. Add NULL.
uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
add_pointsto_edge(ni, null_idx);
}
}
}
// An object is not scalar replaceable if the field which may point
// to it has an unknown offset (an unknown element of an array of objects).
//
if (offset == Type::OffsetBot) {
uint e_cnt = ptn->edge_count();
for (uint ei = 0; ei < e_cnt; ei++) {
uint npi = ptn->edge_target(ei);
set_escape_state(npi, PointsToNode::ArgEscape);
ptnode_adr(npi)->_scalar_replaceable = false;
}
}
// Currently an object is not scalar replaceable if a LoadStore node
// accesses its field since the field value is unknown after it.
//
bool has_LoadStore = false;
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node *use = n->fast_out(i);
if (use->is_LoadStore()) {
has_LoadStore = true;
break;
}
}
// An object is not scalar replaceable if the address points
// to an unknown field (an unknown element for arrays, offset is OffsetBot).
//
// Or the address may point to more than one object. This may produce
// a false-positive result (setting scalar_replaceable to false)
// since the flow-insensitive escape analysis can't separate
// the case when stores overwrite the field's value from the case
// when stores happen on different control branches.
//
if (ptset_size > 1 || ptset_size != 0 &&
(has_LoadStore || offset == Type::OffsetBot)) {
for( VectorSetI j(&ptset); j.test(); ++j ) {
set_escape_state(j.elem, PointsToNode::ArgEscape);
ptnode_adr(j.elem)->_scalar_replaceable = false;
}
}
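The block above asks the allocation's Initialize node for a captured initializing store at the field's offset and, if none is recorded, adds a points-to edge to the ConP(#NULL)/ConN(#NULL) node, since a field with no captured store may still hold its default null value (the Point[] examples in the comment). A simplified, self-contained model of that rule; ObjId, CapturedStores and field_points_to are hypothetical stand-ins for the real node indices and the Initialize lookup.

// Hypothetical model: 'stores' maps (allocation, field offset) to the value
// captured by the allocation's Initialize node, if any.
#include <cstdio>
#include <map>
#include <set>
#include <utility>

using ObjId  = int;
using Offset = int;
const ObjId kNull = 0;   // stands in for the ConP(#NULL)/ConN(#NULL) node

using CapturedStores = std::map<std::pair<ObjId, Offset>, ObjId>;

// Points-to set of a field: the captured initializing value if one was
// recorded, otherwise the default value null.
std::set<ObjId> field_points_to(const CapturedStores& stores,
                                ObjId alloc, Offset off) {
  auto it = stores.find({alloc, off});
  if (it == stores.end()) return {kNull};
  return {it->second};
}

int main() {
  CapturedStores stores = {{{1, 16}, 2}};   // alloc #1, offset 16 := object #2
  std::printf("recorded field   -> %zu target(s)\n",
              field_points_to(stores, 1, 16).size());
  std::printf("unrecorded field -> includes null: %d\n",
              (int)field_points_to(stores, 1, 24).count(kNull));
  return 0;
}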
......@@ -1855,7 +1955,7 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
case Op_LoadN:
{
const Type *t = phase->type(n);
if (!t->isa_narrowoop() && t->isa_ptr() == NULL) {
if (t->make_ptr() == NULL) {
_processed.set(n->_idx);
return;
}
......@@ -1878,8 +1978,9 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
}
case Op_Phi:
{
if (n->as_Phi()->type()->isa_ptr() == NULL) {
// nothing to do if not an oop
const Type *t = n->as_Phi()->type();
if (t->make_ptr() == NULL) {
// nothing to do if not an oop or narrow oop
_processed.set(n->_idx);
return;
}
......@@ -2067,7 +2168,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
{
const Type *t = phase->type(n);
#ifdef ASSERT
if (!t->isa_narrowoop() && t->isa_ptr() == NULL)
if (t->make_ptr() == NULL)
assert(false, "Op_LoadP");
#endif
......@@ -2099,7 +2200,8 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
case Op_Phi:
{
#ifdef ASSERT
if (n->as_Phi()->type()->isa_ptr() == NULL)
const Type *t = n->as_Phi()->type();
if (t->make_ptr() == NULL)
assert(false, "Op_Phi");
#endif
for (uint i = 1; i < n->req() ; i++) {
......@@ -2213,16 +2315,14 @@ void ConnectionGraph::dump() {
PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
tty->print("%6d LocalVar [[%d]]", li, ni);
ptnode_adr(li)->_node->dump();
ptnode_adr(li)->dump(false);
}
}
if (Verbose) {
// Print all fields which reference this allocation
for (uint i = 0; i < ptn->edge_count(); i++) {
uint ei = ptn->edge_target(i);
tty->print("%6d Field [[%d]]", ei, ni);
ptnode_adr(ei)->_node->dump();
ptnode_adr(ei)->dump(false);
}
}
tty->cr();
......
......@@ -197,7 +197,7 @@ public:
void remove_edge(uint targIdx, EdgeType et);
#ifndef PRODUCT
void dump() const;
void dump(bool print_state=true) const;
#endif
};
......@@ -221,6 +221,8 @@ private:
// that pointer values loaded from
// a field which has not been set
// are assumed to point to.
uint _oop_null; // ConP(#NULL)
uint _noop_null; // ConN(#NULL)
Compile * _compile; // Compile object for current compilation
......
......@@ -322,7 +322,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSe
uint choice = 0; // Bigger is most important
uint latency = 0; // Bigger is scheduled first
uint score = 0; // Bigger is better
uint idx; // Index in worklist
int idx = -1; // Index in worklist
for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
// Order in worklist is used to break ties.
......@@ -412,9 +412,10 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSe
}
} // End of for all ready nodes in worklist
Node *n = worklist[idx]; // Get the winner
assert(idx >= 0, "index should be set");
Node *n = worklist[(uint)idx]; // Get the winner
worklist.map(idx,worklist.pop()); // Compress worklist
worklist.map((uint)idx, worklist.pop()); // Compress worklist
return n;
}
......@@ -599,7 +600,14 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
}
}
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ) {
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire &&
n->req() > TypeFunc::Parms ) {
// A MemBarAcquire could be created without a Precedent edge.
// del_req() replaces the specified edge with the last input edge
// and then removes the last edge. If the specified edge index is greater
// than the number of edges, the last edge is moved outside of the input
// edges array and lost. This is why this code should be executed only
// when the Precedent (== TypeFunc::Parms) edge is present.
Node *x = n->in(TypeFunc::Parms);
n->del_req(TypeFunc::Parms);
n->add_prec(x);
......
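The added guard matters because del_req() removes an input by copying the last input over the deleted slot and then shrinking the inputs array, so calling it when the Precedent slot does not exist silently discards a real input. A hypothetical del_req_like() helper on a plain vector, not the real Node API, showing the effect:

// Illustration only: remove input 'i' by moving the last input into its slot
// and shrinking the array, as a del_req-style operation does.
#include <cstdio>
#include <vector>

void del_req_like(std::vector<int>& inputs, size_t i) {
  if (i < inputs.size()) {
    inputs[i] = inputs.back();   // normal case: last input takes the freed slot
  }
  // When i >= inputs.size() the copy would land outside the live inputs,
  // so the last input is simply dropped here.
  inputs.pop_back();
}

int main() {
  std::vector<int> inputs = {10, 11};   // two inputs, no Precedent slot present
  del_req_like(inputs, 2);              // asking for a slot that does not exist...
  std::printf("inputs left: %zu\n", inputs.size());   // ...silently loses input 11
  return 0;
}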
......@@ -578,7 +578,8 @@ Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
Node *cmov = conditional_move( n );
if( cmov ) return cmov;
}
if( n->is_CFG() || n_op == Op_StorePConditional || n_op == Op_StoreLConditional || n_op == Op_CompareAndSwapI || n_op == Op_CompareAndSwapL ||n_op == Op_CompareAndSwapP) return n;
if( n->is_CFG() || n->is_LoadStore() )
return n;
if( n_op == Op_Opaque1 || // Opaque nodes cannot be mod'd
n_op == Op_Opaque2 ) {
if( !C->major_progress() ) // If chance of no more loop opts...
......
......@@ -194,9 +194,10 @@ void PhaseMacroExpand::eliminate_card_mark(Node *p2x) {
}
// Search for a memory operation for the specified memory slice.
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc) {
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
Node *orig_mem = mem;
Node *alloc_mem = alloc->in(TypeFunc::Memory);
const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
while (true) {
if (mem == alloc_mem || mem == start_mem ) {
return mem; // hit one of our sentinels
......@@ -208,7 +209,13 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
// already know that the object is safe to eliminate.
if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
return in;
} else if (in->is_Call() || in->is_MemBar()) {
} else if (in->is_Call()) {
CallNode *call = in->as_Call();
if (!call->may_modify(tinst, phase)) {
mem = call->in(TypeFunc::Memory);
}
mem = in->in(TypeFunc::Memory);
} else if (in->is_MemBar()) {
mem = in->in(TypeFunc::Memory);
} else {
assert(false, "unexpected projection");
......@@ -265,7 +272,7 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
}
if (level <= 0) {
return NULL;
return NULL; // Give up: phi tree too deep
}
Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
Node *alloc_mem = alloc->in(TypeFunc::Memory);
......@@ -283,7 +290,7 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
if (in == NULL || in->is_top()) {
values.at_put(j, in);
} else {
Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc);
Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
if (val == start_mem || val == alloc_mem) {
// hit a sentinel, return appropriate 0 value
values.at_put(j, _igvn.zerocon(ft));
......@@ -308,7 +315,8 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
}
values.at_put(j, val);
} else {
return NULL; // unknown node on this path
assert(false, "unknown node on this path");
return NULL; // unknown node on this path
}
}
}
......@@ -344,7 +352,7 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type
if (visited.test_set(mem->_idx)) {
return NULL; // found a loop, give up
}
mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc);
mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
if (mem == start_mem || mem == alloc_mem) {
done = true; // hit a sentinel, return appropriate 0 value
} else if (mem->is_Initialize()) {
......@@ -368,7 +376,7 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type
Node *unique_input = NULL;
Node *top = C->top();
for (uint i = 1; i < mem->req(); i++) {
Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc);
Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
if (n == NULL || n == top || n == mem) {
continue;
} else if (unique_input == NULL) {
......@@ -396,7 +404,7 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type
} else if (mem->is_Phi()) {
// attempt to produce a Phi reflecting the values on the input paths of the Phi
Node_Stack value_phis(a, 8);
Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, 8);
Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
if (phi != NULL) {
return phi;
} else {
......@@ -463,7 +471,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
Node* n = use->fast_out(k);
if (!n->is_Store() && n->Opcode() != Op_CastP2X) {
DEBUG_ONLY(disq_node = n;)
if (n->is_Load()) {
if (n->is_Load() || n->is_LoadStore()) {
NOT_PRODUCT(fail_eliminate = "Field load";)
} else {
NOT_PRODUCT(fail_eliminate = "Not store field referrence";)
......
......@@ -94,14 +94,19 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
if (tinst == NULL || !tinst->is_known_instance_field())
return mchain; // don't try to optimize non-instance types
uint instance_id = tinst->instance_id();
Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
Node *prev = NULL;
Node *result = mchain;
while (prev != result) {
prev = result;
if (result == start_mem)
break; // hit one of our sentinels
// skip over a call which does not affect this memory slice
if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
Node *proj_in = result->in(0);
if (proj_in->is_Call()) {
if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
break; // hit one of our sentinels
} else if (proj_in->is_Call()) {
CallNode *call = proj_in->as_Call();
if (!call->may_modify(t_adr, phase)) {
result = call->in(TypeFunc::Memory);
......@@ -115,6 +120,8 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
}
} else if (proj_in->is_MemBar()) {
result = proj_in->in(TypeFunc::Memory);
} else {
assert(false, "unexpected projection");
}
} else if (result->is_MergeMem()) {
result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);
......
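This hunk and the matching ConnectionGraph::find_inst_mem change above refine the same walk: follow the memory input chain for a known instance, skip producers that cannot modify that instance's slice, and stop at a sentinel, which is either the start memory or the allocation that created the instance (now recognized here as well). A generic sketch of that walk over a hypothetical MemState chain; the real code dispatches on Proj, Call, MemBar and MergeMem nodes and consults call->may_modify().

// Hypothetical MemState chain; 'modifies' plays the role of
// call->may_modify(t_adr, phase) for the slice being searched.
#include <cstdio>

struct MemState {
  enum Kind { Start, Alloc, Call, MemBar, Store } kind;
  int       instance_id;   // id of the allocation, for Alloc nodes
  bool      modifies;      // does this node touch the slice we care about?
  MemState* memory_in;     // previous memory state in the chain
};

// Walk backwards past nodes that cannot affect this instance's slice and stop
// at a sentinel: the start memory or the instance's own allocation.
MemState* skip_irrelevant(MemState* mem, int instance_id) {
  MemState* prev = nullptr;
  while (prev != mem) {
    prev = mem;
    if (mem->kind == MemState::Start) break;                    // sentinel
    if (mem->kind == MemState::Alloc &&
        mem->instance_id == instance_id) break;                 // our allocation
    if (!mem->modifies) mem = mem->memory_in;                   // slice untouched: skip
  }
  return mem;
}

int main() {
  MemState start{MemState::Start,  0, false, nullptr};
  MemState alloc{MemState::Alloc, 42, false, &start};
  MemState call {MemState::Call,   0, false, &alloc};   // call that cannot modify the slice
  std::printf("stopped at our allocation: %d\n", skip_irrelevant(&call, 42) == &alloc);
  return 0;
}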
......@@ -607,6 +607,7 @@ public:
};
//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
public:
enum {
......
......@@ -1196,8 +1196,10 @@ void SuperWord::construct_bb() {
Node *n = lp()->fast_out(i);
if (in_bb(n) && (n->is_Phi() && n->bottom_type() == Type::MEMORY)) {
Node* n_tail = n->in(LoopNode::LoopBackControl);
_mem_slice_head.push(n);
_mem_slice_tail.push(n_tail);
if (n_tail != n->in(LoopNode::EntryControl)) {
_mem_slice_head.push(n);
_mem_slice_tail.push(n_tail);
}
}
}
......
......@@ -2472,6 +2472,9 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
if (match_option(option, "-XX:+PrintVMOptions", &tail)) {
PrintVMOptions = true;
}
if (match_option(option, "-XX:-PrintVMOptions", &tail)) {
PrintVMOptions = false;
}
}
// Parse default .hotspotrc settings file
......
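With the new clause the minus form is honored, so PrintVMOptions can be switched back off later on the command line; the pre-scan handles the options in order, so the last occurrence wins. A quick way to try it (assumed invocation; the exact output format varies by build):

java -XX:+PrintVMOptions -version                        # prints the VM options that were specified
java -XX:+PrintVMOptions -XX:-PrintVMOptions -version    # the trailing minus form wins; nothing is printed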
/*
 * Copyright 2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
......
/*
 * Copyright 2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
......
/*
 * Copyright 2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
......