Commit 986a5553 authored by kvn

Merge

......@@ -81,4 +81,8 @@ public class DebugInfoReadStream extends CompressedReadStream {
Assert.that(false, "should not reach here");
return null;
}
public int readBCI() {
return readInt() + InvocationEntryBCI;
}
}
/*
* Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -259,7 +259,7 @@ public class NMethod extends CodeBlob {
if (Assert.ASSERTS_ENABLED) {
Assert.that(pd != null, "scope must be present");
}
return new ScopeDesc(this, pd.getScopeDecodeOffset());
return new ScopeDesc(this, pd.getScopeDecodeOffset(), pd.getReexecute());
}
/** This is only for use by the debugging system, and is only
......@@ -291,7 +291,7 @@ public class NMethod extends CodeBlob {
public ScopeDesc getScopeDescNearDbg(Address pc) {
PCDesc pd = getPCDescNearDbg(pc);
if (pd == null) return null;
return new ScopeDesc(this, pd.getScopeDecodeOffset());
return new ScopeDesc(this, pd.getScopeDecodeOffset(), pd.getReexecute());
}
public Map/*<Address, PcDesc>*/ getSafepoints() {
......
/*
* Copyright 2000-2004 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -36,6 +36,7 @@ import sun.jvm.hotspot.types.*;
public class PCDesc extends VMObject {
private static CIntegerField pcOffsetField;
private static CIntegerField scopeDecodeOffsetField;
private static CIntegerField pcFlagsField;
static {
VM.registerVMInitializedObserver(new Observer() {
......@@ -50,6 +51,7 @@ public class PCDesc extends VMObject {
pcOffsetField = type.getCIntegerField("_pc_offset");
scopeDecodeOffsetField = type.getCIntegerField("_scope_decode_offset");
pcFlagsField = type.getCIntegerField("_flags");
}
public PCDesc(Address addr) {
......@@ -70,6 +72,12 @@ public class PCDesc extends VMObject {
return code.instructionsBegin().addOffsetTo(getPCOffset());
}
public boolean getReexecute() {
int flags = (int)pcFlagsField.getValue(addr);
return (flags & 0x1) == 1; // bit 0 is the reexecute bit
}
public void print(NMethod code) {
printOn(System.out, code);
}
......
......@@ -52,44 +52,46 @@ public class ScopeDesc {
private List objects; // ArrayList<ScopeValue>
public ScopeDesc(NMethod code, int decodeOffset) {
public ScopeDesc(NMethod code, int decodeOffset, boolean reexecute) {
this.code = code;
this.decodeOffset = decodeOffset;
this.objects = decodeObjectValues(DebugInformationRecorder.SERIALIZED_NULL);
this.reexecute = reexecute;
// Decode header
DebugInfoReadStream stream = streamAt(decodeOffset);
senderDecodeOffset = stream.readInt();
method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle());
setBCIAndReexecute(stream.readInt());
bci = stream.readBCI();
// Decode offsets for body and sender
localsDecodeOffset = stream.readInt();
expressionsDecodeOffset = stream.readInt();
monitorsDecodeOffset = stream.readInt();
}
public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset) {
public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset, boolean reexecute) {
this.code = code;
this.decodeOffset = decodeOffset;
this.objects = decodeObjectValues(objectDecodeOffset);
this.reexecute = reexecute;
// Decode header
DebugInfoReadStream stream = streamAt(decodeOffset);
senderDecodeOffset = stream.readInt();
method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle());
setBCIAndReexecute(stream.readInt());
bci = stream.readBCI();
// Decode offsets for body and sender
localsDecodeOffset = stream.readInt();
expressionsDecodeOffset = stream.readInt();
monitorsDecodeOffset = stream.readInt();
}
public NMethod getNMethod() { return code; }
public Method getMethod() { return method; }
public int getBCI() { return bci; }
public boolean getReexecute() {return reexecute;}
public NMethod getNMethod() { return code; }
public Method getMethod() { return method; }
public int getBCI() { return bci; }
public boolean getReexecute() { return reexecute; }
/** Returns a List&lt;ScopeValue&gt; */
public List getLocals() {
......@@ -117,7 +119,7 @@ public class ScopeDesc {
return null;
}
return new ScopeDesc(code, senderDecodeOffset);
return new ScopeDesc(code, senderDecodeOffset, false);
}
/** Returns where the scope was decoded */
......@@ -151,8 +153,8 @@ public class ScopeDesc {
public void printValueOn(PrintStream tty) {
tty.print("ScopeDesc for ");
method.printValueOn(tty);
tty.println(" @bci " + bci);
tty.println(" reexecute: " + reexecute);
tty.print(" @bci " + bci);
tty.println(" reexecute=" + reexecute);
}
// FIXME: add more accessors
......@@ -160,12 +162,6 @@ public class ScopeDesc {
//--------------------------------------------------------------------------------
// Internals only below this point
//
private void setBCIAndReexecute(int combination) {
int InvocationEntryBci = VM.getVM().getInvocationEntryBCI();
bci = (combination >> 1) + InvocationEntryBci;
reexecute = (combination & 1)==1 ? true : false;
}
private DebugInfoReadStream streamAt(int decodeOffset) {
return new DebugInfoReadStream(code, decodeOffset, objects);
}
......
/*
* Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -38,7 +38,7 @@ enum {
// registers
enum {
pd_nof_cpu_regs_frame_map = 32, // number of registers used during code emission
pd_nof_caller_save_cpu_regs_frame_map = 6, // number of cpu registers killed by calls
pd_nof_caller_save_cpu_regs_frame_map = 10, // number of cpu registers killed by calls
pd_nof_cpu_regs_reg_alloc = 20, // number of registers that are visible to register allocator
pd_nof_cpu_regs_linearscan = 32,// number of registers visible linear scan
pd_first_cpu_reg = 0,
......
......@@ -320,6 +320,10 @@ void FrameMap::init () {
_caller_save_cpu_regs[3] = FrameMap::O3_opr;
_caller_save_cpu_regs[4] = FrameMap::O4_opr;
_caller_save_cpu_regs[5] = FrameMap::O5_opr;
_caller_save_cpu_regs[6] = FrameMap::G1_opr;
_caller_save_cpu_regs[7] = FrameMap::G3_opr;
_caller_save_cpu_regs[8] = FrameMap::G4_opr;
_caller_save_cpu_regs[9] = FrameMap::G5_opr;
for (int i = 0; i < nof_caller_save_fpu_regs; i++) {
_caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
}
......
......@@ -749,6 +749,10 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
assert(x->number_of_arguments() == 5, "wrong type");
// Make all state_for calls early since they can emit code
CodeEmitInfo* info = state_for(x, x->state());
// Note: spill caller save before setting the item
LIRItem src (x->argument_at(0), this);
LIRItem src_pos (x->argument_at(1), this);
......@@ -767,7 +771,6 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
ciArrayKlass* expected_type;
arraycopy_helper(x, &flags, &expected_type);
CodeEmitInfo* info = state_for(x, x->state());
__ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
length.result(), rlock_callee_saved(T_INT),
expected_type, flags, info);
......@@ -878,6 +881,9 @@ void LIRGenerator::do_NewInstance(NewInstance* x) {
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
// Evaluate state_for early since it may emit code
CodeEmitInfo* info = state_for(x, x->state());
LIRItem length(x->length(), this);
length.load_item();
......@@ -892,7 +898,6 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
__ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
CodeEmitInfo* info = state_for(x, x->state());
CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
......@@ -902,7 +907,8 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
LIRItem length(x->length(), this);
// Evaluate state_for early since it may emit code.
CodeEmitInfo* info = state_for(x, x->state());
// in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
// and therefore provide the state before the parameters have been consumed
CodeEmitInfo* patching_info = NULL;
......@@ -910,6 +916,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
patching_info = state_for(x, x->state_before());
}
LIRItem length(x->length(), this);
length.load_item();
const LIR_Opr reg = result_register_for(x->type());
......@@ -919,7 +926,6 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
LIR_Opr tmp4 = FrameMap::O1_oop_opr;
LIR_Opr klass_reg = FrameMap::G5_oop_opr;
LIR_Opr len = length.result();
CodeEmitInfo* info = state_for(x, x->state());
CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass());
......@@ -943,25 +949,22 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
items->at_put(i, size);
}
// need to get the info before, as the items may become invalid through item_free
// Evaluate state_for early since it may emit code.
CodeEmitInfo* patching_info = NULL;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
// cannot re-use same xhandlers for multiple CodeEmitInfos, so
// clone all handlers
// clone all handlers. This is handled transparently in other
// places by the CodeEmitInfo cloning logic but is handled
// specially here because a stub isn't being used.
x->set_exception_handlers(new XHandlers(x->exception_handlers()));
}
CodeEmitInfo* info = state_for(x, x->state());
i = dims->length();
while (i-- > 0) {
LIRItem* size = items->at(i);
// if a patching_info was generated above then debug information for the state before
// the call is going to be emitted. The LIRGenerator calls above may have left some values
// in registers and that's been recorded in the CodeEmitInfo. In that case the items
// for those values can't simply be freed if they are registers because the values
// might be destroyed by store_stack_parameter. So in the case of patching, delay the
// freeing of the items that already were in registers
size->load_item();
store_stack_parameter (size->result(),
in_ByteSize(STACK_BIAS +
......@@ -972,8 +975,6 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
// This instruction can be deoptimized in the slow path : use
// O0 as result register.
const LIR_Opr reg = result_register_for(x->type());
CodeEmitInfo* info = state_for(x, x->state());
jobject2reg_with_patching(reg, x->klass(), patching_info);
LIR_Opr rank = FrameMap::O1_opr;
__ move(LIR_OprFact::intConst(x->rank()), rank);
......
......@@ -1047,16 +1047,17 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
items->at_put(i, size);
}
// need to get the info before, as the items may become invalid through item_free
// Evaluate state_for early since it may emit code.
CodeEmitInfo* patching_info = NULL;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
// cannot re-use same xhandlers for multiple CodeEmitInfos, so
// clone all handlers.
// clone all handlers. This is handled transparently in other
// places by the CodeEmitInfo cloning logic but is handled
// specially here because a stub isn't being used.
x->set_exception_handlers(new XHandlers(x->exception_handlers()));
}
CodeEmitInfo* info = state_for(x, x->state());
i = dims->length();
......
......@@ -2381,7 +2381,7 @@ void SharedRuntime::generate_deopt_blob() {
// Save everything in sight.
map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
// Normal deoptimization
__ push(Deoptimization::Unpack_deopt);
__ jmp(cont);
......@@ -2392,7 +2392,7 @@ void SharedRuntime::generate_deopt_blob() {
// return address is the pc that describes what bci to re-execute at
// No need to update map as each call to save_live_registers will produce identical oopmap
(void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
(void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
__ push(Deoptimization::Unpack_reexecute);
__ jmp(cont);
......@@ -2428,7 +2428,7 @@ void SharedRuntime::generate_deopt_blob() {
// Save everything in sight.
// No need to update map as each call to save_live_registers will produce identical oopmap
(void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
(void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
// Now it is safe to overwrite any register
......@@ -2515,6 +2515,11 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::restore_result_registers(masm);
// A non-standard FPU control word may leak out through a safepoint blob, and we can
// deopt at a poll point with that non-standard control word. Make sure the control
// word is reset to the standard value after restore_result_registers.
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
// All of the register save area has been popped of the stack. Only the
// return address remains.
......
......@@ -1229,13 +1229,10 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) {
// Compiled java method case.
if (decode_offset != 0) {
bool dummy_reexecute = false;
DebugInfoReadStream stream(nm, decode_offset);
decode_offset = stream.read_int();
method = (methodOop)nm->oop_at(stream.read_int());
//fill_in_stack_trace does not need the reexecute information which is designed
//for the deopt to reexecute
bci = stream.read_bci_and_reexecute(dummy_reexecute);
bci = stream.read_bci();
} else {
if (fr.is_first_frame()) break;
address pc = fr.pc();
......
/*
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -255,8 +255,7 @@ class DebugInfoReadStream : public CompressedReadStream {
ScopeValue* read_object_value();
ScopeValue* get_cached_object();
// BCI encoding is mostly unsigned, but -1 is a distinguished value
// Decoding based on encoding: bci = InvocationEntryBci + read_int()/2; reexecute = read_int()%2 == 1 ? true : false;
int read_bci_and_reexecute(bool& reexecute) { int i = read_int(); reexecute = (i & 1) ? true : false; return (i >> 1) + InvocationEntryBci; }
int read_bci() { return read_int() + InvocationEntryBci; }
};
// DebugInfoWriteStream specializes CompressedWriteStream for
......@@ -269,6 +268,5 @@ class DebugInfoWriteStream : public CompressedWriteStream {
public:
DebugInfoWriteStream(DebugInformationRecorder* recorder, int initial_size);
void write_handle(jobject h);
//Encoding bci and reexecute into one word as (bci - InvocationEntryBci)*2 + reexecute
void write_bci_and_reexecute(int bci, bool reexecute) { write_int(((bci - InvocationEntryBci) << 1) + (reexecute ? 1 : 0)); }
void write_bci(int bci) { write_int(bci - InvocationEntryBci); }
};
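The patch replaces the packed bci/reexecute word in the debug-info stream with a plain biased bci: write_bci stores bci - InvocationEntryBci and read_bci adds it back, while the reexecute bit moves into the PcDesc flags below. A minimal standalone sketch of the round-trip (not part of the patch), assuming HotSpot's InvocationEntryBci value of -1 so the stored value stays non-negative for the compressed stream:
#include <cassert>
#include <cstdio>
static const int InvocationEntryBci = -1;   // assumed value, as in HotSpot's globalDefinitions.hpp
// New scheme: the stream carries only the biased bci; reexecute lives in the PcDesc.
static int  write_bci(int bci)     { return bci - InvocationEntryBci; }  // value written to the stream
static int  read_bci(int encoded)  { return encoded + InvocationEntryBci; }
// Old scheme removed by this commit: bci and reexecute shared one word.
static int  old_encode(int bci, bool reexecute) { return ((bci - InvocationEntryBci) << 1) | (reexecute ? 1 : 0); }
static int  old_bci(int word)       { return (word >> 1) + InvocationEntryBci; }
static bool old_reexecute(int word) { return (word & 1) != 0; }
int main() {
  for (int bci = InvocationEntryBci; bci < 100; bci++) {
    assert(read_bci(write_bci(bci)) == bci);                  // new encoding round-trips
    assert(old_bci(old_encode(bci, true)) == bci && old_reexecute(old_encode(bci, true)));
  }
  printf("bci 42 is stored as %d\n", write_bci(42));
  return 0;
}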
/*
* Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -292,13 +292,16 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
int stream_offset = stream()->position();
last_pd->set_scope_decode_offset(stream_offset);
// Record reexecute bit into pcDesc
last_pd->set_should_reexecute(reexecute);
// serialize sender stream offset
stream()->write_int(sender_stream_offset);
// serialize scope
jobject method_enc = (method == NULL)? NULL: method->encoding();
stream()->write_int(oop_recorder()->find_index(method_enc));
stream()->write_bci_and_reexecute(bci, reexecute);
stream()->write_bci(bci);
assert(method == NULL ||
(method->is_native() && bci == 0) ||
(!method->is_native() && 0 <= bci && bci < method->code_size()) ||
......
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -966,7 +966,7 @@ ScopeDesc* nmethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present");
return new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset());
pd->obj_decode_offset(), pd->should_reexecute());
}
......@@ -1932,7 +1932,7 @@ void nmethod::verify_interrupt_point(address call_site) {
PcDesc* pd = pc_desc_at(ic->end_of_call());
assert(pd != NULL, "PcDesc must exist");
for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset());
pd->obj_decode_offset(), pd->should_reexecute());
!sd->is_top(); sd = sd->sender()) {
sd->verify();
}
......@@ -2181,7 +2181,7 @@ ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
PcDesc* p = pc_desc_near(begin+1);
if (p != NULL && p->real_pc(this) <= end) {
return new ScopeDesc(this, p->scope_decode_offset(),
p->obj_decode_offset());
p->obj_decode_offset(), p->should_reexecute());
}
return NULL;
}
......
/*
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -26,9 +26,11 @@
# include "incls/_pcDesc.cpp.incl"
PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
assert(sizeof(PcDescFlags) <= 4, "occupies more than a word");
_pc_offset = pc_offset;
_scope_decode_offset = scope_decode_offset;
_obj_decode_offset = obj_decode_offset;
_flags.word = 0;
}
address PcDesc::real_pc(const nmethod* code) const {
......@@ -50,6 +52,7 @@ void PcDesc::print(nmethod* code) {
tty->print(" ");
sd->method()->print_short_name(tty);
tty->print(" @%d", sd->bci());
tty->print(" reexecute=%s", sd->should_reexecute()?"true":"false");
tty->cr();
}
#endif
......
/*
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -34,6 +34,13 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
int _scope_decode_offset; // offset for scope in nmethod
int _obj_decode_offset;
union PcDescFlags {
int word;
struct {
unsigned int reexecute: 1;
} bits;
} _flags;
public:
int pc_offset() const { return _pc_offset; }
int scope_decode_offset() const { return _scope_decode_offset; }
......@@ -53,6 +60,10 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
upper_offset_limit = (unsigned int)-1 >> 1
};
// Flags
bool should_reexecute() const { return _flags.bits.reexecute; }
void set_should_reexecute(bool z) { _flags.bits.reexecute = z; }
// Returns the real pc
address real_pc(const nmethod* code) const;
......
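A minimal standalone analogue (not part of the patch) of the PcDescFlags union added above, showing why the serviceability agent's PCDesc.getReexecute can recover the bit by masking the low bit of the raw _flags word; putting reexecute in bit 0 via the anonymous bit-field is the layout the flags & 0x1 check assumes:
#include <cassert>
#include <cstdio>
// Sketch of the one-bit flag carried per PcDesc entry.
union PcDescFlags {
  int word;                      // whole word, as read externally by the SA
  struct {
    unsigned int reexecute : 1;  // bit 0: re-execute this instruction on deoptimization?
  } bits;
};
int main() {
  PcDescFlags flags;
  flags.word = 0;                // the patched constructor zero-initializes the word
  flags.bits.reexecute = 1;
  // The SA reads the raw word and masks bit 0 (PCDesc.getReexecute in the Java hunk above).
  assert((flags.word & 0x1) == 1);
  printf("flags word = 0x%x, reexecute = %u\n", flags.word, flags.bits.reexecute);
  return 0;
}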
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -26,17 +26,19 @@
# include "incls/_scopeDesc.cpp.incl"
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset) {
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute) {
_code = code;
_decode_offset = decode_offset;
_objects = decode_object_values(obj_decode_offset);
_reexecute = reexecute;
decode_body();
}
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset) {
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute) {
_code = code;
_decode_offset = decode_offset;
_objects = decode_object_values(DebugInformationRecorder::serialized_null);
_reexecute = reexecute;
decode_body();
}
......@@ -45,8 +47,8 @@ ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
_code = parent->_code;
_decode_offset = parent->_sender_decode_offset;
_objects = parent->_objects;
_reexecute = false; //reexecute only applies to the first scope
decode_body();
assert(_reexecute == false, "reexecute not allowed");
}
......@@ -57,7 +59,6 @@ void ScopeDesc::decode_body() {
_sender_decode_offset = DebugInformationRecorder::serialized_null;
_method = methodHandle(_code->method());
_bci = InvocationEntryBci;
_reexecute = false;
_locals_decode_offset = DebugInformationRecorder::serialized_null;
_expressions_decode_offset = DebugInformationRecorder::serialized_null;
_monitors_decode_offset = DebugInformationRecorder::serialized_null;
......@@ -67,7 +68,7 @@ void ScopeDesc::decode_body() {
_sender_decode_offset = stream->read_int();
_method = methodHandle((methodOop) stream->read_oop());
_bci = stream->read_bci_and_reexecute(_reexecute);
_bci = stream->read_bci();
// decode offsets for body and sender
_locals_decode_offset = stream->read_int();
......
/*
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -39,8 +39,7 @@ class SimpleScopeDesc : public StackObj {
DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset());
int ignore_sender = buffer.read_int();
_method = methodOop(buffer.read_oop());
bool dummy_reexecute; //only methodOop and bci are needed!
_bci = buffer.read_bci_and_reexecute(dummy_reexecute);
_bci = buffer.read_bci();
}
methodOop method() { return _method; }
......@@ -53,12 +52,12 @@ class SimpleScopeDesc : public StackObj {
class ScopeDesc : public ResourceObj {
public:
// Constructor
ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset);
ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute);
// Calls above, giving default value of "serialized_null" to the
// "obj_decode_offset" argument. (We don't use a default argument to
// avoid a .hpp-.hpp dependency.)
ScopeDesc(const nmethod* code, int decode_offset);
ScopeDesc(const nmethod* code, int decode_offset, bool reexecute);
// JVM state
methodHandle method() const { return _method; }
......
......@@ -409,8 +409,6 @@ codeBlob.cpp c1_Runtime1.hpp
compileBroker.cpp c1_Compiler.hpp
frame.hpp c1_Defs.hpp
frame_<arch>.cpp c1_Runtime1.hpp
globals.cpp c1_globals.hpp
......@@ -433,8 +431,6 @@ os_<os_family>.cpp c1_Runtime1.hpp
os_<os_arch>.cpp c1_Runtime1.hpp
registerMap.hpp c1_Defs.hpp
safepoint.cpp c1_globals.hpp
sharedRuntime.cpp c1_Runtime1.hpp
......
......@@ -749,7 +749,10 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
assert(mode == UnscaledNarrowOop ||
mode == ZeroBasedNarrowOop ||
mode == HeapBasedNarrowOop, "mode is invalid");
// Return specified base for the first request.
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
return (char*)HeapBaseMinAddress;
}
const size_t total_size = heap_size + HeapBaseMinAddress;
if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
......@@ -857,7 +860,7 @@ jint Universe::initialize_heap() {
// Can't reserve heap below 4Gb.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
} else {
assert(Universe::narrow_oop_shift() == 0, "use unscaled narrow oop");
Universe::set_narrow_oop_shift(0);
if (PrintCompressedOopsMode) {
tty->print(", 32-bits Oops");
}
......
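For context on the narrow-oop hunks, a rough standalone sketch (not from the patch) of how total address-space use drives the choice between unscaled, zero-based and heap-based compressed oops; the 4 GB and 32 GB limits stand in for NarrowOopHeapMax and OopEncodingHeapMax and assume 8-byte object alignment:
#include <cstdio>
enum NarrowOopMode { UnscaledNarrowOop, ZeroBasedNarrowOop, HeapBasedNarrowOop };
// Assumed limits: 4 GB with no shift, 32 GB with a 3-bit shift (8-byte alignment).
static const unsigned long long NarrowOopHeapMax   = 1ULL << 32;
static const unsigned long long OopEncodingHeapMax = NarrowOopHeapMax << 3;
static NarrowOopMode pick_mode(unsigned long long heap_base_min, unsigned long long heap_size) {
  unsigned long long total = heap_base_min + heap_size;        // heap must end below the limit
  if (total <= NarrowOopHeapMax)   return UnscaledNarrowOop;   // 32-bit oops, no shift, no base
  if (total <= OopEncodingHeapMax) return ZeroBasedNarrowOop;  // shift by 3, base still zero
  return HeapBasedNarrowOop;                                   // shift and add the heap base
}
int main() {
  const unsigned long long GB = 1ULL << 30;
  printf("3 GB heap  -> mode %d\n", pick_mode(0, 3 * GB));     // unscaled
  printf("20 GB heap -> mode %d\n", pick_mode(0, 20 * GB));    // zero-based
  printf("64 GB heap -> mode %d\n", pick_mode(0, 64 * GB));    // heap-based
  return 0;
}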
......@@ -376,7 +376,7 @@
product(intx, AutoBoxCacheMax, 128, \
"Sets max value cached by the java.lang.Integer autobox cache") \
\
product(bool, DoEscapeAnalysis, false, \
product(bool, DoEscapeAnalysis, true, \
"Perform escape analysis") \
\
notproduct(bool, PrintEscapeAnalysis, false, \
......
......@@ -493,7 +493,8 @@ void JVMState::dump_spec(outputStream *st) const {
if (!printed)
_method->print_short_name(st);
st->print(" @ bci:%d",_bci);
st->print(" reexecute:%s", _reexecute==Reexecute_True?"true":"false");
if(_reexecute == Reexecute_True)
st->print(" reexecute");
} else {
st->print(" runtime stub");
}
......
......@@ -458,6 +458,16 @@ private:
// Post-Allocation peephole copy removal
void post_allocate_copy_removal();
Node *skip_copies( Node *c );
// Replace the old node with the current live version of that value
// and yank the old value if it's dead.
int replace_and_yank_if_dead( Node *old, OptoReg::Name nreg,
Block *current_block, Node_List& value, Node_List& regnd ) {
Node* v = regnd[nreg];
assert(v->outcnt() != 0, "no dead values");
old->replace_by(v);
return yank_if_dead(old, current_block, &value, &regnd);
}
int yank_if_dead( Node *old, Block *current_block, Node_List *value, Node_List *regnd );
int elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs );
int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd );
......
......@@ -1545,7 +1545,7 @@ void Compile::Optimize() {
if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
{
TracePhase t2("idealLoop", &_t_idealLoop, true);
PhaseIdealLoop ideal_loop( igvn, NULL, true );
PhaseIdealLoop ideal_loop( igvn, true );
loop_opts_cnt--;
if (major_progress()) print_method("PhaseIdealLoop 1", 2);
if (failing()) return;
......@@ -1553,7 +1553,7 @@ void Compile::Optimize() {
// Loop opts pass if partial peeling occurred in previous pass
if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
TracePhase t3("idealLoop", &_t_idealLoop, true);
PhaseIdealLoop ideal_loop( igvn, NULL, false );
PhaseIdealLoop ideal_loop( igvn, false );
loop_opts_cnt--;
if (major_progress()) print_method("PhaseIdealLoop 2", 2);
if (failing()) return;
......@@ -1561,10 +1561,15 @@ void Compile::Optimize() {
// Loop opts pass for loop-unrolling before CCP
if(major_progress() && (loop_opts_cnt > 0)) {
TracePhase t4("idealLoop", &_t_idealLoop, true);
PhaseIdealLoop ideal_loop( igvn, NULL, false );
PhaseIdealLoop ideal_loop( igvn, false );
loop_opts_cnt--;
if (major_progress()) print_method("PhaseIdealLoop 3", 2);
}
if (!failing()) {
// Verify that last round of loop opts produced a valid graph
NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
PhaseIdealLoop::verify(igvn);
}
}
if (failing()) return;
......@@ -1597,12 +1602,20 @@ void Compile::Optimize() {
while(major_progress() && (loop_opts_cnt > 0)) {
TracePhase t2("idealLoop", &_t_idealLoop, true);
assert( cnt++ < 40, "infinite cycle in loop optimization" );
PhaseIdealLoop ideal_loop( igvn, NULL, true );
PhaseIdealLoop ideal_loop( igvn, true );
loop_opts_cnt--;
if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
if (failing()) return;
}
}
{
// Verify that all previous optimizations produced a valid graph
// at least to this point, even if no loop optimizations were done.
NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
PhaseIdealLoop::verify(igvn);
}
{
NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
PhaseMacroExpand mex(igvn);
......@@ -2520,7 +2533,7 @@ bool Compile::final_graph_reshaping() {
// If original bytecodes contained a mixture of floats and doubles
// check if the optimizer has made it homogenous, item (3).
if( Use24BitFPMode && Use24BitFP &&
if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
frc.get_float_count() > 32 &&
frc.get_double_count() == 0 &&
(10 * frc.get_call_count() < frc.get_float_count()) ) {
......
/*
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -396,7 +396,7 @@ struct NTarjan {
// nodes (using the is_CFG() call) and places them in a dominator tree. Thus,
// it needs a count of the CFG nodes for the mapping table. This is the
// Lengauer & Tarjan O(E-alpha(E,V)) algorithm.
void PhaseIdealLoop::Dominators( ) {
void PhaseIdealLoop::Dominators() {
ResourceMark rm;
// Setup mappings from my Graph to Tarjan's stuff and back
// Note: Tarjan uses 1-based arrays
......@@ -454,7 +454,7 @@ void PhaseIdealLoop::Dominators( ) {
// flow into the main graph (and hence into ROOT) but are not reachable
// from above. Such code is dead, but requires a global pass to detect
// it; this global pass was the 'build_loop_tree' pass run just prior.
if( whead->is_Region() ) {
if( !_verify_only && whead->is_Region() ) {
for( uint i = 1; i < whead->req(); i++ ) {
if (!has_node(whead->in(i))) {
// Kill dead input path
......
......@@ -1420,13 +1420,12 @@ static void log_loop_tree(IdealLoopTree* root, IdealLoopTree* loop, CompileLog*
}
//=============================================================================
//------------------------------PhaseIdealLoop---------------------------------
//----------------------------build_and_optimize-------------------------------
// Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to
// its corresponding LoopNode. If 'optimize' is true, do some loop cleanups.
PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me, bool do_split_ifs )
: PhaseTransform(Ideal_Loop),
_igvn(igvn),
_dom_lca_tags(C->comp_arena()) {
void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) {
int old_progress = C->major_progress();
// Reset major-progress flag for the driver's heuristics
C->clear_major_progress();
......@@ -1465,18 +1464,20 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
}
// No loops after all
if( !_ltree_root->_child ) C->set_has_loops(false);
if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);
// There should always be an outer loop containing the Root and Return nodes.
// If not, we have a degenerate empty program. Bail out in this case.
if (!has_node(C->root())) {
C->clear_major_progress();
C->record_method_not_compilable("empty program detected during loop optimization");
if (!_verify_only) {
C->clear_major_progress();
C->record_method_not_compilable("empty program detected during loop optimization");
}
return;
}
// Nothing to do, so get out
if( !C->has_loops() && !do_split_ifs && !verify_me) {
if( !C->has_loops() && !do_split_ifs && !_verify_me && !_verify_only ) {
_igvn.optimize(); // Cleanup NeverBranches
return;
}
......@@ -1486,7 +1487,7 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
// Split shared headers and insert loop landing pads.
// Do not bother doing this on the Root loop of course.
if( !verify_me && _ltree_root->_child ) {
if( !_verify_me && !_verify_only && _ltree_root->_child ) {
if( _ltree_root->_child->beautify_loops( this ) ) {
// Re-build loop tree!
_ltree_root->_child = NULL;
......@@ -1515,23 +1516,25 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
Dominators();
// As a side effect, Dominators removed any unreachable CFG paths
// into RegionNodes. It doesn't do this test against Root, so
// we do it here.
for( uint i = 1; i < C->root()->req(); i++ ) {
if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root?
_igvn.hash_delete(C->root());
C->root()->del_req(i);
_igvn._worklist.push(C->root());
i--; // Rerun same iteration on compressed edges
if (!_verify_only) {
// As a side effect, Dominators removed any unreachable CFG paths
// into RegionNodes. It doesn't do this test against Root, so
// we do it here.
for( uint i = 1; i < C->root()->req(); i++ ) {
if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root?
_igvn.hash_delete(C->root());
C->root()->del_req(i);
_igvn._worklist.push(C->root());
i--; // Rerun same iteration on compressed edges
}
}
}
// Given dominators, try to find inner loops with calls that must
// always be executed (call dominates loop tail). These loops do
// not need a separate safepoint.
Node_List cisstack(a);
_ltree_root->check_safepts(visited, cisstack);
// Given dominators, try to find inner loops with calls that must
// always be executed (call dominates loop tail). These loops do
// not need a separate safepoint.
Node_List cisstack(a);
_ltree_root->check_safepts(visited, cisstack);
}
// Walk the DATA nodes and place into loops. Find earliest control
// node. For CFG nodes, the _nodes array starts out and remains
......@@ -1548,11 +1551,11 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
// it will be processed among C->top() inputs
worklist.push( C->top() );
visited.set( C->top()->_idx ); // Set C->top() as visited now
build_loop_early( visited, worklist, nstack, verify_me );
build_loop_early( visited, worklist, nstack );
// Given early legal placement, try finding counted loops. This placement
// is good enough to discover most loop invariants.
if( !verify_me )
if( !_verify_me && !_verify_only )
_ltree_root->counted_loop( this );
// Find latest loop placement. Find ideal loop placement.
......@@ -1562,16 +1565,25 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
worklist.push( C->root() );
NOT_PRODUCT( C->verify_graph_edges(); )
worklist.push( C->top() );
build_loop_late( visited, worklist, nstack, verify_me );
build_loop_late( visited, worklist, nstack );
if (_verify_only) {
// restore major progress flag
for (int i = 0; i < old_progress; i++)
C->set_major_progress();
assert(C->unique() == unique, "verification mode made Nodes? ? ?");
assert(_igvn._worklist.size() == 0, "shouldn't push anything");
return;
}
// clear out the dead code
while(_deadlist.size()) {
igvn.remove_globally_dead_node(_deadlist.pop());
_igvn.remove_globally_dead_node(_deadlist.pop());
}
#ifndef PRODUCT
C->verify_graph_edges();
if( verify_me ) { // Nested verify pass?
if( _verify_me ) { // Nested verify pass?
// Check to see if the verify mode is broken
assert(C->unique() == unique, "non-optimize mode made Nodes? ? ?");
return;
......@@ -1678,7 +1690,7 @@ static int fail; // debug only, so its multi-thread dont care
void PhaseIdealLoop::verify() const {
int old_progress = C->major_progress();
ResourceMark rm;
PhaseIdealLoop loop_verify( _igvn, this, false );
PhaseIdealLoop loop_verify( _igvn, this );
VectorSet visited(Thread::current()->resource_area());
fail = 0;
......@@ -2138,54 +2150,58 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) {
// optimizing an infinite loop?
l = _ltree_root; // Oops, found infinite loop
// Insert the NeverBranch between 'm' and it's control user.
NeverBranchNode *iff = new (C, 1) NeverBranchNode( m );
_igvn.register_new_node_with_optimizer(iff);
set_loop(iff, l);
Node *if_t = new (C, 1) CProjNode( iff, 0 );
_igvn.register_new_node_with_optimizer(if_t);
set_loop(if_t, l);
Node* cfg = NULL; // Find the One True Control User of m
for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
Node* x = m->fast_out(j);
if (x->is_CFG() && x != m && x != iff)
{ cfg = x; break; }
if (!_verify_only) {
// Insert the NeverBranch between 'm' and its control user.
NeverBranchNode *iff = new (C, 1) NeverBranchNode( m );
_igvn.register_new_node_with_optimizer(iff);
set_loop(iff, l);
Node *if_t = new (C, 1) CProjNode( iff, 0 );
_igvn.register_new_node_with_optimizer(if_t);
set_loop(if_t, l);
Node* cfg = NULL; // Find the One True Control User of m
for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
Node* x = m->fast_out(j);
if (x->is_CFG() && x != m && x != iff)
{ cfg = x; break; }
}
assert(cfg != NULL, "must find the control user of m");
uint k = 0; // Probably cfg->in(0)
while( cfg->in(k) != m ) k++; // But check in case cfg is a Region
cfg->set_req( k, if_t ); // Now point to NeverBranch
// Now create the never-taken loop exit
Node *if_f = new (C, 1) CProjNode( iff, 1 );
_igvn.register_new_node_with_optimizer(if_f);
set_loop(if_f, l);
// Find frame ptr for Halt. Relies on the optimizer
// V-N'ing. Easier and quicker than searching through
// the program structure.
Node *frame = new (C, 1) ParmNode( C->start(), TypeFunc::FramePtr );
_igvn.register_new_node_with_optimizer(frame);
// Halt & Catch Fire
Node *halt = new (C, TypeFunc::Parms) HaltNode( if_f, frame );
_igvn.register_new_node_with_optimizer(halt);
set_loop(halt, l);
C->root()->add_req(halt);
}
assert(cfg != NULL, "must find the control user of m");
uint k = 0; // Probably cfg->in(0)
while( cfg->in(k) != m ) k++; // But check incase cfg is a Region
cfg->set_req( k, if_t ); // Now point to NeverBranch
// Now create the never-taken loop exit
Node *if_f = new (C, 1) CProjNode( iff, 1 );
_igvn.register_new_node_with_optimizer(if_f);
set_loop(if_f, l);
// Find frame ptr for Halt. Relies on the optimizer
// V-N'ing. Easier and quicker than searching through
// the program structure.
Node *frame = new (C, 1) ParmNode( C->start(), TypeFunc::FramePtr );
_igvn.register_new_node_with_optimizer(frame);
// Halt & Catch Fire
Node *halt = new (C, TypeFunc::Parms) HaltNode( if_f, frame );
_igvn.register_new_node_with_optimizer(halt);
set_loop(halt, l);
C->root()->add_req(halt);
set_loop(C->root(), _ltree_root);
}
}
// Weeny check for irreducible. This child was already visited (this
// IS the post-work phase). Is this child's loop header post-visited
// as well? If so, then I found another entry into the loop.
while( is_postvisited(l->_head) ) {
// found irreducible
l->_irreducible = 1; // = true
l = l->_parent;
_has_irreducible_loops = true;
// Check for bad CFG here to prevent crash, and bailout of compile
if (l == NULL) {
C->record_method_not_compilable("unhandled CFG detected during loop optimization");
return pre_order;
if (!_verify_only) {
while( is_postvisited(l->_head) ) {
// found irreducible
l->_irreducible = 1; // = true
l = l->_parent;
_has_irreducible_loops = true;
// Check for bad CFG here to prevent crash, and bailout of compile
if (l == NULL) {
C->record_method_not_compilable("unhandled CFG detected during loop optimization");
return pre_order;
}
}
}
......@@ -2253,7 +2269,7 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) {
// Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
// First pass computes the earliest controlling node possible. This is the
// controlling input with the deepest dominating depth.
void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me ) {
void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
while (worklist.size() != 0) {
// Use local variables nstack_top_n & nstack_top_i to cache values
// on nstack's top.
......@@ -2285,7 +2301,7 @@ void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist,
// (the old code here would yank a 2nd safepoint after seeing a
// first one, even though the 1st did not dominate in the loop body
// and thus could be avoided indefinitely)
if( !verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint &&
if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint &&
is_deleteable_safept(n)) {
Node *in = n->in(TypeFunc::Control);
lazy_replace(n,in); // Pull safepoint now
......@@ -2408,12 +2424,31 @@ Node *PhaseIdealLoop::compute_idom( Node *region ) const {
return LCA;
}
//------------------------------get_late_ctrl----------------------------------
// Compute latest legal control.
Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
assert(early != NULL, "early control should not be NULL");
bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) {
bool had_error = false;
#ifdef ASSERT
if (early != C->root()) {
// Make sure that there's a dominance path from use to LCA
Node* d = use;
while (d != LCA) {
d = idom(d);
if (d == C->root()) {
tty->print_cr("*** Use %d isn't dominated by def %s", use->_idx, n->_idx);
n->dump();
use->dump();
had_error = true;
break;
}
}
}
#endif
return had_error;
}
Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) {
// Compute LCA over list of uses
bool had_error = false;
Node *LCA = NULL;
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) {
Node* c = n->fast_out(i);
......@@ -2423,15 +2458,34 @@ Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
for( uint j=1; j<c->req(); j++ ) {// For all inputs
if( c->in(j) == n ) { // Found matching input?
Node *use = c->in(0)->in(j);
if (_verify_only && use->is_top()) continue;
LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
}
}
} else {
// For CFG data-users, use is in the block just prior
Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0);
LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
}
}
assert(!had_error, "bad dominance");
return LCA;
}
//------------------------------get_late_ctrl----------------------------------
// Compute latest legal control.
Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
assert(early != NULL, "early control should not be NULL");
Node* LCA = compute_lca_of_uses(n, early);
#ifdef ASSERT
if (LCA == C->root() && LCA != early) {
// def doesn't dominate uses so print some useful debugging output
compute_lca_of_uses(n, early, true);
}
#endif
// if this is a load, check for anti-dependent stores
// We use a conservative algorithm to identify potential interfering
......@@ -2576,7 +2630,7 @@ void PhaseIdealLoop::clear_dom_lca_tags() {
//------------------------------build_loop_late--------------------------------
// Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
// Second pass finds latest legal placement, and ideal loop placement.
void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me ) {
void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
while (worklist.size() != 0) {
Node *n = worklist.pop();
// Only visit once
......@@ -2612,7 +2666,7 @@ void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, N
}
} else {
// All of n's children have been processed, complete post-processing.
build_loop_late_post(n, verify_me);
build_loop_late_post(n);
if (nstack.is_empty()) {
// Finished all nodes on stack.
// Process next node on the worklist.
......@@ -2631,9 +2685,9 @@ void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, N
//------------------------------build_loop_late_post---------------------------
// Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
// Second pass finds latest legal placement, and ideal loop placement.
void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify_me ) {
void PhaseIdealLoop::build_loop_late_post( Node *n ) {
if (n->req() == 2 && n->Opcode() == Op_ConvI2L && !C->major_progress()) {
if (n->req() == 2 && n->Opcode() == Op_ConvI2L && !C->major_progress() && !_verify_only) {
_igvn._worklist.push(n); // Maybe we'll normalize it, if no more loops.
}
......@@ -2714,6 +2768,7 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify
if( get_loop(legal)->_nest < get_loop(least)->_nest )
least = legal;
}
assert(early == legal || legal != C->root(), "bad dominance of inputs");
// Try not to place code on a loop entry projection
// which can inhibit range check elimination.
......@@ -2731,8 +2786,8 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify
#ifdef ASSERT
// If verifying, verify that 'verify_me' has a legal location
// and choose it as our location.
if( verify_me ) {
Node *v_ctrl = verify_me->get_ctrl_no_update(n);
if( _verify_me ) {
Node *v_ctrl = _verify_me->get_ctrl_no_update(n);
Node *legal = LCA;
while( early != legal ) { // While not at earliest legal
if( legal == v_ctrl ) break; // Check for prior good location
......
/*
* Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -442,6 +442,9 @@ class PhaseIdealLoop : public PhaseTransform {
uint *_preorders;
uint _max_preorder;
const PhaseIdealLoop* _verify_me;
bool _verify_only;
// Allocate _preorders[] array
void allocate_preorders() {
_max_preorder = C->unique()+8;
......@@ -497,6 +500,12 @@ class PhaseIdealLoop : public PhaseTransform {
Node_Array _dom_lca_tags;
void init_dom_lca_tags();
void clear_dom_lca_tags();
// Helper for debugging bad dominance relationships
bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early);
Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false);
// Inline wrapper for frequent cases:
// 1) only one use
// 2) a use is the same as the current LCA passed as 'n1'
......@@ -511,6 +520,7 @@ class PhaseIdealLoop : public PhaseTransform {
return find_non_split_ctrl(n);
}
Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );
// true if CFG node d dominates CFG node n
bool is_dominator(Node *d, Node *n);
......@@ -621,9 +631,9 @@ private:
IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );
// Place Data nodes in some loop nest
void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me );
void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me );
void build_loop_late_post ( Node* n, const PhaseIdealLoop *verify_me );
void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
void build_loop_late_post ( Node* n );
// Array of immediate dominance info for each CFG node indexed by node idx
private:
......@@ -662,6 +672,19 @@ private:
// Is safept not required by an outer loop?
bool is_deleteable_safept(Node* sfpt);
// Perform verification that the graph is valid.
PhaseIdealLoop( PhaseIterGVN &igvn) :
PhaseTransform(Ideal_Loop),
_igvn(igvn),
_dom_lca_tags(C->comp_arena()),
_verify_me(NULL),
_verify_only(true) {
build_and_optimize(false);
}
// build the loop tree and perform any requested optimizations
void build_and_optimize(bool do_split_if);
public:
// Dominators for the sea of nodes
void Dominators();
......@@ -671,7 +694,32 @@ public:
Node *dom_lca_internal( Node *n1, Node *n2 ) const;
// Compute the Ideal Node to Loop mapping
PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me, bool do_split_ifs );
PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs) :
PhaseTransform(Ideal_Loop),
_igvn(igvn),
_dom_lca_tags(C->comp_arena()),
_verify_me(NULL),
_verify_only(false) {
build_and_optimize(do_split_ifs);
}
// Verify that verify_me made the same decisions as a fresh run.
PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
PhaseTransform(Ideal_Loop),
_igvn(igvn),
_dom_lca_tags(C->comp_arena()),
_verify_me(verify_me),
_verify_only(false) {
build_and_optimize(false);
}
// Build and verify the loop tree without modifying the graph. This
// is useful to verify that all inputs properly dominate their uses.
static void verify(PhaseIterGVN& igvn) {
#ifdef ASSERT
PhaseIdealLoop v(igvn);
#endif
}
// True if the method has at least 1 irreducible loop
bool _has_irreducible_loops;
......
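The header changes above thread two pieces of state through the pass: _verify_me (re-run and compare against an earlier PhaseIdealLoop) and _verify_only (build the loop tree and dominators without mutating the graph, used by the new static verify()). A toy standalone sketch of that constructor-delegation pattern (not HotSpot code, types reduced to placeholders):
#include <cstdio>
// Toy stand-in for a phase that can run in optimize, compare-verify, or verify-only mode.
class LoopPhase {
  const LoopPhase* _verify_me;   // non-NULL: check decisions against this earlier run
  bool             _verify_only; // true: analyze only, never modify the graph
  void build_and_optimize(bool do_split_ifs) {
    if (_verify_only) { printf("analyze only\n"); return; }
    printf("build%s%s\n", do_split_ifs ? " + split ifs" : "",
           _verify_me ? " (comparing against earlier run)" : "");
  }
public:
  // Normal optimizing run.
  explicit LoopPhase(bool do_split_ifs)
    : _verify_me(NULL), _verify_only(false) { build_and_optimize(do_split_ifs); }
  // Re-run and compare against a previous phase.
  explicit LoopPhase(const LoopPhase* verify_me)
    : _verify_me(verify_me), _verify_only(false) { build_and_optimize(false); }
  // Analysis-only run, reached through the static verify() entry point.
  LoopPhase() : _verify_me(NULL), _verify_only(true) { build_and_optimize(false); }
  static void verify() {
#ifdef ASSERT
    LoopPhase v;                 // side-effect free; only compiled into debug builds
#endif
  }
};
int main() {
  LoopPhase opt(true);           // optimizing pass
  LoopPhase check(&opt);         // nested verify pass against it
  LoopPhase::verify();           // graph-preserving verification
  return 0;
}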
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -53,6 +53,7 @@ elapsedTimer Phase::_t_codeGeneration;
elapsedTimer Phase::_t_registerMethod;
elapsedTimer Phase::_t_temporaryTimer1;
elapsedTimer Phase::_t_temporaryTimer2;
elapsedTimer Phase::_t_idealLoopVerify;
// Subtimers for _t_optimizer
elapsedTimer Phase::_t_iterGVN;
......@@ -88,51 +89,52 @@ void Phase::print_timers() {
tty->print_cr ("Accumulated compiler times:");
tty->print_cr ("---------------------------");
tty->print_cr (" Total compilation: %3.3f sec.", Phase::_t_totalCompilation.seconds());
tty->print (" method compilation : %3.3f sec", Phase::_t_methodCompilation.seconds());
tty->print (" method compilation : %3.3f sec", Phase::_t_methodCompilation.seconds());
tty->print ("/%d bytes",_total_bytes_compiled);
tty->print_cr (" (%3.0f bytes per sec) ", Phase::_total_bytes_compiled / Phase::_t_methodCompilation.seconds());
tty->print_cr (" stub compilation : %3.3f sec.", Phase::_t_stubCompilation.seconds());
tty->print_cr (" stub compilation : %3.3f sec.", Phase::_t_stubCompilation.seconds());
tty->print_cr (" Phases:");
tty->print_cr (" parse : %3.3f sec", Phase::_t_parser.seconds());
tty->print_cr (" parse : %3.3f sec", Phase::_t_parser.seconds());
if (DoEscapeAnalysis) {
tty->print_cr (" escape analysis : %3.3f sec", Phase::_t_escapeAnalysis.seconds());
tty->print_cr (" escape analysis : %3.3f sec", Phase::_t_escapeAnalysis.seconds());
}
tty->print_cr (" optimizer : %3.3f sec", Phase::_t_optimizer.seconds());
tty->print_cr (" optimizer : %3.3f sec", Phase::_t_optimizer.seconds());
if( Verbose || WizardMode ) {
tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds());
tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds());
tty->print_cr (" ccp : %3.3f sec", Phase::_t_ccp.seconds());
tty->print_cr (" iterGVN2 : %3.3f sec", Phase::_t_iterGVN2.seconds());
tty->print_cr (" graphReshape : %3.3f sec", Phase::_t_graphReshaping.seconds());
tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds());
tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds());
tty->print_cr (" idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds());
tty->print_cr (" ccp : %3.3f sec", Phase::_t_ccp.seconds());
tty->print_cr (" iterGVN2 : %3.3f sec", Phase::_t_iterGVN2.seconds());
tty->print_cr (" graphReshape : %3.3f sec", Phase::_t_graphReshaping.seconds());
double optimizer_subtotal = Phase::_t_iterGVN.seconds() +
Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() +
Phase::_t_graphReshaping.seconds();
double percent_of_optimizer = ((optimizer_subtotal == 0.0) ? 0.0 : (optimizer_subtotal / Phase::_t_optimizer.seconds() * 100.0));
tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", optimizer_subtotal, percent_of_optimizer);
tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", optimizer_subtotal, percent_of_optimizer);
}
tty->print_cr (" matcher : %3.3f sec", Phase::_t_matcher.seconds());
tty->print_cr (" scheduler : %3.3f sec", Phase::_t_scheduler.seconds());
tty->print_cr (" regalloc : %3.3f sec", Phase::_t_registerAllocation.seconds());
tty->print_cr (" matcher : %3.3f sec", Phase::_t_matcher.seconds());
tty->print_cr (" scheduler : %3.3f sec", Phase::_t_scheduler.seconds());
tty->print_cr (" regalloc : %3.3f sec", Phase::_t_registerAllocation.seconds());
if( Verbose || WizardMode ) {
tty->print_cr (" ctorChaitin : %3.3f sec", Phase::_t_ctorChaitin.seconds());
tty->print_cr (" buildIFG : %3.3f sec", Phase::_t_buildIFGphysical.seconds());
tty->print_cr (" computeLive : %3.3f sec", Phase::_t_computeLive.seconds());
tty->print_cr (" regAllocSplit: %3.3f sec", Phase::_t_regAllocSplit.seconds());
tty->print_cr (" ctorChaitin : %3.3f sec", Phase::_t_ctorChaitin.seconds());
tty->print_cr (" buildIFG : %3.3f sec", Phase::_t_buildIFGphysical.seconds());
tty->print_cr (" computeLive : %3.3f sec", Phase::_t_computeLive.seconds());
tty->print_cr (" regAllocSplit : %3.3f sec", Phase::_t_regAllocSplit.seconds());
tty->print_cr (" postAllocCopyRemoval: %3.3f sec", Phase::_t_postAllocCopyRemoval.seconds());
tty->print_cr (" fixupSpills : %3.3f sec", Phase::_t_fixupSpills.seconds());
tty->print_cr (" fixupSpills : %3.3f sec", Phase::_t_fixupSpills.seconds());
double regalloc_subtotal = Phase::_t_ctorChaitin.seconds() +
Phase::_t_buildIFGphysical.seconds() + Phase::_t_computeLive.seconds() +
Phase::_t_regAllocSplit.seconds() + Phase::_t_fixupSpills.seconds() +
Phase::_t_postAllocCopyRemoval.seconds();
double percent_of_regalloc = ((regalloc_subtotal == 0.0) ? 0.0 : (regalloc_subtotal / Phase::_t_registerAllocation.seconds() * 100.0));
tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", regalloc_subtotal, percent_of_regalloc);
tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", regalloc_subtotal, percent_of_regalloc);
}
tty->print_cr (" macroExpand : %3.3f sec", Phase::_t_macroExpand.seconds());
tty->print_cr (" blockOrdering: %3.3f sec", Phase::_t_blockOrdering.seconds());
tty->print_cr (" peephole : %3.3f sec", Phase::_t_peephole.seconds());
tty->print_cr (" codeGen : %3.3f sec", Phase::_t_codeGeneration.seconds());
tty->print_cr (" install_code : %3.3f sec", Phase::_t_registerMethod.seconds());
tty->print_cr (" ------------ : ----------");
tty->print_cr (" macroExpand : %3.3f sec", Phase::_t_macroExpand.seconds());
tty->print_cr (" blockOrdering : %3.3f sec", Phase::_t_blockOrdering.seconds());
tty->print_cr (" peephole : %3.3f sec", Phase::_t_peephole.seconds());
tty->print_cr (" codeGen : %3.3f sec", Phase::_t_codeGeneration.seconds());
tty->print_cr (" install_code : %3.3f sec", Phase::_t_registerMethod.seconds());
tty->print_cr (" -------------- : ----------");
double phase_subtotal = Phase::_t_parser.seconds() +
(DoEscapeAnalysis ? Phase::_t_escapeAnalysis.seconds() : 0.0) +
Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() +
......@@ -143,7 +145,7 @@ void Phase::print_timers() {
double percent_of_method_compile = ((phase_subtotal == 0.0) ? 0.0 : phase_subtotal / Phase::_t_methodCompilation.seconds()) * 100.0;
// counters inside Compile::CodeGen include time for adapters and stubs
// so phase-total can be greater than 100%
tty->print_cr (" total : %3.3f sec, %3.2f %%", phase_subtotal, percent_of_method_compile);
tty->print_cr (" total : %3.3f sec, %3.2f %%", phase_subtotal, percent_of_method_compile);
assert( percent_of_method_compile > expected_method_compile_coverage ||
phase_subtotal < minimum_meaningful_method_compile,
......@@ -157,8 +159,8 @@ void Phase::print_timers() {
tty->cr();
tty->print_cr (" temporaryTimer2: %3.3f sec", Phase::_t_temporaryTimer2.seconds());
}
tty->print_cr (" output : %3.3f sec", Phase::_t_output.seconds());
tty->print_cr (" isched : %3.3f sec", Phase::_t_instrSched.seconds());
tty->print_cr (" bldOopMaps: %3.3f sec", Phase::_t_buildOopMaps.seconds());
tty->print_cr (" output : %3.3f sec", Phase::_t_output.seconds());
tty->print_cr (" isched : %3.3f sec", Phase::_t_instrSched.seconds());
tty->print_cr (" bldOopMaps : %3.3f sec", Phase::_t_buildOopMaps.seconds());
}
#endif
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -83,6 +83,7 @@ protected:
static elapsedTimer _t_registerMethod;
static elapsedTimer _t_temporaryTimer1;
static elapsedTimer _t_temporaryTimer2;
static elapsedTimer _t_idealLoopVerify;
// Subtimers for _t_optimizer
static elapsedTimer _t_iterGVN;
......
......@@ -1622,9 +1622,11 @@ void Node::set_req_X( uint i, Node *n, PhaseIterGVN *igvn ) {
// old goes dead?
if( old ) {
switch (old->outcnt()) {
case 0: // Kill all his inputs, and recursively kill other dead nodes.
case 0:
// Put into the worklist to kill later. We do not kill it now because the
// recursive kill would delete the current node (this) if a dead loop exists.
if (!old->is_top())
igvn->remove_dead_node( old );
igvn->_worklist.push( old );
break;
case 1:
if( old->is_Store() || old->has_special_unique_user() )
......
......@@ -88,6 +88,7 @@ int PhaseChaitin::yank_if_dead( Node *old, Block *current_block, Node_List *valu
value->map(old_reg,NULL); // Yank from value/regnd maps
regnd->map(old_reg,NULL); // This register's value is now unknown
}
assert(old->req() <= 2, "can't handle more inputs");
Node *tmp = old->req() > 1 ? old->in(1) : NULL;
old->disconnect_inputs(NULL);
if( !tmp ) break;
......@@ -530,6 +531,16 @@ void PhaseChaitin::post_allocate_copy_removal() {
// Do not change from int to pointer
Node *val = skip_copies(n);
// Clear out a dead definition before starting so that the
// elimination code doesn't have to guard against it. The
// definition could in fact be a kill projection with a count of
// 0, which is safe; but since those are uninteresting for copy
// elimination, just delete them as well.
if (regnd[nreg] != NULL && regnd[nreg]->outcnt() == 0) {
regnd.map(nreg, NULL);
value.map(nreg, NULL);
}
uint n_ideal_reg = n->ideal_reg();
if( is_single_register(n_ideal_reg) ) {
// If Node 'n' does not change the value mapped by the register,
......@@ -537,8 +548,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
// mapping so 'n' will go dead.
if( value[nreg] != val ) {
if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, OptoReg::Bad)) {
n->replace_by(regnd[nreg]);
j -= yank_if_dead(n,b,&value,&regnd);
j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
} else {
// Update the mapping: record new Node defined by the register
regnd.map(nreg,n);
......@@ -546,10 +556,9 @@ void PhaseChaitin::post_allocate_copy_removal() {
// Node after skipping all copies.
value.map(nreg,val);
}
} else if( !may_be_copy_of_callee(n) && regnd[nreg]->outcnt() != 0 ) {
} else if( !may_be_copy_of_callee(n) ) {
assert( n->is_Copy(), "" );
n->replace_by(regnd[nreg]);
j -= yank_if_dead(n,b,&value,&regnd);
j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
}
} else {
// If the value occupies a register pair, record same info
......@@ -565,18 +574,16 @@ void PhaseChaitin::post_allocate_copy_removal() {
}
if( value[nreg] != val || value[nreg_lo] != val ) {
if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, nreg_lo)) {
n->replace_by(regnd[nreg]);
j -= yank_if_dead(n,b,&value,&regnd);
j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
} else {
regnd.map(nreg , n );
regnd.map(nreg_lo, n );
value.map(nreg ,val);
value.map(nreg_lo,val);
}
} else if( !may_be_copy_of_callee(n) && regnd[nreg]->outcnt() != 0 ) {
} else if( !may_be_copy_of_callee(n) ) {
assert( n->is_Copy(), "" );
n->replace_by(regnd[nreg]);
j -= yank_if_dead(n,b,&value,&regnd);
j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
}
}
......
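The postaloc.cpp hunks above add two pieces of bookkeeping: a stale definition with no remaining uses is cleared from the value/regnd maps up front so the copy-elimination code never has to guard against it, and the repeated replace-then-yank sequence is folded into a replace_and_yank_if_dead helper. A small Java sketch of the map-clearing step follows; CopyRemovalSketch and Def are invented names, and the real allocator indexes Node_List maps by OptoReg rather than using Java HashMaps.

import java.util.HashMap;
import java.util.Map;

// Invented stand-ins for the allocator's per-register bookkeeping; the real
// code works on Node_List maps indexed by register number, not Java HashMaps.
public class CopyRemovalSketch {
    static final class Def {
        final String name;
        int useCount;
        Def(String name, int useCount) { this.name = name; this.useCount = useCount; }
    }

    // regnd: which definition currently owns a register.
    // value: the value that definition represents after skipping copies.
    final Map<Integer, Def> regnd = new HashMap<>();
    final Map<Integer, Def> value = new HashMap<>();

    // Mirror of the new guard: forget a definition that has no uses left, so
    // the elimination code never has to special-case dead entries.
    void clearIfDead(int reg) {
        Def d = regnd.get(reg);
        if (d != null && d.useCount == 0) {
            regnd.remove(reg);
            value.remove(reg);
        }
    }

    public static void main(String[] args) {
        CopyRemovalSketch s = new CopyRemovalSketch();
        Def dead = new Def("deadDef", 0);
        s.regnd.put(5, dead);
        s.value.put(5, dead);
        s.clearIfDead(5);
        System.out.println("reg 5 still mapped: " + s.regnd.containsKey(5)); // false
    }
}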
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -402,7 +402,7 @@ void JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nmethod *nm,
address scopes_data = nm->scopes_data_begin();
for( pcd = nm->scopes_pcs_begin(); pcd < nm->scopes_pcs_end(); ++pcd ) {
ScopeDesc sc0(nm, pcd->scope_decode_offset());
ScopeDesc sc0(nm, pcd->scope_decode_offset(), pcd->should_reexecute());
ScopeDesc *sd = &sc0;
while( !sd->is_top() ) { sd = sd->sender(); }
int bci = sd->bci();
......
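The JVMTI hunk above builds each ScopeDesc from its PcDesc (now passing the reexecute flag as well) and then walks sender scopes until the outermost one to read its bci. A tiny Java sketch of that sender walk follows; Scope and OuterScopeSketch are invented stand-ins for the real ScopeDesc.

// Tiny illustration of walking to the outermost scope as done above; Scope is
// an invented stand-in, not the HotSpot or SA ScopeDesc class.
public class OuterScopeSketch {
    static final class Scope {
        final Scope sender;   // null for the top (outermost) scope
        final int bci;
        Scope(Scope sender, int bci) { this.sender = sender; this.bci = bci; }
    }

    static int outermostBci(Scope sd) {
        while (sd.sender != null) {   // mirrors: while (!sd->is_top()) sd = sd->sender();
            sd = sd.sender;
        }
        return sd.bci;
    }

    public static void main(String[] args) {
        Scope top = new Scope(null, 3);
        Scope inlined = new Scope(top, 17);
        System.out.println(outermostBci(inlined)); // prints 3
    }
}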
......@@ -1233,10 +1233,8 @@ void Arguments::set_ergonomics_flags() {
// Check that UseCompressedOops can be set with the max heap size allocated
// by ergonomics.
if (MaxHeapSize <= max_heap_for_compressed_oops()) {
if (FLAG_IS_DEFAULT(UseCompressedOops)) {
// Turn off until bug is fixed.
// Uncomment the following line to return it to default status.
// FLAG_SET_ERGO(bool, UseCompressedOops, true);
if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
FLAG_SET_ERGO(bool, UseCompressedOops, true);
}
#ifdef _WIN64
if (UseLargePages && UseCompressedOops) {
......
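The arguments.cpp hunk above re-enables the ergonomic setting of UseCompressedOops: when the flag is still at its default, the maximum heap fits under the compressed-oops limit, and G1 is not selected, the flag is turned on. A minimal Java sketch of that decision shape follows; every name in it is invented for illustration, and the real logic lives in C++ in Arguments::set_ergonomics_flags.

// Illustrative decision shape only; the names are invented and the constants
// are placeholders, not values taken from HotSpot.
public class CompressedOopsErgoSketch {
    static boolean enableCompressedOops(long maxHeapBytes,
                                        long compressedOopsHeapLimit,
                                        boolean flagIsDefault,
                                        boolean useG1) {
        // Only flip the flag ergonomically when the user left it at its default
        // and the chosen heap still fits under the compressed-oops limit; this
        // change also keeps it off when G1 is in use.
        return maxHeapBytes <= compressedOopsHeapLimit && flagIsDefault && !useG1;
    }

    public static void main(String[] args) {
        long limit = 32L * 1024 * 1024 * 1024;   // assumed limit, for the sketch only
        System.out.println(enableCompressedOops(4L << 30, limit, true, false)); // true
        System.out.println(enableCompressedOops(4L << 30, limit, true, true));  // false under G1
    }
}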
......@@ -402,12 +402,7 @@ inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
DebugInfoReadStream buffer(nm(), decode_offset);
_sender_decode_offset = buffer.read_int();
_method = methodOop(buffer.read_oop());
// Deoptimization needs the reexecute bit to decide whether to reexecute the
// bytecode only at the time it "unpack_frames", and that bit can always be
// obtained from the scopeDesc in the compiledVFrame. As a result, we don't
// keep the reexecute bit here.
bool dummy_reexecute;
_bci = buffer.read_bci_and_reexecute(dummy_reexecute);
_bci = buffer.read_bci();
assert(_method->is_method(), "checking type of decoded method");
}
......
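The vframe hunk above drops the reexecute handling from fill_from_compiled_frame and simply reads the sender decode offset, the method, and the bci in sequence from the debug-info stream. A rough, purely illustrative Java sketch of decoding such a three-field header follows; it uses DataInputStream over a byte array as a stand-in, whereas the real code reads a HotSpot compressed stream and an oop.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical illustration of decoding a scope header in the order used above:
// sender decode offset, method reference, then the bci. DataInputStream is a
// stand-in for the HotSpot compressed debug-info stream.
public class ScopeHeaderSketch {
    public static void main(String[] args) throws IOException {
        byte[] encoded = encode(48, 7, 12);
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(encoded));
        int senderDecodeOffset = in.readInt();
        int methodRef = in.readInt();   // stands in for the method oop
        int bci = in.readInt();         // the reexecute bit is no longer read here
        System.out.println(senderDecodeOffset + " " + methodRef + " " + bci);
    }

    static byte[] encode(int sender, int method, int bci) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);
        out.writeInt(sender);
        out.writeInt(method);
        out.writeInt(bci);
        return bos.toByteArray();
    }
}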
......@@ -593,6 +593,7 @@ static inline uint64_t cast_uint64_t(size_t x)
\
nonstatic_field(PcDesc, _pc_offset, int) \
nonstatic_field(PcDesc, _scope_decode_offset, int) \
nonstatic_field(PcDesc, _flags, PcDesc::PcDescFlags) \
\
/***************************************************/ \
/* CodeBlobs (NOTE: incomplete, but only a little) */ \
......@@ -1158,6 +1159,7 @@ static inline uint64_t cast_uint64_t(size_t x)
/***************************************/ \
\
declare_toplevel_type(PcDesc) \
declare_integer_type(PcDesc::PcDescFlags) \
\
/************************/ \
/* OopMap and OopMapSet */ \
......
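The vmStructs hunk above exposes the new PcDesc::_flags field, which carries the reexecute state as a bit in an integer flags word. A minimal Java sketch of testing such a one-bit flag follows; PcDescFlagsSketch is an invented name, and the assumption that the reexecute bit is the lowest bit is the sketch's own, not something stated in this hunk.

// Minimal sketch of reading a one-bit flag out of an int-sized flags word.
// The bit position is an assumption made for illustration only.
public class PcDescFlagsSketch {
    static final int REEXECUTE_BIT = 0x1;   // assumed bit position

    static boolean shouldReexecute(int flags) {
        return (flags & REEXECUTE_BIT) != 0;
    }

    public static void main(String[] args) {
        System.out.println(shouldReexecute(0x0)); // false
        System.out.println(shouldReexecute(0x1)); // true
    }
}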
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/**
* @test
* @bug 6795465
* @summary Crash in assembler_sparc.cpp with client compiler on solaris-sparc
*
* @run main Test6795465
*/
public class Test6795465 {
static long var_1 = -1;
void test() {
long var_2 = var_1 * 1;
var_2 = var_2 + (new byte[1])[0];
}
public static void main(String[] args) {
Test6795465 t = new Test6795465();
for (int i = 0; i < 200000; i++) {
t.test();
}
}
}
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
/**
* @test
* @bug 6866651
* @summary delay dead node elimination in set_req_X to prevent killing the current node when it is in use
*
* @run main Test
*/
public class Test {
static int sum() {
int s = 0;
for (int x = 1, y = 0; x != 0; x++, y--) {
s ^= y;
}
return s;
}
public static void main(final String[] args) {
for (int k = 0; k < 2; k++) {
System.err.println(String.valueOf(sum()));
}
}
}