Commit d1c2887d authored by iveresov

Merge

@@ -365,7 +365,7 @@ address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicTyp
   return entry;
 }
 
-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   // make it look good in the debugger
   return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset;
 }
...
@@ -153,13 +153,9 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
 }
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
-  TosState incoming_state = state;
-
-  Label cont;
-  address compiled_entry = __ pc();
-
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
 #if !defined(_LP64) && defined(COMPILER2)
   // All return values are where we want them, except for Longs. C2 returns
   // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
@@ -170,14 +166,12 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
   // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
   // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
-  if (incoming_state == ltos) {
+  if (state == ltos) {
     __ srl (G1,  0, O1);
     __ srlx(G1, 32, O0);
   }
 #endif // !_LP64 && COMPILER2
-  __ bind(cont);
 
   // The callee returns with the stack possibly adjusted by adapter transition
   // We remove that possible adjustment here.
   // All interpreter local registers are untouched. Any result is passed back
@@ -186,29 +180,18 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
   __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
 
-  Label L_got_cache, L_giant_index;
   const Register cache = G3_scratch;
-  const Register size  = G1_scratch;
-  if (EnableInvokeDynamic) {
-    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
-    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index);
-  }
-  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
-  __ bind(L_got_cache);
-  __ ld_ptr(cache, ConstantPoolCache::base_offset() +
-                   ConstantPoolCacheEntry::flags_offset(), size);
-  __ and3(size, 0xFF, size);                            // argument size in words
-  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
-  __ add(Lesp, size, Lesp);                             // pop arguments
+  const Register index = G1_scratch;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
+  const Register parameter_size = flags;
+  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
+  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
+  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
+
   __ dispatch_next(state, step);
-
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
-    __ ba_short(L_got_cache);
-  }
 
   return entry;
 }
...
@@ -2932,9 +2932,7 @@ void TemplateTable::prepare_invoke(int byte_no,
   ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
-        (address)Interpreter::return_5_addrs_by_index_table() :
-        (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
     AddressLiteral table(table_addr);
     __ set(table, temp);
     __ sll(ra, LogBytesPerWord, ra);
@@ -2984,7 +2982,7 @@ void TemplateTable::invokevirtual(int byte_no) {
   __ verify_oop(O0_recv);
 
   // get return address
-  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
+  AddressLiteral table(Interpreter::invoke_return_entry_table());
   __ set(table, Rtemp);
   __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
   // Make sure we don't need to mask Rret after the above shift
@@ -3026,7 +3024,7 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
   __ profile_final_call(O4);
 
   // get return address
-  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
+  AddressLiteral table(Interpreter::invoke_return_entry_table());
   __ set(table, Rtemp);
   __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
   // Make sure we don't need to mask Rret after the above shift
...
@@ -1468,19 +1468,18 @@ void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
     addr = new LIR_Address(src.result(), offset, type);
   }
 
-  if (data != dst) {
+  // Because we want a 2-arg form of xchg and xadd
   __ move(data, dst);
-    data = dst;
-  }
+
   if (x->is_add()) {
-    __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
   } else {
     if (is_obj) {
       // Do the pre-write barrier, if any.
       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                   true /* do_load */, false /* patch */, NULL);
     }
-    __ xchg(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
     if (is_obj) {
       // Seems to be a precise address
       post_barrier(LIR_OprFact::address(addr), data);
...
@@ -367,7 +367,7 @@ address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicTyp
   return entry;
 }
 
-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   // make it look good in the debugger
   return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
 }
...
@@ -150,13 +150,12 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
 }
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
-  TosState incoming_state = state;
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
 
 #ifdef COMPILER2
   // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
-  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
     for (int i = 1; i < 8; i++) {
       __ ffree(i);
     }
@@ -164,7 +163,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
     __ empty_FPU_stack();
   }
 #endif
-  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
     __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
   } else {
     __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
@@ -172,12 +171,12 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
   // In SSE mode, interpreter returns FP results in xmm0 but they need
   // to end up back on the FPU so it can operate on them.
-  if (incoming_state == ftos && UseSSE >= 1) {
+  if (state == ftos && UseSSE >= 1) {
     __ subptr(rsp, wordSize);
     __ movflt(Address(rsp, 0), xmm0);
     __ fld_s(Address(rsp, 0));
     __ addptr(rsp, wordSize);
-  } else if (incoming_state == dtos && UseSSE >= 2) {
+  } else if (state == dtos && UseSSE >= 2) {
     __ subptr(rsp, 2*wordSize);
     __ movdbl(Address(rsp, 0), xmm0);
     __ fld_d(Address(rsp, 0));
@@ -194,32 +193,21 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
   __ restore_bcp();
   __ restore_locals();
 
-  if (incoming_state == atos) {
+  if (state == atos) {
     Register mdp = rbx;
     Register tmp = rcx;
     __ profile_return_type(mdp, rax, tmp);
   }
 
-  Label L_got_cache, L_giant_index;
-  if (EnableInvokeDynamic) {
-    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
-    __ jcc(Assembler::equal, L_giant_index);
-  }
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
-  __ bind(L_got_cache);
-  __ movl(rbx, Address(rbx, rcx,
-                       Address::times_ptr, ConstantPoolCache::base_offset() +
-                       ConstantPoolCacheEntry::flags_offset()));
-  __ andptr(rbx, 0xFF);
-  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
-  __ dispatch_next(state, step);
-
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
-    __ jmp(L_got_cache);
-  }
+  const Register cache = rbx;
+  const Register index = rcx;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
+  __ dispatch_next(state, step);
 
   return entry;
 }
...
@@ -166,7 +166,7 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
 }
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
 
   // Restore stack bottom in case i2c adjusted stack
@@ -183,27 +183,15 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
     __ profile_return_type(mdp, rax, tmp);
   }
 
-  Label L_got_cache, L_giant_index;
-  if (EnableInvokeDynamic) {
-    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
-    __ jcc(Assembler::equal, L_giant_index);
-  }
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
-  __ bind(L_got_cache);
-  __ movl(rbx, Address(rbx, rcx,
-                       Address::times_ptr,
-                       in_bytes(ConstantPoolCache::base_offset()) +
-                       3 * wordSize));
-  __ andl(rbx, 0xFF);
-  __ lea(rsp, Address(rsp, rbx, Address::times_8));
-  __ dispatch_next(state, step);
-
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
-    __ jmp(L_got_cache);
-  }
+  const Register cache = rbx;
+  const Register index = rcx;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
+  __ dispatch_next(state, step);
 
   return entry;
 }
...
@@ -2925,9 +2925,7 @@ void TemplateTable::prepare_invoke(int byte_no,
   ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
-        (address)Interpreter::return_5_addrs_by_index_table() :
-        (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
     ExternalAddress table(table_addr);
     __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
   }
...
@@ -2980,9 +2980,7 @@ void TemplateTable::prepare_invoke(int byte_no,
   ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
-        (address)Interpreter::return_5_addrs_by_index_table() :
-        (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
     ExternalAddress table(table_addr);
     __ lea(rscratch1, table);
     __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
...
@@ -1006,7 +1006,7 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
   istate->set_stack_limit(stack_base - method->max_stack() - 1);
 }
 
-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   ShouldNotCallThis();
   return NULL;
 }
...
@@ -57,6 +57,8 @@ define_pd_global(bool, UseMembar, true);
 // GC Ergo Flags
 define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
 
+define_pd_global(uintx, TypeProfileLevel, 0);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
 
 #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
...
@@ -1873,7 +1873,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
       // number of implementors for decl_interface is 0 or 1. If
       // it's 0 then no class implements decl_interface and there's
       // no point in inlining.
-      if (!holder->is_loaded() || decl_interface->nof_implementors() != 1) {
+      if (!holder->is_loaded() || decl_interface->nof_implementors() != 1 || decl_interface->has_default_methods()) {
        singleton = NULL;
      }
    }
...
@@ -57,6 +57,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
   _init_state = ik->init_state();
   _nonstatic_field_size = ik->nonstatic_field_size();
   _has_nonstatic_fields = ik->has_nonstatic_fields();
+  _has_default_methods = ik->has_default_methods();
   _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
 
   _implementor = NULL; // we will fill these lazily
...
@@ -52,6 +52,7 @@ private:
   bool _has_finalizer;
   bool _has_subklass;
   bool _has_nonstatic_fields;
+  bool _has_default_methods;
 
   ciFlags _flags;
   jint _nonstatic_field_size;
@@ -171,6 +172,11 @@ public:
     }
   }
 
+  bool has_default_methods() {
+    assert(is_loaded(), "must be loaded");
+    return _has_default_methods;
+  }
+
   ciInstanceKlass* get_canonical_holder(int offset);
   ciField* get_field_by_offset(int field_offset, bool is_static);
   ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
...
@@ -780,6 +780,10 @@ CompilerCounters::CompilerCounters(const char* thread_name, int instance, TRAPS)
 void CompileBroker::compilation_init() {
   _last_method_compiled[0] = '\0';
 
+  // No need to initialize compilation system if we do not use it.
+  if (!UseCompiler) {
+    return;
+  }
 #ifndef SHARK
   // Set the interface to the current compiler(s).
   int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
...
@@ -158,8 +158,8 @@ class AbstractInterpreter: AllStatic {
   // Runtime support
 
   // length = invoke bytecode length (to advance to next bytecode)
-  static address deopt_entry (TosState state, int length) { ShouldNotReachHere(); return NULL; }
-  static address return_entry (TosState state, int length) { ShouldNotReachHere(); return NULL; }
+  static address deopt_entry(TosState state, int length) { ShouldNotReachHere(); return NULL; }
+  static address return_entry(TosState state, int length, Bytecodes::Code code) { ShouldNotReachHere(); return NULL; }
 
   static address rethrow_exception_entry() { return _rethrow_exception_entry; }
...
@@ -78,7 +78,7 @@ class CppInterpreter: public AbstractInterpreter {
   static address stack_result_to_stack(int index) { return _stack_to_stack[index]; }
   static address stack_result_to_native(int index) { return _stack_to_native_abi[index]; }
 
-  static address return_entry (TosState state, int length);
+  static address return_entry (TosState state, int length, Bytecodes::Code code);
   static address deopt_entry  (TosState state, int length);
 
 #ifdef TARGET_ARCH_x86
...
@@ -329,15 +329,21 @@ void AbstractInterpreter::print_method_kind(MethodKind kind) {
 //------------------------------------------------------------------------------------------------------------------------
 // Deoptimization support
 
-// If deoptimization happens, this function returns the point of next bytecode to continue execution
+/**
+ * If a deoptimization happens, this function returns the point of next bytecode to continue execution.
+ */
 address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) {
   assert(method->contains(bcp), "just checkin'");
-  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
+
+  // Get the original and rewritten bytecode.
+  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
   assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
-  int bci = method->bci_from(bcp);
-  int length = -1; // initial value for debugging
+
+  const int bci = method->bci_from(bcp);
+
   // compute continuation length
-  length = Bytecodes::length_at(method, bcp);
+  const int length = Bytecodes::length_at(method, bcp);
+
   // compute result type
   BasicType type = T_ILLEGAL;
@@ -393,7 +399,7 @@ address AbstractInterpreter::deopt_continue_after_entry(Method* method, address
   return
     is_top_frame
     ? Interpreter::deopt_entry (as_TosState(type), length)
-    : Interpreter::return_entry(as_TosState(type), length);
+    : Interpreter::return_entry(as_TosState(type), length, code);
 }
 
 // If deoptimization happens, this function returns the point where the interpreter reexecutes
...
@@ -184,8 +184,9 @@ EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deo
 EntryPoint TemplateInterpreter::_continuation_entry;
 EntryPoint TemplateInterpreter::_safept_entry;
 
-address TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
-address TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_invoke_return_entry[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_invokeinterface_return_entry[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_invokedynamic_return_entry[TemplateInterpreter::number_of_return_addrs];
 
 DispatchTable TemplateInterpreter::_active_table;
 DispatchTable TemplateInterpreter::_normal_table;
@@ -237,22 +238,37 @@ void TemplateInterpreterGenerator::generate_all() {
 #endif // !PRODUCT
 
   { CodeletMark cm(_masm, "return entry points");
+    const int index_size = sizeof(u2);
     for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
       Interpreter::_return_entry[i] =
         EntryPoint(
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(atos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(ltos, i),
-          generate_return_entry_for(ftos, i),
-          generate_return_entry_for(dtos, i),
-          generate_return_entry_for(vtos, i)
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(atos, i, index_size),
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(ltos, i, index_size),
+          generate_return_entry_for(ftos, i, index_size),
+          generate_return_entry_for(dtos, i, index_size),
+          generate_return_entry_for(vtos, i, index_size)
         );
     }
   }
 
+  { CodeletMark cm(_masm, "invoke return entry points");
+    const TosState states[] = {itos, itos, itos, itos, ltos, ftos, dtos, atos, vtos};
+    const int invoke_length = Bytecodes::length_for(Bytecodes::_invokestatic);
+    const int invokeinterface_length = Bytecodes::length_for(Bytecodes::_invokeinterface);
+    const int invokedynamic_length = Bytecodes::length_for(Bytecodes::_invokedynamic);
+
+    for (int i = 0; i < Interpreter::number_of_return_addrs; i++) {
+      TosState state = states[i];
+      Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2));
+      Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2));
+      Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4));
+    }
+  }
+
   { CodeletMark cm(_masm, "earlyret entry points");
     Interpreter::_earlyret_entry =
       EntryPoint(
@@ -298,13 +314,6 @@ void TemplateInterpreterGenerator::generate_all() {
     }
   }
 
-  for (int j = 0; j < number_of_states; j++) {
-    const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos};
-    int index = Interpreter::TosState_as_index(states[j]);
-    Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
-    Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
-  }
-
   { CodeletMark cm(_masm, "continuation entry points");
     Interpreter::_continuation_entry =
       EntryPoint(
@@ -534,9 +543,46 @@ void TemplateInterpreterGenerator::generate_and_dispatch(Template* t, TosState t
 //------------------------------------------------------------------------------------------------------------------------
 // Entry points
 
-address TemplateInterpreter::return_entry(TosState state, int length) {
+/**
+ * Returns the return entry table for the given invoke bytecode.
+ */
+address* TemplateInterpreter::invoke_return_entry_table_for(Bytecodes::Code code) {
+  switch (code) {
+  case Bytecodes::_invokestatic:
+  case Bytecodes::_invokespecial:
+  case Bytecodes::_invokevirtual:
+  case Bytecodes::_invokehandle:
+    return Interpreter::invoke_return_entry_table();
+  case Bytecodes::_invokeinterface:
+    return Interpreter::invokeinterface_return_entry_table();
+  case Bytecodes::_invokedynamic:
+    return Interpreter::invokedynamic_return_entry_table();
+  default:
+    fatal(err_msg("invalid bytecode: %s", Bytecodes::name(code)));
+    return NULL;
+  }
+}
+
+/**
+ * Returns the return entry address for the given top-of-stack state and bytecode.
+ */
+address TemplateInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length");
-  return _return_entry[length].entry(state);
+  const int index = TosState_as_index(state);
+  switch (code) {
+  case Bytecodes::_invokestatic:
+  case Bytecodes::_invokespecial:
+  case Bytecodes::_invokevirtual:
+  case Bytecodes::_invokehandle:
+    return _invoke_return_entry[index];
+  case Bytecodes::_invokeinterface:
+    return _invokeinterface_return_entry[index];
+  case Bytecodes::_invokedynamic:
+    return _invokedynamic_return_entry[index];
+  default:
+    assert(!Bytecodes::is_invoke(code), err_msg("invoke instructions should be handled separately: %s", Bytecodes::name(code)));
+    return _return_entry[length].entry(state);
+  }
 }
...
@@ -120,8 +120,9 @@ class TemplateInterpreter: public AbstractInterpreter {
   static EntryPoint _continuation_entry;
   static EntryPoint _safept_entry;
 
-  static address _return_3_addrs_by_index[number_of_return_addrs];        // for invokevirtual return entries
-  static address _return_5_addrs_by_index[number_of_return_addrs];        // for invokeinterface return entries
+  static address _invoke_return_entry[number_of_return_addrs];            // for invokestatic, invokespecial, invokevirtual return entries
+  static address _invokeinterface_return_entry[number_of_return_addrs];   // for invokeinterface return entries
+  static address _invokedynamic_return_entry[number_of_return_addrs];     // for invokedynamic return entries
 
   static DispatchTable _active_table;   // the active dispatch table (used by the interpreter for dispatch)
   static DispatchTable _normal_table;   // the normal dispatch table (used to set the active table in normal mode)
@@ -161,12 +162,15 @@ class TemplateInterpreter: public AbstractInterpreter {
   static address* normal_table()        { return _normal_table.table_for(); }
 
   // Support for invokes
-  static address* return_3_addrs_by_index_table() { return _return_3_addrs_by_index; }
-  static address* return_5_addrs_by_index_table() { return _return_5_addrs_by_index; }
-  static int TosState_as_index(TosState state);   // computes index into return_3_entry_by_index table
+  static address* invoke_return_entry_table()           { return _invoke_return_entry; }
+  static address* invokeinterface_return_entry_table()  { return _invokeinterface_return_entry; }
+  static address* invokedynamic_return_entry_table()    { return _invokedynamic_return_entry; }
+  static int TosState_as_index(TosState state);
 
-  static address return_entry (TosState state, int length);
-  static address deopt_entry  (TosState state, int length);
+  static address* invoke_return_entry_table_for(Bytecodes::Code code);
+
+  static address deopt_entry(TosState state, int length);
+  static address return_entry(TosState state, int length, Bytecodes::Code code);
 
   // Safepoint support
   static void notice_safepoints();      // stops the thread when reaching a safepoint
...
@@ -53,7 +53,7 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
   address generate_ClassCastException_handler();
   address generate_ArrayIndexOutOfBounds_handler(const char* name);
   address generate_continuation_for(TosState state);
-  address generate_return_entry_for(TosState state, int step);
+  address generate_return_entry_for(TosState state, int step, size_t index_size);
   address generate_earlyret_entry_for(TosState state);
   address generate_deopt_entry_for(TosState state, int step);
   address generate_safept_entry_for(TosState state, address runtime_entry);
...
@@ -1515,7 +1515,10 @@ Bytecodes::Code Method::orig_bytecode_at(int bci) const {
       return bp->orig_bytecode();
     }
   }
-  ShouldNotReachHere();
+  {
+    ResourceMark rm;
+    fatal(err_msg("no original bytecode found in %s at bci %d", name_and_sig_as_C_string(), bci));
+  }
   return Bytecodes::_shouldnotreachhere;
 }
...
@@ -2006,9 +2006,9 @@ bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
   Node* arg2 = NULL;
 
   if (is_increment) {
     arg2 = intcon(1);
   } else {
     arg2 = argument(1);
   }
 
   Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) );
@@ -2056,7 +2056,7 @@ bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
   if (is_decrement) {
     arg2 = longcon(1);
   } else {
-    Node* arg2 = argument(2); // type long
+    arg2 = argument(2); // type long
     // argument(3) == TOP
   }
...
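The second hunk above fixes a variable-shadowing bug: the else branch declared a fresh "Node* arg2" instead of assigning to the outer arg2, so the outer variable was never set. A minimal standalone C++ sketch of the same pitfall (hypothetical names, not HotSpot code):

#include <cstdio>

int main() {
  int arg2 = 0;                 // outer variable, meant to receive the value
  bool is_decrement = false;
  if (is_decrement) {
    arg2 = 1;
  } else {
    int arg2 = 2;               // BUG: declares a new 'arg2'; the outer one is untouched
    (void)arg2;
  }
  std::printf("arg2 = %d\n", arg2);  // prints 0, not 2
  return 0;
}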
@@ -713,6 +713,10 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
       case Op_ModL: body_size += 30; break;
       case Op_DivL: body_size += 30; break;
       case Op_MulL: body_size += 10; break;
+      case Op_FlagsProj:
+        // Can't handle unrolling of loops containing
+        // nodes that generate a FlagsProj at the moment
+        return false;
       case Op_StrComp:
       case Op_StrEquals:
       case Op_StrIndexOf:
...
@@ -97,7 +97,8 @@ int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_
 static bool expected_yanked_node(Node *old, Node *orig_old) {
   // This code is expected only next original nodes:
   // - load from constant table node which may have next data input nodes:
-  //     MachConstantBase, Phi, MachTemp, MachSpillCopy
+  //     MachConstantBase, MachTemp, MachSpillCopy
+  // - Phi nodes that are considered Junk
   // - load constant node which may have next data input nodes:
   //     MachTemp, MachSpillCopy
   // - MachSpillCopy
@@ -112,7 +113,9 @@ static bool expected_yanked_node(Node *old, Node *orig_old) {
     return (old == orig_old);
   } else if (old->is_MachTemp()) {
     return orig_old->is_Con();
-  } else if (old->is_Phi() || old->is_MachConstantBase()) {
+  } else if (old->is_Phi()) { // Junk phi's
+    return true;
+  } else if (old->is_MachConstantBase()) {
     return (orig_old->is_Con() && orig_old->is_MachConstant());
   }
   return false;
@@ -522,11 +525,9 @@ void PhaseChaitin::post_allocate_copy_removal() {
           u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
         }
         if (u != NodeSentinel) {    // Junk Phi. Remove
-          block->remove_node(j--);
-          phi_dex--;
-          _cfg.unmap_node_from_block(phi);
           phi->replace_by(u);
-          phi->disconnect_inputs(NULL, C);
+          j -= yank_if_dead(phi, block, &value, &regnd);
+          phi_dex--;
           continue;
         }
         // Note that if value[pidx] exists, then we merged no new values here
...
@@ -2787,13 +2787,11 @@ intptr_t TypeOopPtr::get_con() const {
 //-----------------------------filter------------------------------------------
 // Do not allow interface-vs.-noninterface joins to collapse to top.
-const Type *TypeOopPtr::filter( const Type *kills ) const {
+const Type *TypeOopPtr::filter(const Type *kills) const {
 
   const Type* ft = join(kills);
   const TypeInstPtr* ftip = ft->isa_instptr();
   const TypeInstPtr* ktip = kills->isa_instptr();
-  const TypeKlassPtr* ftkp = ft->isa_klassptr();
-  const TypeKlassPtr* ktkp = kills->isa_klassptr();
 
   if (ft->empty()) {
     // Check for evil case of 'this' being a class and 'kills' expecting an
@@ -2807,8 +2805,6 @@ const Type *TypeOopPtr::filter( const Type *kills ) const {
     // uplift the type.
     if (!empty() && ktip != NULL && ktip->is_loaded() && ktip->klass()->is_interface())
       return kills;             // Uplift to interface
-    if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface())
-      return kills;             // Uplift to interface
 
     return Type::TOP;           // Canonical empty value
   }
@@ -2825,14 +2821,6 @@ const Type *TypeOopPtr::filter( const Type *kills ) const {
     assert(!ftip->klass_is_exact(), "interface could not be exact");
     return ktip->cast_to_ptr_type(ftip->ptr());
   }
-  // Interface klass type could be exact in opposite to interface type,
-  // return it here instead of incorrect Constant ptr J/L/Object (6894807).
-  if (ftkp != NULL && ktkp != NULL &&
-      ftkp->is_loaded() && ftkp->klass()->is_interface() &&
-      !ftkp->klass_is_exact() && // Keep exact interface klass
-      ktkp->is_loaded() && !ktkp->klass()->is_interface()) {
-    return ktkp->cast_to_ptr_type(ftkp->ptr());
-  }
 
   return ft;
 }
@@ -4385,6 +4373,33 @@ bool TypeKlassPtr::singleton(void) const {
   return (_offset == 0) && !below_centerline(_ptr);
 }
 
+// Do not allow interface-vs.-noninterface joins to collapse to top.
+const Type *TypeKlassPtr::filter(const Type *kills) const {
+  // logic here mirrors the one from TypeOopPtr::filter. See comments
+  // there.
+  const Type* ft = join(kills);
+  const TypeKlassPtr* ftkp = ft->isa_klassptr();
+  const TypeKlassPtr* ktkp = kills->isa_klassptr();
+
+  if (ft->empty()) {
+    if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface())
+      return kills;             // Uplift to interface
+
+    return Type::TOP;           // Canonical empty value
+  }
+
+  // Interface klass type could be exact in opposite to interface type,
+  // return it here instead of incorrect Constant ptr J/L/Object (6894807).
+  if (ftkp != NULL && ktkp != NULL &&
+      ftkp->is_loaded() && ftkp->klass()->is_interface() &&
+      !ftkp->klass_is_exact() && // Keep exact interface klass
+      ktkp->is_loaded() && !ktkp->klass()->is_interface()) {
+    return ktkp->cast_to_ptr_type(ftkp->ptr());
+  }
+
+  return ft;
+}
+
 //----------------------compute_klass------------------------------------------
 // Compute the defining klass for this class
 ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const {
...
@@ -63,7 +63,7 @@ class TypeRawPtr;
 class TypeOopPtr;
 class TypeInstPtr;
 class TypeAryPtr;
 class TypeKlassPtr;
 class TypeMetadataPtr;
 
 //------------------------------Type-------------------------------------------
@@ -1202,6 +1202,9 @@ public:
   virtual intptr_t get_con() const;
 
+  // Do not allow interface-vs.-noninterface joins to collapse to top.
+  virtual const Type *filter( const Type *kills ) const;
+
   // Convenience common pre-built types.
   static const TypeKlassPtr* OBJECT;          // Not-null object klass or below
   static const TypeKlassPtr* OBJECT_OR_NULL;  // Maybe-null version of same
...
@@ -45,7 +45,7 @@
 oop* HandleArea::allocate_handle(oop obj) {
   assert(_handle_mark_nesting > 1, "memory leak: allocating handle outside HandleMark");
   assert(_no_handle_mark_nesting == 0, "allocating handle inside NoHandleMark");
-  assert(obj->is_oop(), "sanity check");
+  assert(obj->is_oop(), err_msg("not an oop: " INTPTR_FORMAT, (intptr_t*) obj));
   return real_allocate_handle(obj);
 }
...
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8026735
* @summary CHA in C1 should make correct decisions about default methods
* @run main/othervm -Xcomp -XX:CompileOnly=InlineDefaultMethod::test -XX:TieredStopAtLevel=1 InlineDefaultMethod
*/
interface InterfaceWithDefaultMethod0 {
    default public int defaultMethod() {
        return 1;
    }
}

interface InterfaceWithDefaultMethod1 extends InterfaceWithDefaultMethod0 { }

abstract class Subtype implements InterfaceWithDefaultMethod1 { }

class Decoy extends Subtype {
    public int defaultMethod() {
        return 2;
    }
}

class Instance extends Subtype { }

public class InlineDefaultMethod {
    public static int test(InterfaceWithDefaultMethod1 x) {
        return x.defaultMethod();
    }

    public static void main(String[] args) {
        InterfaceWithDefaultMethod1 a = new Decoy();
        InterfaceWithDefaultMethod1 b = new Instance();
        if (test(a) != 2 ||
            test(b) != 1) {
            System.err.println("FAILED");
            System.exit(97);
        }
        System.err.println("PASSED");
    }
}
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8027444
* @summary Test nested loops
* @compile NestedMathExactTest.java
* @run main NestedMathExactTest
*
*/
public class NestedMathExactTest {
    public static final int LIMIT = 100;
    public static int[] result = new int[LIMIT];
    public static int value = 17;

    public static void main(String[] args) {
        for (int i = 0; i < 100; ++i) {
            result[i] = runTest();
        }
    }

    public static int runTest() {
        int sum = 0;
        for (int j = 0; j < 100000; j = Math.addExact(j, 1)) {
            sum = 1;
            for (int i = 0; i < 5; ++i) {
                sum *= value;
            }
        }
        return sum;
    }
}
...
@@ -24,6 +24,7 @@
 /*
  * @test
  * @bug 8026844
+ * @bug 8027353
  * @summary Test constant subtractExact
  * @compile SubExactLConstantTest.java Verify.java
  * @run main SubExactLConstantTest
...
@@ -24,6 +24,7 @@
 /*
  * @test
  * @bug 8026844
+ * @bug 8027353
  * @summary Test non constant subtractExact
  * @compile SubExactLNonConstantTest.java Verify.java
  * @run main SubExactLNonConstantTest
...
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8026949
* @summary Test ensures correct VM output during startup
* @library ../../testlibrary
*
*/
import com.oracle.java.testlibrary.*;
public class StartupOutput {
    public static void main(String[] args) throws Exception {
        ProcessBuilder pb;
        OutputAnalyzer out;

        pb = ProcessTools.createJavaProcessBuilder("-Xint", "-XX:+DisplayVMOutputToStdout", "-version");
        out = new OutputAnalyzer(pb.start());

        out.shouldNotContain("no space to run compilers");

        out.shouldHaveExitValue(0);
    }
}