Commit 1d037d1c authored by amurillo

Merge

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
HS_MAJOR_VER=25
HS_MINOR_VER=40
-HS_BUILD_NUMBER=22
+HS_BUILD_NUMBER=23
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
@@ -545,6 +545,9 @@ void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Registe
cmplw(CCR0, Rindex, Rlength);
sldi(RsxtIndex, RsxtIndex, index_shift);
blt(CCR0, LnotOOR);
// Index should be in R17_tos, array should be in R4_ARG2.
mr(R17_tos, Rindex);
mr(R4_ARG2, Rarray);
load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
mtctr(Rtmp);
bctr();
@@ -1679,6 +1682,228 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
}
}
// Argument and return type profiling.
// kills: tmp, tmp2, R0, CR0, CR1
void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2) {
Label do_nothing, do_update;
// tmp2 = obj is allowed
assert_different_registers(obj, mdo_addr_base, tmp, R0);
assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
const Register klass = tmp2;
verify_oop(obj);
ld(tmp, mdo_addr_offs, mdo_addr_base);
// Set null_seen if obj is 0.
cmpdi(CCR0, obj, 0);
ori(R0, tmp, TypeEntries::null_seen);
beq(CCR0, do_update);
load_klass(klass, obj);
clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
// Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
cmpd(CCR1, R0, klass);
// Klass seen before, nothing to do (regardless of unknown bit).
//beq(CCR1, do_nothing);
andi_(R0, klass, TypeEntries::type_unknown);
// Already unknown. Nothing to do anymore.
//bne(CCR0, do_nothing);
crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2); // cr0 eq = cr1 eq or cr0 ne
beq(CCR0, do_nothing);
clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
beq(CCR0, do_update); // First time here. Set profile type.
// Different than before. Cannot keep accurate profile.
ori(R0, tmp, TypeEntries::type_unknown);
bind(do_update);
// update profile
std(R0, mdo_addr_offs, mdo_addr_base);
align(32, 12);
bind(do_nothing);
}
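The bit manipulation above is a small state machine over a single MDO cell. Below is a minimal standalone sketch of that update rule, with the TypeEntries encoding modeled as plain constants (bit 0 = null_seen, bit 1 = type_unknown, upper bits = klass pointer; these values and names are assumptions for illustration, not HotSpot's header):

  #include <cstdint>
  #include <cstdio>

  // Assumed encoding modeled on TypeEntries; illustrative only.
  const intptr_t null_seen       = 1;                           // bit 0: a null was observed
  const intptr_t type_unknown    = 2;                           // bit 1: conflicting types observed
  const intptr_t type_mask       = ~null_seen;                  // everything but the null bit
  const intptr_t type_klass_mask = ~(null_seen | type_unknown); // just the klass bits

  // One update of a profile cell, mirroring the branches in profile_obj_type().
  intptr_t update_cell(intptr_t cell, intptr_t klass /* 0 means obj == NULL */) {
    if (klass == 0)                        return cell | null_seen;  // "Set null_seen if obj is 0."
    if ((cell & type_klass_mask) == klass) return cell;              // klass seen before: nothing to do
    if (cell & type_unknown)               return cell;              // already unknown: nothing to do
    if ((cell & type_mask) == 0)           return klass | cell;      // first time here: record klass, keep null_seen
    return cell | type_unknown;                                      // different than before: give up
  }

  int main() {
    intptr_t cell = 0;
    cell = update_cell(cell, 0x1000);   // first klass: recorded
    cell = update_cell(cell, 0);        // a null: null_seen set
    cell = update_cell(cell, 0x2000);   // conflicting klass: marked unknown
    printf("cell = %#lx\n", (unsigned long)cell);  // 0x1003 = klass bits | unknown | null_seen
    return 0;
  }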
void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
if (!ProfileInterpreter) {
return;
}
assert_different_registers(callee, tmp1, tmp2, R28_mdx);
if (MethodData::profile_arguments() || MethodData::profile_return()) {
Label profile_continue;
test_method_data_pointer(profile_continue);
int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
bne(CCR0, profile_continue);
if (MethodData::profile_arguments()) {
Label done;
int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
add(R28_mdx, off_to_args, R28_mdx);
for (int i = 0; i < TypeProfileArgsLimit; i++) {
if (i > 0 || MethodData::profile_return()) {
// If return value type is profiled we may have no argument to profile.
ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
blt(CCR0, done);
}
ld(tmp1, in_bytes(Method::const_offset()), callee);
lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
// Stack offset o (zero based) from the start of the argument
// list, for n arguments translates into offset n - o - 1 from
// the end of the argument list. But there's an extra slot at
// the top of the stack. So the offset is n - o from Lesp.
ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
subf(tmp1, tmp2, tmp1);
sldi(tmp1, tmp1, Interpreter::logStackElementSize);
ldx(tmp1, tmp1, R15_esp);
profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);
int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
addi(R28_mdx, R28_mdx, to_add);
off_to_args += to_add;
}
if (MethodData::profile_return()) {
ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
}
bind(done);
if (MethodData::profile_return()) {
// We're right after the type profile for the last
// argument. tmp1 is the number of cells left in the
// CallTypeData/VirtualCallTypeData to reach its end. Non null
// if there's a return to profile.
assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
add(R28_mdx, tmp1, R28_mdx);
}
} else {
assert(MethodData::profile_return(), "either profile call args or call ret");
update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
}
// Mdp points right after the end of the
// CallTypeData/VirtualCallTypeData, right after the cells for the
// return value type if there's one.
align(32, 12);
bind(profile_continue);
}
}
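The "n - o" comment in the argument loop above is easy to verify with concrete numbers; a tiny self-checking sketch (helper name hypothetical):

  #include <cassert>

  // For n parameter slots, stack slot o (zero-based from the start of the
  // argument list) sits n - o stack elements above Lesp; the real code then
  // scales elements to bytes with Interpreter::logStackElementSize.
  int elements_above_esp(int n, int o) { return n - o; }

  int main() {
    assert(elements_above_esp(3, 0) == 3);  // first argument of a 3-slot list
    assert(elements_above_esp(3, 2) == 1);  // last argument sits just above Lesp
    return 0;
  }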
void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
assert_different_registers(ret, tmp1, tmp2);
if (ProfileInterpreter && MethodData::profile_return()) {
Label profile_continue;
test_method_data_pointer(profile_continue);
if (MethodData::profile_return_jsr292_only()) {
// If we don't profile all invoke bytecodes we must make sure
// it's a bytecode we indeed profile. We can't go back to the
// beginning of the ProfileData we intend to update to check its
// type because we're right after it and we don't know its
// length.
lbz(tmp1, 0, R14_bcp);
lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
cmpwi(CCR1, tmp2, vmIntrinsics::_compiledLambdaForm);
cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
bne(CCR0, profile_continue);
}
profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
align(32, 12);
bind(profile_continue);
}
}
void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
if (ProfileInterpreter && MethodData::profile_parameters()) {
Label profile_continue, done;
test_method_data_pointer(profile_continue);
// Load the offset of the area within the MDO used for
// parameters. If it's negative we're not profiling any parameters.
lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
cmpwi(CCR0, tmp1, 0);
blt(CCR0, profile_continue);
// Compute a pointer to the area for parameters from the offset
// and move the pointer to the slot for the last
// parameters. Collect profiling from last parameter down.
// mdo start + parameters offset + array length - 1
// Pointer to the parameter area in the MDO.
const Register mdp = tmp1;
add(mdp, tmp1, R28_mdx);
// Offset of the current profile entry to update.
const Register entry_offset = tmp2;
// entry_offset = array len in number of cells
ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
// entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
// entry_offset in bytes
sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
Label loop;
align(32, 12);
bind(loop);
// Load offset on the stack from the slot for this parameter.
ld(tmp3, entry_offset, mdp);
sldi(tmp3, tmp3, Interpreter::logStackElementSize);
neg(tmp3, tmp3);
// Read the parameter from the local area.
ldx(tmp3, tmp3, R18_locals);
// Make entry_offset now point to the type field for this parameter.
int type_base = in_bytes(ParametersTypeData::type_offset(0));
assert(type_base > off_base, "unexpected");
addi(entry_offset, entry_offset, type_base - off_base);
// Profile the parameter.
profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);
// Go to next parameter.
int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
cmpdi(CCR0, entry_offset, off_base + delta);
addi(entry_offset, entry_offset, -delta);
bge(CCR0, loop);
align(32, 12);
bind(profile_continue);
}
}
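The offset bookkeeping in that loop is easier to follow in scalar form. A standalone sketch of the backward walk, with illustrative entry layout (two cells per parameter: slot field, then type field; all names and sizes here are assumptions, not HotSpot's):

  #include <cstdio>

  const int cell      = 8;   // assumed bytes per MDO cell
  const int per_arg   = 2;   // assumed cells per parameter entry (slot + type)
  const int slot_base = 0;   // byte offset of entry 0's slot field
  const int type_base = 8;   // byte offset of entry 0's type field

  // Walk from the last parameter entry down to entry 0, as the asm loop does.
  void walk_parameters(int array_len_cells) {
    for (int off = (array_len_cells - per_arg) * cell + slot_base;
         off >= slot_base;
         off -= per_arg * cell) {
      printf("slot field at +%d, type field at +%d\n",
             off, off + (type_base - slot_base));
    }
  }

  int main() { walk_parameters(6); return 0; }  // 3 entries: offsets +32, +16, +0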
// Add an InterpMonitorElem to stack (see frame_sparc.hpp).
void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
@@ -2040,20 +2265,19 @@ void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Regis
bne(CCR0, test);
address fd = CAST_FROM_FN_PTR(address, verify_return_address);
-unsigned int nbytes_save = 10*8; // 10 volatile gprs
+const int nbytes_save = 11*8; // volatile gprs except R0
+save_volatile_gprs(R1_SP, -nbytes_save); // except R0
-save_LR_CR(Rtmp);
+save_LR_CR(Rtmp); // Save in old frame.
push_frame_reg_args(nbytes_save, Rtmp);
-save_volatile_gprs(R1_SP, 112); // except R0
load_const_optimized(Rtmp, fd, R0);
mr_if_needed(R4_ARG2, reg);
mr(R3_ARG1, R19_method);
call_c(Rtmp); // call C
-restore_volatile_gprs(R1_SP, 112); // except R0
pop_frame();
restore_LR_CR(Rtmp);
+restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
b(skip);
// Perform a more elaborate out-of-line call.
......
@@ -255,6 +255,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);
// Argument and return type profiling.
void profile_obj_type(Register obj, Register mdo_addr_base, RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2);
void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual);
void profile_return_type(Register ret, Register tmp1, Register tmp2);
void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
#endif // !CC_INTERP
// Debugging
......
@@ -806,6 +806,7 @@ void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
// For verify_oops.
void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
+std(R2, offset, dst); offset += 8;
std(R3, offset, dst); offset += 8;
std(R4, offset, dst); offset += 8;
std(R5, offset, dst); offset += 8;
@@ -820,6 +821,7 @@ void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
// For verify_oops.
void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
+ld(R2, offset, src); offset += 8;
ld(R3, offset, src); offset += 8;
ld(R4, offset, src); offset += 8;
ld(R5, offset, src); offset += 8;
@@ -1186,6 +1188,16 @@ void MacroAssembler::call_VM(Register oop_result, address entry_point, Register
call_VM(oop_result, entry_point, check_exceptions);
}
void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
bool check_exceptions) {
// R3_ARG1 is reserved for the thread
mr_if_needed(R4_ARG2, arg_1);
assert(arg_2 != R4_ARG2, "smashed argument");
mr_if_needed(R5_ARG3, arg_2);
mr_if_needed(R6_ARG4, arg_3);
call_VM(oop_result, entry_point, check_exceptions);
}
void MacroAssembler::call_VM_leaf(address entry_point) {
call_VM_leaf_base(entry_point);
}
@@ -3058,35 +3070,27 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
if (!VerifyOops) {
return;
}
-// Will be preserved.
-Register tmp = R11;
-assert(oop != tmp, "precondition");
-unsigned int nbytes_save = 10*8; // 10 volatile gprs
address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
-// save tmp
-mr(R0, tmp);
-// kill tmp
-save_LR_CR(tmp);
+const Register tmp = R11; // Will be preserved.
+const int nbytes_save = 11*8; // Volatile gprs except R0.
+save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+if (oop == tmp) mr(R4_ARG2, oop);
+save_LR_CR(tmp); // save in old frame
push_frame_reg_args(nbytes_save, tmp);
-// restore tmp
-mr(tmp, R0);
-save_volatile_gprs(R1_SP, 112); // except R0
// load FunctionDescriptor** / entry_address *
-load_const(tmp, fd);
+load_const_optimized(tmp, fd, R0);
// load FunctionDescriptor* / entry_address
ld(tmp, 0, tmp);
-mr(R4_ARG2, oop);
+if (oop != tmp) mr_if_needed(R4_ARG2, oop);
-load_const(R3_ARG1, (address)msg);
+load_const_optimized(R3_ARG1, (address)msg, R0);
-// call destination for its side effect
+// Call destination for its side effect.
call_c(tmp);
-restore_volatile_gprs(R1_SP, 112); // except R0
pop_frame();
-// save tmp
-mr(R0, tmp);
-// kill tmp
restore_LR_CR(tmp);
-// restore tmp
-mr(tmp, R0);
+restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
}
const char* stop_types[] = {
......
@@ -368,6 +368,7 @@ class MacroAssembler: public Assembler {
void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true);
void call_VM_leaf(address entry_point);
void call_VM_leaf(address entry_point, Register arg_1);
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
......
@@ -100,10 +100,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
MacroAssembler* a = new MacroAssembler(&cb);
// Patch the call.
-if (ReoptimizeCallSequences &&
-    a->is_within_range_of_b(dest, addr_call)) {
-  a->bl(dest);
-} else {
+if (!ReoptimizeCallSequences || !a->is_within_range_of_b(dest, addr_call)) {
address trampoline_stub_addr = get_trampoline();
// We did not find a trampoline stub because the current codeblob
@@ -115,9 +112,12 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
// Patch the constant in the call's trampoline stub.
NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
+dest = trampoline_stub_addr;
-a->bl(trampoline_stub_addr);
}
+OrderAccess::release();
+a->bl(dest);
ICache::ppc64_flush_icache_bytes(addr_call, code_size);
}
......
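The reordering above is the point of the change: the trampoline's destination slot must be visible to other threads before the call instruction is redirected at it. A sketch of that ordering using C++11 atomics (HotSpot itself uses OrderAccess::release(); the two variables here are stand-ins, not real HotSpot types):

  #include <atomic>
  #include <cstdint>

  std::atomic<uintptr_t> trampoline_destination{0};  // data slot the trampoline stub loads
  std::atomic<uint32_t>  call_instruction{0};        // stands in for the patched 'bl'

  void set_destination_mt_safe(uintptr_t dest, uint32_t new_bl) {
    trampoline_destination.store(dest, std::memory_order_relaxed); // 1. patch the data slot
    std::atomic_thread_fence(std::memory_order_release);           // 2. make it visible first
    call_instruction.store(new_bl, std::memory_order_relaxed);     // 3. then redirect the call
    // The real code follows this with an icache flush, as in the diff.
  }

  int main() { set_destination_mt_safe(0x1000, 0x48000001u); return 0; }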
@@ -1938,8 +1938,9 @@ ArchOpcode MachSpillCopyNode_archOpcode(MachSpillCopyNode *n, PhaseRegAlloc *ra_
// --------------------------------------------------------------------
// Check for hi bits still needing moving. Only happens for misaligned
// arguments to native calls.
-if (src_hi == dst_hi)
+if (src_hi == dst_hi) {
return ppc64Opcode_none; // Self copy; no move.
+}
ShouldNotReachHere();
return ppc64Opcode_undefined;
@@ -1961,14 +1962,15 @@ void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
}
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
return _count * 4;
}
#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
-int reg = ra_->get_reg_first(this);
-st->print("ADDI %s, SP, %d \t// box node", Matcher::regName[reg], offset);
+char reg_str[128];
+ra_->dump_register(this, reg_str);
+st->print("ADDI %s, SP, %d \t// box node", reg_str, offset);
}
#endif
......
@@ -90,7 +90,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handle
// Thread will be loaded to R3_ARG1.
// Target class oop is in register R5_ARG3 by convention!
-__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
+__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
// Above call must not return here since exception pending.
DEBUG_ONLY(__ should_not_reach_here();)
return entry;
@@ -171,6 +171,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// Compiled code destroys templateTableBase, reload.
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
if (state == atos) {
__ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
}
const Register cache = R11_scratch1;
const Register size = R12_scratch2;
__ get_cache_and_index_at_bcp(cache, 1, index_size);
@@ -1230,6 +1234,10 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
__ li(R0, 1);
__ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
}
// Argument and return type profiling.
__ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
// Increment invocation counter and check for overflow.
if (inc_counter) {
generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
@@ -1549,6 +1557,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
if (ProfileInterpreter) {
__ set_method_data_pointer_for_bcp();
__ ld(R11_scratch1, 0, R1_SP);
__ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
}
#if INCLUDE_JVMTI
Label L_done;
@@ -1560,13 +1570,11 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
__ ld(R4_ARG2, 0, R18_locals);
-__ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
-           R4_ARG2, R19_method, R14_bcp);
-__ cmpdi(CCR0, R11_scratch1, 0);
+__ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
+__ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
+__ cmpdi(CCR0, R4_ARG2, 0);
__ beq(CCR0, L_done);
-__ std(R11_scratch1, wordSize, R15_esp);
+__ std(R4_ARG2, wordSize, R15_esp);
__ bind(L_done);
#endif // INCLUDE_JVMTI
__ dispatch_next(vtos);
......
@@ -3234,6 +3234,8 @@ void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex,
// Load target.
__ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
__ ldx(Rtarget_method, Rindex, Rrecv_klass);
// Argument and return type profiling.
__ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
__ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
}
@@ -3317,6 +3319,8 @@ void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Regis
__ null_check_throw(Rrecv, -1, Rscratch1);
__ profile_final_call(Rrecv, Rscratch1);
// Argument and return type profiling.
__ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
// Do the call.
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
@@ -3338,6 +3342,8 @@ void TemplateTable::invokespecial(int byte_no) {
__ null_check_throw(Rreceiver, -1, R11_scratch1);
__ profile_call(R11_scratch1, R12_scratch2);
// Argument and return type profiling.
__ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
__ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}
@@ -3352,6 +3358,8 @@ void TemplateTable::invokestatic(int byte_no) {
prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
__ profile_call(R11_scratch1, R12_scratch2);
// Argument and return type profiling.
__ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
__ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}
@@ -3373,6 +3381,8 @@ void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
// Final call case.
__ profile_final_call(Rtemp1, Rscratch);
// Argument and return type profiling.
__ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
// Do the final call - the index (f2) contains the method.
__ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
@@ -3424,6 +3434,8 @@ void TemplateTable::invokeinterface(int byte_no) {
__ cmpdi(CCR0, Rindex, 0);
__ beq(CCR0, Lthrow_ame);
// Found entry. Jump off!
// Argument and return type profiling.
__ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
__ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
// Vtable entry was NULL => Throw abstract method error.
@@ -3477,6 +3489,8 @@ void TemplateTable::invokedynamic(int byte_no) {
// to be the callsite object the bootstrap method returned. This is passed to a
// "link" method which does the dispatch (Most likely just grabs the MH stored
// inside the callsite and does an invokehandle).
// Argument and return type profiling.
__ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}
@@ -3503,6 +3517,8 @@ void TemplateTable::invokehandle(int byte_no) {
__ profile_final_call(Rrecv, Rscratch1);
// Still no call from handle => We call the method handle interpreter here.
// Argument and return type profiling.
__ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}
......
@@ -139,13 +139,44 @@ void VM_Version::initialize() {
}
assert(AllocatePrefetchLines > 0, "invalid value");
-if (AllocatePrefetchLines < 1) // Set valid value in product VM.
+if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
AllocatePrefetchLines = 1; // Conservative value.
+}
-if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size)
+if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
AllocatePrefetchStyle = 1; // Fall back if inappropriate.
+}
assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
if (UseCRC32Intrinsics) {
if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
warning("CRC32 intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
}
// The AES intrinsic stubs require AES instruction support.
if (UseAES) {
warning("AES instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics) {
if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
if (UseSHA) {
warning("SHA instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseSHA, false);
}
if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
warning("SHA intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
}
void VM_Version::print_features() {
......
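The new VM_Version checks above all follow one pattern: if an unsupported feature flag is on, warn and reset it, with the warning suppressed for default-on flags the user never touched (the UseCRC32Intrinsics/UseAESIntrinsics form; UseAES and UseSHA warn unconditionally). A condensed sketch with the flag machinery modeled as a plain struct (FLAG_IS_DEFAULT / FLAG_SET_DEFAULT are the real macros; everything else here is assumed):

  #include <cstdio>

  struct Flag { bool value; bool set_on_command_line; };  // stand-in for HotSpot's flag table

  void disable_unsupported(Flag& f, const char* what) {
    if (!f.value) return;                 // already off: nothing to do
    if (f.set_on_command_line)            // warn only if the user asked for it
      fprintf(stderr, "warning: %s not available on this CPU\n", what);
    f.value = false;                      // i.e. FLAG_SET_DEFAULT(..., false)
  }

  int main() {
    Flag crc32 = { true, false };         // default-on, not user-requested: silently disabled
    disable_unsupported(crc32, "CRC32 intrinsics");
    return 0;
  }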
@@ -675,7 +675,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
case handle_exception_nofpu_id:
case handle_exception_id:
// At this point all registers MAY be live.
-oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
+oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
break;
case handle_exception_from_callee_id: {
// At this point all registers except exception oop (RAX) and
@@ -748,7 +748,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
case handle_exception_nofpu_id:
case handle_exception_id:
// Restore the registers that were saved at the beginning.
-restore_live_registers(sasm, id == handle_exception_nofpu_id);
+restore_live_registers(sasm, id != handle_exception_nofpu_id);
break;
case handle_exception_from_callee_id:
// WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
......
This diff is collapsed.
@@ -209,7 +209,7 @@ class Aix {
return _can_use_16M_pages == 1 ? true : false;
}
-static address ucontext_get_pc(ucontext_t* uc);
+static address ucontext_get_pc(const ucontext_t* uc);
static intptr_t* ucontext_get_sp(ucontext_t* uc);
static intptr_t* ucontext_get_fp(ucontext_t* uc);
// Set PC into context. Needed for continuation after signal.
......
@@ -91,8 +91,9 @@ void os::initialize_thread(Thread *thread) { }
// Frame information (pc, sp, fp) retrieved via ucontext
// always looks like a C-frame according to the frame
-// conventions in frame_ppc64.hpp.
+// conventions in frame_ppc.hpp.
-address os::Aix::ucontext_get_pc(ucontext_t * uc) {
+address os::Aix::ucontext_get_pc(const ucontext_t * uc) {
return (address)uc->uc_mcontext.jmp_context.iar;
}
@@ -486,7 +487,7 @@ void os::Aix::init_thread_fpu_state(void) {
////////////////////////////////////////////////////////////////////////////////
// thread stack
-size_t os::Aix::min_stack_allowed = 768*K;
+size_t os::Aix::min_stack_allowed = 128*K;
// Aix is always in floating stack mode. The stack size for a new
// thread can be set via pthread_attr_setstacksize().
@@ -499,7 +500,7 @@ size_t os::Aix::default_stack_size(os::ThreadType thr_type) {
// because of the strange 'fallback logic' in os::create_thread().
// Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
// specify a different stack size for compiler threads!
-size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
+size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
return s;
}
......
@@ -23,8 +23,8 @@
*
*/
-#ifndef OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
static void setup_fpu() {}
@@ -32,4 +32,4 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
-#endif // OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
@@ -23,8 +23,8 @@
*
*/
-#ifndef OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
-#define OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+#ifndef OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
+#define OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
#include "runtime/prefetch.hpp"
@@ -55,4 +55,4 @@ inline void Prefetch::write(void *loc, intx interval) {
#endif
}
-#endif // OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+#endif // OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
@@ -23,8 +23,8 @@
*
*/
-#ifndef OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
// Processor dependent parts of ThreadLocalStorage
@@ -33,4 +33,4 @@ public:
return (Thread *) os::thread_local_storage_at(thread_index());
}
-#endif // OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
@@ -23,8 +23,8 @@
*
*/
-#ifndef OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
private:
void pd_initialize() {
@@ -76,4 +76,4 @@
intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
-#endif // OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
@@ -453,7 +453,7 @@ void os::Linux::set_fpu_control_word(int fpu_control) {
////////////////////////////////////////////////////////////////////////////////
// thread stack
-size_t os::Linux::min_stack_allowed = 768*K;
+size_t os::Linux::min_stack_allowed = 128*K;
bool os::Linux::supports_variable_stack_size() { return true; }
......
@@ -3058,21 +3058,39 @@ void ClassFileParser::apply_parsed_class_attributes(instanceKlassHandle k) {
}
}
-// Transfer ownership of metadata allocated to the InstanceKlass.
-void ClassFileParser::apply_parsed_class_metadata(
-    instanceKlassHandle this_klass,
-    int java_fields_count, TRAPS) {
-  // Assign annotations if needed
-  if (_annotations != NULL || _type_annotations != NULL ||
-      _fields_annotations != NULL || _fields_type_annotations != NULL) {
+// Create the Annotations object that will
+// hold the annotations array for the Klass.
+void ClassFileParser::create_combined_annotations(TRAPS) {
+  if (_annotations == NULL &&
+      _type_annotations == NULL &&
+      _fields_annotations == NULL &&
+      _fields_type_annotations == NULL) {
+    // Don't create the Annotations object unnecessarily.
+    return;
+  }
Annotations* annotations = Annotations::allocate(_loader_data, CHECK);
annotations->set_class_annotations(_annotations);
annotations->set_class_type_annotations(_type_annotations);
annotations->set_fields_annotations(_fields_annotations);
annotations->set_fields_type_annotations(_fields_type_annotations);
-this_klass->set_annotations(annotations);
-}
+// This is the Annotations object that will be
+// assigned to the InstanceKlass being constructed.
+_combined_annotations = annotations;
+// The annotation arrays below have been transferred to
+// _combined_annotations, so these fields can now be cleared.
+_annotations             = NULL;
+_type_annotations        = NULL;
+_fields_annotations      = NULL;
+_fields_type_annotations = NULL;
+}
+// Transfer ownership of metadata allocated to the InstanceKlass.
+void ClassFileParser::apply_parsed_class_metadata(
+    instanceKlassHandle this_klass,
+    int java_fields_count, TRAPS) {
_cp->set_pool_holder(this_klass());
this_klass->set_constants(_cp);
this_klass->set_fields(_fields, java_fields_count);
@@ -3080,6 +3098,7 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_inner_classes(_inner_classes);
this_klass->set_local_interfaces(_local_interfaces);
this_klass->set_transitive_interfaces(_transitive_interfaces);
this_klass->set_annotations(_combined_annotations);
// Clear out these fields so they don't get deallocated by the destructor
clear_class_metadata();
@@ -3939,6 +3958,10 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
ClassAnnotationCollector parsed_annotations;
parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));
// Finalize the Annotations metadata object,
// now that all annotation arrays have been created.
create_combined_annotations(CHECK_(nullHandle));
// Make sure this is the end of class file stream
guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
@@ -4239,10 +4262,27 @@ ClassFileParser::~ClassFileParser() {
InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(),
                                     _local_interfaces, _transitive_interfaces);
-MetadataFactory::free_array<u1>(_loader_data, _annotations);
-MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
-Annotations::free_contents(_loader_data, _fields_annotations);
-Annotations::free_contents(_loader_data, _fields_type_annotations);
+if (_combined_annotations != NULL) {
+  // After all annotations arrays have been created, they are installed into the
+  // Annotations object that will be assigned to the InstanceKlass being created.
+  // Deallocate the Annotations object and the installed annotations arrays.
+  _combined_annotations->deallocate_contents(_loader_data);
+  // If the _combined_annotations pointer is non-NULL,
+  // then the other annotations fields should have been cleared.
+  assert(_annotations == NULL, "Should have been cleared");
+  assert(_type_annotations == NULL, "Should have been cleared");
+  assert(_fields_annotations == NULL, "Should have been cleared");
+  assert(_fields_type_annotations == NULL, "Should have been cleared");
+} else {
+  // If the annotations arrays were not installed into the Annotations object,
+  // then they have to be deallocated explicitly.
+  MetadataFactory::free_array<u1>(_loader_data, _annotations);
+  MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
+  Annotations::free_contents(_loader_data, _fields_annotations);
+  Annotations::free_contents(_loader_data, _fields_type_annotations);
+}
clear_class_metadata();
......
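The destructor change pairs with create_combined_annotations(): ownership of the annotation arrays moves into the combined object exactly once, and the cleared source fields tell the destructor which owner to free. A minimal sketch of that transfer-then-clear pattern (all types hypothetical, plain new/delete in place of the metadata allocator, two arrays instead of four):

  struct Blob { };                         // stands in for an AnnotationArray

  struct Combined {                        // stands in for Annotations
    Blob* class_annotations;
    Blob* type_annotations;
    Combined(Blob* c, Blob* t) : class_annotations(c), type_annotations(t) {}
    ~Combined() { delete class_annotations; delete type_annotations; }
  };

  struct Parser {
    Blob*     _annotations      = nullptr;
    Blob*     _type_annotations = nullptr;
    Combined* _combined         = nullptr;

    void create_combined() {
      if (_annotations == nullptr && _type_annotations == nullptr) return;
      _combined = new Combined(_annotations, _type_annotations);
      _annotations = _type_annotations = nullptr;  // ownership moved: clear the sources
    }

    ~Parser() {                            // frees exactly one owner, never both
      if (_combined != nullptr) {
        delete _combined;                  // also frees the installed arrays
      } else {
        delete _annotations;               // arrays never installed: free directly
        delete _type_annotations;
      }
    }
  };

  int main() {
    Parser p;
    p._annotations = new Blob();
    p.create_combined();                   // installed, so ~Parser frees via _combined
    return 0;
  }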
@@ -75,6 +75,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
Array<u2>* _inner_classes;
Array<Klass*>* _local_interfaces;
Array<Klass*>* _transitive_interfaces;
Annotations* _combined_annotations;
AnnotationArray* _annotations;
AnnotationArray* _type_annotations;
Array<AnnotationArray*>* _fields_annotations;
@@ -86,6 +87,8 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; }
void create_combined_annotations(TRAPS);
void init_parsed_class_attributes(ClassLoaderData* loader_data) {
_loader_data = loader_data;
_synthetic_flag = false;
@@ -110,6 +113,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
_inner_classes = NULL;
_local_interfaces = NULL;
_transitive_interfaces = NULL;
_combined_annotations = NULL;
_annotations = _type_annotations = NULL;
_fields_annotations = _fields_type_annotations = NULL;
}
......
@@ -398,6 +398,18 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
int handler_bci;
int current_bci = bci(thread);
if (thread->frames_to_pop_failed_realloc() > 0) {
// Allocation of scalar replaced object used in this frame
// failed. Unconditionally pop the frame.
thread->dec_frames_to_pop_failed_realloc();
thread->set_vm_result(h_exception());
// If the method is synchronized we already unlocked the monitor
// during deoptimization so the interpreter needs to skip it when
// the frame is popped.
thread->set_do_not_unlock_if_synchronized(true);
return Interpreter::remove_activation_entry();
}
// Need to do this check first since when _do_not_unlock_if_synchronized
// is set, we don't want to trigger any classloading which may make calls
// into java, or surprisingly find a matching exception handler for bci 0
......
@@ -97,11 +97,11 @@ void FileMapInfo::fail_continue(const char *msg, ...) {
tty->print_cr("UseSharedSpaces: %s", msg);
}
}
-UseSharedSpaces = false;
-assert(current_info() != NULL, "singleton must be registered");
-current_info()->close();
}
va_end(ap);
+UseSharedSpaces = false;
+assert(current_info() != NULL, "singleton must be registered");
+current_info()->close();
}
// Fill in the fileMapInfo structure with data about this VM instance. // Fill in the fileMapInfo structure with data about this VM instance.
......
@@ -967,7 +967,7 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
#endif
// If -Xshare:on is specified, print out the error message and exit VM,
// otherwise, set UseSharedSpaces to false and continue.
-if (RequireSharedSpaces) {
+if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
} else {
FLAG_SET_DEFAULT(UseSharedSpaces, false);
......
@@ -119,6 +119,7 @@ oop Universe::_out_of_memory_error_metaspace = NULL;
oop Universe::_out_of_memory_error_class_metaspace = NULL;
oop Universe::_out_of_memory_error_array_size = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
oop Universe::_out_of_memory_error_realloc_objects = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
bool Universe::_verify_in_progress = false;
@@ -190,6 +191,7 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
f->do_oop((oop*)&_out_of_memory_error_array_size);
f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
f->do_oop((oop*)&_out_of_memory_error_realloc_objects);
f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
f->do_oop((oop*)&_null_ptr_exception_instance);
f->do_oop((oop*)&_arithmetic_exception_instance);
@@ -574,7 +576,8 @@ bool Universe::should_fill_in_stack_trace(Handle throwable) {
(throwable() != Universe::_out_of_memory_error_metaspace) &&
(throwable() != Universe::_out_of_memory_error_class_metaspace) &&
(throwable() != Universe::_out_of_memory_error_array_size) &&
-(throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
+(throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
(throwable() != Universe::_out_of_memory_error_realloc_objects));
}
@@ -1044,6 +1047,7 @@ bool universe_post_init() {
Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
Universe::_out_of_memory_error_gc_overhead_limit =
k_h->allocate_instance(CHECK_false);
Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false);
// Setup preallocated NullPointerException
// (this is currently used for a cheap & dirty solution in compiler exception handling)
@@ -1083,6 +1087,9 @@ bool universe_post_init() {
msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false);
java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg());
msg = java_lang_String::create_from_str("/ by zero", CHECK_false); msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg()); java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
......
@@ -157,6 +157,7 @@ class Universe: AllStatic {
static oop _out_of_memory_error_class_metaspace;
static oop _out_of_memory_error_array_size;
static oop _out_of_memory_error_gc_overhead_limit;
static oop _out_of_memory_error_realloc_objects;
static Array<int>* _the_empty_int_array; // Canonicalized int array
static Array<u2>* _the_empty_short_array; // Canonicalized short array
@@ -328,6 +329,7 @@ class Universe: AllStatic {
static oop out_of_memory_error_class_metaspace() { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace); }
static oop out_of_memory_error_array_size() { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
static oop out_of_memory_error_gc_overhead_limit() { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit); }
static oop out_of_memory_error_realloc_objects() { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects); }
// Accessors needed for fast allocation
static Klass** boolArrayKlassObj_addr() { return &_boolArrayKlassObj; }
......
@@ -462,7 +462,8 @@ const Type *CastIINode::Value(PhaseTransform *phase) const {
// Try to improve the type of the CastII if we recognize a CmpI/If
// pattern.
if (_carry_dependency) {
-if (in(0) != NULL && (in(0)->is_IfFalse() || in(0)->is_IfTrue())) {
+if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) {
+assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj");
Node* proj = in(0);
if (proj->in(0)->in(1)->is_Bool()) {
Node* b = proj->in(0)->in(1);
......
@@ -820,6 +820,11 @@ static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);
struct RangeCheck {
Node* ctl;
jint off;
};
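The new struct backs a small ring buffer: the scan below keeps only the NRC most recent matching range checks, indexed modulo NRC, instead of the previous fixed prev_chk1/prev_chk2 pair. A minimal Java sketch of that bookkeeping (all names are hypothetical stand-ins for prev_checks/nb_checks):

    public class RangeCheckRing {
        static final int NRC = 3;        // top 3 checks, as in the code below
        int[] offs = new int[NRC];       // stands in for RangeCheck::off
        int nbChecks = 0;

        void record(int off) {
            offs[nbChecks % NRC] = off;  // overwrite the oldest slot
            nbChecks++;
        }

        // Callers guard nbChecks >= 1 / >= 2, as ifnode.cpp does, so the
        // indices below are never negative.
        int newest()       { return offs[(nbChecks - 1) % NRC]; } // chk0
        int secondNewest() { return offs[(nbChecks - 2) % NRC]; } // chk1

        public static void main(String[] args) {
            RangeCheckRing r = new RangeCheckRing();
            for (int off : new int[]{9, 1, 3, 7}) r.record(off);
            // Only the last three records survive; newest is 7, then 3.
            System.out.println(r.newest() + " " + r.secondNewest());
        }
    }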
//------------------------------Ideal------------------------------------------ //------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out // Return a node which is more "ideal" than the current node. Strip out
// control copies // control copies
...@@ -861,83 +866,141 @@ Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) { ...@@ -861,83 +866,141 @@ Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
jint offset1; jint offset1;
int flip1 = is_range_check(range1, index1, offset1); int flip1 = is_range_check(range1, index1, offset1);
if( flip1 ) { if( flip1 ) {
Node *first_prev_dom = NULL;
// Try to remove extra range checks. All 'up_one_dom' gives up at merges // Try to remove extra range checks. All 'up_one_dom' gives up at merges
// so all checks we inspect post-dominate the top-most check we find. // so all checks we inspect post-dominate the top-most check we find.
// If we are going to fail the current check and we reach the top check // If we are going to fail the current check and we reach the top check
// then we are guaranteed to fail, so just start interpreting there. // then we are guaranteed to fail, so just start interpreting there.
// We 'expand' the top 2 range checks to include all post-dominating // We 'expand' the top 3 range checks to include all post-dominating
// checks. // checks.
// The top 2 range checks seen // The top 3 range checks seen
Node *prev_chk1 = NULL; const int NRC = 3;
Node *prev_chk2 = NULL; RangeCheck prev_checks[NRC];
int nb_checks = 0;
// Low and high offsets seen so far // Low and high offsets seen so far
jint off_lo = offset1; jint off_lo = offset1;
jint off_hi = offset1; jint off_hi = offset1;
// Scan for the top 2 checks and collect range of offsets bool found_immediate_dominator = false;
for( int dist = 0; dist < 999; dist++ ) { // Range-Check scan limit
if( dom->Opcode() == Op_If && // Not same opcode? // Scan for the top checks and collect range of offsets
prev_dom->in(0) == dom ) { // One path of test does dominate? for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
if( dom == this ) return NULL; // dead loop if (dom->Opcode() == Op_If && // Not same opcode?
prev_dom->in(0) == dom) { // One path of test does dominate?
if (dom == this) return NULL; // dead loop
// See if this is a range check // See if this is a range check
Node *index2, *range2; Node *index2, *range2;
jint offset2; jint offset2;
int flip2 = dom->as_If()->is_range_check(range2, index2, offset2); int flip2 = dom->as_If()->is_range_check(range2, index2, offset2);
// See if this is a _matching_ range check, checking against // See if this is a _matching_ range check, checking against
// the same array bounds. // the same array bounds.
if( flip2 == flip1 && range2 == range1 && index2 == index1 && if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
dom->outcnt() == 2 ) { dom->outcnt() == 2) {
if (nb_checks == 0 && dom->in(1) == in(1)) {
// Found an immediately dominating test at the same offset.
// This kind of back-to-back test can be eliminated locally,
// and there is no need to search further for dominating tests.
assert(offset2 == offset1, "Same test but different offsets");
found_immediate_dominator = true;
break;
}
// Gather expanded bounds // Gather expanded bounds
off_lo = MIN2(off_lo,offset2); off_lo = MIN2(off_lo,offset2);
off_hi = MAX2(off_hi,offset2); off_hi = MAX2(off_hi,offset2);
// Record top 2 range checks // Record top NRC range checks
prev_chk2 = prev_chk1; prev_checks[nb_checks%NRC].ctl = prev_dom;
prev_chk1 = prev_dom; prev_checks[nb_checks%NRC].off = offset2;
// If we match the test exactly, then the top test covers nb_checks++;
// both our lower and upper bounds.
if( dom->in(1) == in(1) )
prev_chk2 = prev_chk1;
} }
} }
prev_dom = dom; prev_dom = dom;
dom = up_one_dom( dom ); dom = up_one_dom(dom);
if( !dom ) break; if (!dom) break;
} }
if (!found_immediate_dominator) {
// Attempt to widen the dominating range check to cover some later
// ones. Since range checks "fail" by uncommon-trapping to the
// interpreter, widening a check can make us speculatively enter
// the interpreter. If we see range-check deopt's, do not widen!
if (!phase->C->allow_range_check_smearing()) return NULL;
// Attempt to widen the dominating range check to cover some later
// ones. Since range checks "fail" by uncommon-trapping to the
// interpreter, widening a check can make us speculatively enter the
// interpreter. If we see range-check deopt's, do not widen!
if (!phase->C->allow_range_check_smearing()) return NULL;
// Constant indices only need to check the upper bound.
// Non-constant indices must check both low and high.
if( index1 ) {
// Didn't find 2 prior covering checks, so cannot remove anything.
if( !prev_chk2 ) return NULL;
// 'Widen' the offsets of the 1st and 2nd covering check
adjust_check( prev_chk1, range1, index1, flip1, off_lo, igvn );
// Do not call adjust_check twice on the same projection
// as the first call may have transformed the BoolNode to a ConI
if( prev_chk1 != prev_chk2 ) {
adjust_check( prev_chk2, range1, index1, flip1, off_hi, igvn );
}
// Test is now covered by prior checks, dominate it out
prev_dom = prev_chk2;
} else {
// Didn't find prior covering check, so cannot remove anything. // Didn't find prior covering check, so cannot remove anything.
if( !prev_chk1 ) return NULL; if (nb_checks == 0) {
// 'Widen' the offset of the 1st and only covering check return NULL;
adjust_check( prev_chk1, range1, index1, flip1, off_hi, igvn ); }
// Test is now covered by prior checks, dominate it out // Constant indices only need to check the upper bound.
prev_dom = prev_chk1; // Non-constant indices must check both low and high.
int chk0 = (nb_checks - 1) % NRC;
if (index1) {
if (nb_checks == 1) {
return NULL;
} else {
// If the top range check's constant is the min or max of
// all constants we widen the next one to cover the whole
// range of constants.
RangeCheck rc0 = prev_checks[chk0];
int chk1 = (nb_checks - 2) % NRC;
RangeCheck rc1 = prev_checks[chk1];
if (rc0.off == off_lo) {
adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
prev_dom = rc1.ctl;
} else if (rc0.off == off_hi) {
adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
prev_dom = rc1.ctl;
} else {
// If the top test's constant is not the min or max of all
// constants, we need 3 range checks. We must leave the
// top test unchanged because widening it would allow the
// accesses it protects to successfully read/write out of
// bounds.
if (nb_checks == 2) {
return NULL;
}
int chk2 = (nb_checks - 3) % NRC;
RangeCheck rc2 = prev_checks[chk2];
// The top range check a+i covers interval: -a <= i < length-a
// The second range check b+i covers interval: -b <= i < length-b
if (rc1.off <= rc0.off) {
// if b <= a, we change the second range check to:
// -min_of_all_constants <= i < length-min_of_all_constants
// Together top and second range checks now cover:
// -min_of_all_constants <= i < length-a
// which is more restrictive than -b <= i < length-b:
// -b <= -min_of_all_constants <= i < length-a <= length-b
// The third check is then changed to:
// -max_of_all_constants <= i < length-max_of_all_constants
// so 2nd and 3rd checks restrict allowed values of i to:
// -min_of_all_constants <= i < length-max_of_all_constants
adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
} else {
// if b > a, we change the second range check to:
// -max_of_all_constants <= i < length-max_of_all_constants
// Together top and second range checks now cover:
// -a <= i < length-max_of_all_constants
// which is more restrictive than -b <= i < length-b:
// -b < -a <= i < length-max_of_all_constants <= length-b
// The third check is then changed to:
// -max_of_all_constants <= i < length-max_of_all_constants
// so 2nd and 3rd checks restrict allowed values of i to:
// -min_of_all_constants <= i < length-max_of_all_constants
adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
}
prev_dom = rc2.ctl;
}
}
} else {
RangeCheck rc0 = prev_checks[chk0];
// 'Widen' the offset of the 1st and only covering check
adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
// Test is now covered by prior checks, dominate it out
prev_dom = rc0.ctl;
}
} }
} else { // Scan for an equivalent test } else { // Scan for an equivalent test
Node *cmp; Node *cmp;
...@@ -1019,7 +1082,7 @@ void IfNode::dominated_by( Node *prev_dom, PhaseIterGVN *igvn ) { ...@@ -1019,7 +1082,7 @@ void IfNode::dominated_by( Node *prev_dom, PhaseIterGVN *igvn ) {
// for lower and upper bounds. // for lower and upper bounds.
ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj(); ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
prev_dom = idom; prev_dom = idom;
// Now walk the current IfNode's projections. // Now walk the current IfNode's projections.
// Loop ends when 'this' has no more uses. // Loop ends when 'this' has no more uses.
......
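A concrete instance of the interval arithmetic in the widening logic above, as plain Java (the offsets match m3 in TestRangeCheckSmearing, added later in this change; this is a sketch of the shape of code the transformation targets, not VM code):

    public class SmearingSketch {
        static int m(int[] array, int i) {
            int res = array[i + 3];  // top check: 3 is neither min nor max, so it stays
            res += array[i + 1];     // off_lo = 1
            res += array[i + 9];     // off_hi = 9
            // After smearing, the two later checks are widened to enforce
            //   -1 <= i  and  i < array.length - 9,
            // which covers every constant offset in [1, 9]. The top check is
            // left alone: widening it would let its own access array[i+3]
            // proceed out of bounds before the widened checks run.
            return res;
        }

        public static void main(String[] args) {
            System.out.println(m(new int[12], 0)); // all offsets in bounds
        }
    }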
...@@ -239,8 +239,13 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exc ...@@ -239,8 +239,13 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exc
ProjNode* dp_proj = dp->as_Proj(); ProjNode* dp_proj = dp->as_Proj();
ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj(); ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
if (exclude_loop_predicate && if (exclude_loop_predicate &&
unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) ||
unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check))) {
// If this is a range check (IfNode::is_range_check), do not
// reorder because Compile::allow_range_check_smearing might have
// changed the check.
return; // Let IGVN transformation change control dependence. return; // Let IGVN transformation change control dependence.
}
IdealLoopTree *old_loop = get_loop(dp); IdealLoopTree *old_loop = get_loop(dp);
...@@ -896,23 +901,23 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) { ...@@ -896,23 +901,23 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
int n_op = n->Opcode(); int n_op = n->Opcode();
// Check for an IF being dominated by another IF same test // Check for an IF being dominated by another IF same test
if( n_op == Op_If ) { if (n_op == Op_If) {
Node *bol = n->in(1); Node *bol = n->in(1);
uint max = bol->outcnt(); uint max = bol->outcnt();
// Check for same test used more than once? // Check for same test used more than once?
if( n_op == Op_If && max > 1 && bol->is_Bool() ) { if (max > 1 && bol->is_Bool()) {
// Search up IDOMs to see if this IF is dominated. // Search up IDOMs to see if this IF is dominated.
Node *cutoff = get_ctrl(bol); Node *cutoff = get_ctrl(bol);
// Now search up IDOMs till cutoff, looking for a dominating test // Now search up IDOMs till cutoff, looking for a dominating test
Node *prevdom = n; Node *prevdom = n;
Node *dom = idom(prevdom); Node *dom = idom(prevdom);
while( dom != cutoff ) { while (dom != cutoff) {
if( dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom ) { if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
// Replace the dominated test with an obvious true or false. // Replace the dominated test with an obvious true or false.
// Place it on the IGVN worklist for later cleanup. // Place it on the IGVN worklist for later cleanup.
C->set_major_progress(); C->set_major_progress();
dominated_by( prevdom, n, false, true ); dominated_by(prevdom, n, false, true);
#ifndef PRODUCT #ifndef PRODUCT
if( VerifyLoopOptimizations ) verify(); if( VerifyLoopOptimizations ) verify();
#endif #endif
......
...@@ -964,7 +964,11 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) { ...@@ -964,7 +964,11 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
} }
bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
if (!EliminateAllocations || !alloc->_is_non_escaping) { // Don't do scalar replacement if the frame can be popped by JVMTI:
// if reallocation fails during deoptimization we'll pop all
// interpreter frames for this compiled frame and that won't play
// nice with JVMTI popframe.
if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
return false; return false;
} }
Node* klass = alloc->in(AllocateNode::KlassNode); Node* klass = alloc->in(AllocateNode::KlassNode);
......
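For context, the allocations eliminate_allocate_node removes are non-escaping ones. A hedged Java sketch of the pattern (Point is a hypothetical class; whether C2 actually scalar-replaces it depends on inlining and escape analysis):

    public class ScalarReplaceSketch {
        static final class Point {
            final int x, y;
            Point(int x, int y) { this.x = x; this.y = y; }
        }

        // p never escapes, so C2 can scalar-replace it: no heap allocation,
        // just two values in registers. With JVMTI popframe possible, the
        // change above skips this elimination, because a failed reallocation
        // during deoptimization would pop all interpreter frames for the
        // compiled frame, which JVMTI PopFrame cannot tolerate.
        static int sum() {
            Point p = new Point(1, 2);
            return p.x + p.y;
        }

        public static void main(String[] args) {
            long total = 0;
            for (int i = 0; i < 100_000; i++) {
                total += sum(); // warm up so the method gets compiled
            }
            System.out.println(total);
        }
    }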
...@@ -213,6 +213,8 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread ...@@ -213,6 +213,8 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
assert(vf->is_compiled_frame(), "Wrong frame type"); assert(vf->is_compiled_frame(), "Wrong frame type");
chunk->push(compiledVFrame::cast(vf)); chunk->push(compiledVFrame::cast(vf));
bool realloc_failures = false;
#ifdef COMPILER2 #ifdef COMPILER2
// Reallocate the non-escaping objects and restore their fields. Then // Reallocate the non-escaping objects and restore their fields. Then
// relock objects if synchronization on them was eliminated. // relock objects if synchronization on them was eliminated.
...@@ -243,19 +245,16 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread ...@@ -243,19 +245,16 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread); tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread);
} }
} }
bool reallocated = false;
if (objects != NULL) { if (objects != NULL) {
JRT_BLOCK JRT_BLOCK
reallocated = realloc_objects(thread, &deoptee, objects, THREAD); realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
JRT_END JRT_END
} reassign_fields(&deoptee, &map, objects, realloc_failures);
if (reallocated) {
reassign_fields(&deoptee, &map, objects);
#ifndef PRODUCT #ifndef PRODUCT
if (TraceDeoptimization) { if (TraceDeoptimization) {
ttyLocker ttyl; ttyLocker ttyl;
tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread); tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
print_objects(objects); print_objects(objects, realloc_failures);
} }
#endif #endif
} }
...@@ -273,7 +272,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread ...@@ -273,7 +272,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
assert (cvf->scope() != NULL,"expect only compiled java frames"); assert (cvf->scope() != NULL,"expect only compiled java frames");
GrowableArray<MonitorInfo*>* monitors = cvf->monitors(); GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
if (monitors->is_nonempty()) { if (monitors->is_nonempty()) {
relock_objects(monitors, thread); relock_objects(monitors, thread, realloc_failures);
#ifndef PRODUCT #ifndef PRODUCT
if (TraceDeoptimization) { if (TraceDeoptimization) {
ttyLocker ttyl; ttyLocker ttyl;
...@@ -284,7 +283,12 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread ...@@ -284,7 +283,12 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
first = false; first = false;
tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread); tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
} }
tty->print_cr(" object <" INTPTR_FORMAT "> locked", (void *)mi->owner()); if (mi->owner_is_scalar_replaced()) {
Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
tty->print_cr(" failed reallocation for klass %s", k->external_name());
} else {
tty->print_cr(" object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
}
} }
} }
} }
...@@ -299,9 +303,14 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread ...@@ -299,9 +303,14 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
// out the java state residing in the vframeArray will be missed. // out the java state residing in the vframeArray will be missed.
No_Safepoint_Verifier no_safepoint; No_Safepoint_Verifier no_safepoint;
vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk); vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
#ifdef COMPILER2
if (realloc_failures) {
pop_frames_failed_reallocs(thread, array);
}
#endif
assert(thread->vframe_array_head() == NULL, "Pending deopt!");; assert(thread->vframe_array_head() == NULL, "Pending deopt!");
thread->set_vframe_array_head(array); thread->set_vframe_array_head(array);
// Now that the vframeArray has been created if we have any deferred local writes // Now that the vframeArray has been created if we have any deferred local writes
...@@ -753,6 +762,8 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArra ...@@ -753,6 +762,8 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArra
int exception_line = thread->exception_line(); int exception_line = thread->exception_line();
thread->clear_pending_exception(); thread->clear_pending_exception();
bool failures = false;
for (int i = 0; i < objects->length(); i++) { for (int i = 0; i < objects->length(); i++) {
assert(objects->at(i)->is_object(), "invalid debug information"); assert(objects->at(i)->is_object(), "invalid debug information");
ObjectValue* sv = (ObjectValue*) objects->at(i); ObjectValue* sv = (ObjectValue*) objects->at(i);
...@@ -762,27 +773,34 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArra ...@@ -762,27 +773,34 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArra
if (k->oop_is_instance()) { if (k->oop_is_instance()) {
InstanceKlass* ik = InstanceKlass::cast(k()); InstanceKlass* ik = InstanceKlass::cast(k());
obj = ik->allocate_instance(CHECK_(false)); obj = ik->allocate_instance(THREAD);
} else if (k->oop_is_typeArray()) { } else if (k->oop_is_typeArray()) {
TypeArrayKlass* ak = TypeArrayKlass::cast(k()); TypeArrayKlass* ak = TypeArrayKlass::cast(k());
assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length"); assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
int len = sv->field_size() / type2size[ak->element_type()]; int len = sv->field_size() / type2size[ak->element_type()];
obj = ak->allocate(len, CHECK_(false)); obj = ak->allocate(len, THREAD);
} else if (k->oop_is_objArray()) { } else if (k->oop_is_objArray()) {
ObjArrayKlass* ak = ObjArrayKlass::cast(k()); ObjArrayKlass* ak = ObjArrayKlass::cast(k());
obj = ak->allocate(sv->field_size(), CHECK_(false)); obj = ak->allocate(sv->field_size(), THREAD);
}
if (obj == NULL) {
failures = true;
} }
assert(obj != NULL, "allocation failed");
assert(sv->value().is_null(), "redundant reallocation"); assert(sv->value().is_null(), "redundant reallocation");
assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
CLEAR_PENDING_EXCEPTION;
sv->set_value(obj); sv->set_value(obj);
} }
if (pending_exception.not_null()) { if (failures) {
THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
} else if (pending_exception.not_null()) {
thread->set_pending_exception(pending_exception(), exception_file, exception_line); thread->set_pending_exception(pending_exception(), exception_file, exception_line);
} }
return true; return failures;
} }
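The CHECK_(false) to THREAD change above switches realloc_objects from bailing out on the first failed allocation to attempting every object, recording failures, and reporting once at the end. A minimal Java analogue of that control-flow change (tryAllocate is a hypothetical stand-in for the klass allocate calls):

    import java.util.ArrayList;
    import java.util.List;

    public class CollectFailuresSketch {
        public static void main(String[] args) {
            int[] sizes = { 4, 8, 16 };
            List<int[]> objs = new ArrayList<>();
            boolean failures = false;
            for (int size : sizes) {
                int[] obj = tryAllocate(size);
                if (obj == null) {
                    failures = true;   // note the failure, keep going
                }
                objs.add(obj);         // the slot stays null on failure
            }
            System.out.println(failures ? "some reallocations failed"
                                        : "all objects reallocated");
        }

        // Failure is reported as null instead of an exception, mirroring the
        // pending-exception-cleared style of the VM code above.
        static int[] tryAllocate(int size) {
            try {
                return new int[size];
            } catch (OutOfMemoryError e) {
                return null;
            }
        }
    }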
// This assumes that the fields are stored in ObjectValue in the same order // This assumes that the fields are stored in ObjectValue in the same order
...@@ -920,12 +938,15 @@ void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_ ...@@ -920,12 +938,15 @@ void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_
// restore fields of all eliminated objects and arrays // restore fields of all eliminated objects and arrays
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) { void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
for (int i = 0; i < objects->length(); i++) { for (int i = 0; i < objects->length(); i++) {
ObjectValue* sv = (ObjectValue*) objects->at(i); ObjectValue* sv = (ObjectValue*) objects->at(i);
KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()())); KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
Handle obj = sv->value(); Handle obj = sv->value();
assert(obj.not_null(), "reallocation was missed"); assert(obj.not_null() || realloc_failures, "reallocation was missed");
if (obj.is_null()) {
continue;
}
if (k->oop_is_instance()) { if (k->oop_is_instance()) {
InstanceKlass* ik = InstanceKlass::cast(k()); InstanceKlass* ik = InstanceKlass::cast(k());
...@@ -942,34 +963,36 @@ void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableAr ...@@ -942,34 +963,36 @@ void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableAr
// relock objects for which synchronization was eliminated // relock objects for which synchronization was eliminated
void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) { void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
for (int i = 0; i < monitors->length(); i++) { for (int i = 0; i < monitors->length(); i++) {
MonitorInfo* mon_info = monitors->at(i); MonitorInfo* mon_info = monitors->at(i);
if (mon_info->eliminated()) { if (mon_info->eliminated()) {
assert(mon_info->owner() != NULL, "reallocation was missed"); assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
Handle obj = Handle(mon_info->owner()); if (!mon_info->owner_is_scalar_replaced()) {
markOop mark = obj->mark(); Handle obj = Handle(mon_info->owner());
if (UseBiasedLocking && mark->has_bias_pattern()) { markOop mark = obj->mark();
// New allocated objects may have the mark set to anonymously biased. if (UseBiasedLocking && mark->has_bias_pattern()) {
// Also the deoptimized method may have called methods with synchronization // New allocated objects may have the mark set to anonymously biased.
// where the thread-local object is bias locked to the current thread. // Also the deoptimized method may have called methods with synchronization
assert(mark->is_biased_anonymously() || // where the thread-local object is bias locked to the current thread.
mark->biased_locker() == thread, "should be locked to current thread"); assert(mark->is_biased_anonymously() ||
// Reset mark word to unbiased prototype. mark->biased_locker() == thread, "should be locked to current thread");
markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); // Reset mark word to unbiased prototype.
obj->set_mark(unbiased_prototype); markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
obj->set_mark(unbiased_prototype);
}
BasicLock* lock = mon_info->lock();
ObjectSynchronizer::slow_enter(obj, lock, thread);
assert(mon_info->owner()->is_locked(), "object must be locked now");
} }
BasicLock* lock = mon_info->lock();
ObjectSynchronizer::slow_enter(obj, lock, thread);
} }
assert(mon_info->owner()->is_locked(), "object must be locked now");
} }
} }
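The monitors relock_objects handles come from lock elision on non-escaping objects; the new owner_is_scalar_replaced() path skips those whose owner was never reallocated. A hedged sketch of the kind of lock involved (whether the lock is actually elided depends on escape analysis):

    public class ElidedLockSketch {
        static int counter;

        // 'local' never escapes, so C2 elides both its allocation and the
        // monitorenter/monitorexit. On deoptimization the object must be
        // reallocated and relocked -- unless reallocation failed, which is
        // exactly the owner_is_scalar_replaced() case skipped above.
        static void bump() {
            Object local = new Object();
            synchronized (local) {
                counter++;
            }
        }

        public static void main(String[] args) {
            for (int i = 0; i < 100_000; i++) {
                bump();
            }
            System.out.println(counter);
        }
    }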
#ifndef PRODUCT #ifndef PRODUCT
// print information about reallocated objects // print information about reallocated objects
void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) { void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
fieldDescriptor fd; fieldDescriptor fd;
for (int i = 0; i < objects->length(); i++) { for (int i = 0; i < objects->length(); i++) {
...@@ -979,10 +1002,15 @@ void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) { ...@@ -979,10 +1002,15 @@ void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
tty->print(" object <" INTPTR_FORMAT "> of type ", (void *)sv->value()()); tty->print(" object <" INTPTR_FORMAT "> of type ", (void *)sv->value()());
k->print_value(); k->print_value();
tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize); assert(obj.not_null() || realloc_failures, "reallocation was missed");
if (obj.is_null()) {
tty->print(" allocation failed");
} else {
tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
}
tty->cr(); tty->cr();
if (Verbose) { if (Verbose && !obj.is_null()) {
k->oop_print_on(obj(), tty); k->oop_print_on(obj(), tty);
} }
} }
...@@ -990,7 +1018,7 @@ void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) { ...@@ -990,7 +1018,7 @@ void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
#endif #endif
#endif // COMPILER2 #endif // COMPILER2
vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) { vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp()); Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp());
#ifndef PRODUCT #ifndef PRODUCT
...@@ -1033,7 +1061,7 @@ vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, Re ...@@ -1033,7 +1061,7 @@ vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, Re
// Since the Java thread being deoptimized will eventually adjust its own stack, // Since the Java thread being deoptimized will eventually adjust its own stack,
// the vframeArray containing the unpacking information is allocated in the C heap. // the vframeArray containing the unpacking information is allocated in the C heap.
// For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames(). // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr); vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
// Compare the vframeArray to the collected vframes // Compare the vframeArray to the collected vframes
assert(array->structural_compare(thread, chunk), "just checking"); assert(array->structural_compare(thread, chunk), "just checking");
...@@ -1048,6 +1076,33 @@ vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, Re ...@@ -1048,6 +1076,33 @@ vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, Re
return array; return array;
} }
#ifdef COMPILER2
void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
// Reallocation of some scalar replaced objects failed. Record
// that we need to pop all the interpreter frames for the
// deoptimized compiled frame.
assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
thread->set_frames_to_pop_failed_realloc(array->frames());
// Unlock all monitors here otherwise the interpreter will see a
// mix of locked and unlocked monitors (because of failed
// reallocations of synchronized objects) and be confused.
for (int i = 0; i < array->frames(); i++) {
MonitorChunk* monitors = array->element(i)->monitors();
if (monitors != NULL) {
for (int j = 0; j < monitors->number_of_monitors(); j++) {
BasicObjectLock* src = monitors->at(j);
if (src->obj() != NULL) {
ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
}
}
array->element(i)->free_monitors(thread);
#ifdef ASSERT
array->element(i)->set_removed_monitors();
#endif
}
}
}
#endif
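The frames being popped are the interpreter frames created for the Java frames inlined into the one deoptimized compiled frame. A hedged Java sketch of code that produces such frames (similar in spirit to the TestDeoptOOM methods at the end of this change; Box and the method names are made up):

    public class PopFramesSketch {
        static final class Box { long f1, f2; }

        // With inner() inlined into outer(), one compiled frame stands for two
        // Java frames. If reallocating 'b' fails during deoptimization, both
        // interpreter frames are popped and the preallocated OOM propagates to
        // the catch below; any monitors those frames held are exited first so
        // the interpreter never sees a half-locked state.
        static long outer(boolean deopt) {
            return inner(deopt);
        }

        static long inner(boolean deopt) {
            Box b = new Box();       // candidate for scalar replacement
            b.f1 = 1;
            b.f2 = 2;
            return deopt ? b.f1 + b.f2 : 0;
        }

        public static void main(String[] args) {
            long total = 0;
            for (int i = 0; i < 100_000; i++) {
                total += outer(false);
            }
            try {
                total += outer(true);
            } catch (OutOfMemoryError oom) {
                System.out.println(oom.getMessage()); // only under real heap pressure
            }
            System.out.println(total);
        }
    }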
static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) { static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
GrowableArray<MonitorInfo*>* monitors = cvf->monitors(); GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
......
...@@ -120,13 +120,14 @@ class Deoptimization : AllStatic { ...@@ -120,13 +120,14 @@ class Deoptimization : AllStatic {
static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS); static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS);
static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type); static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type);
static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj); static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj);
static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects); static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures);
static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread); static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures);
NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects);) static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array);
NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures);)
#endif // COMPILER2 #endif // COMPILER2
public: public:
static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk); static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures);
// Interface used for unpacking deoptimized frames // Interface used for unpacking deoptimized frames
......
...@@ -482,6 +482,7 @@ JRT_END ...@@ -482,6 +482,7 @@ JRT_END
address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) { address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address)); assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address));
assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
// Reset method handle flag. // Reset method handle flag.
thread->set_is_method_handle_return(false); thread->set_is_method_handle_return(false);
......
...@@ -1495,6 +1495,7 @@ void JavaThread::initialize() { ...@@ -1495,6 +1495,7 @@ void JavaThread::initialize() {
_popframe_condition = popframe_inactive; _popframe_condition = popframe_inactive;
_popframe_preserved_args = NULL; _popframe_preserved_args = NULL;
_popframe_preserved_args_size = 0; _popframe_preserved_args_size = 0;
_frames_to_pop_failed_realloc = 0;
pd_initialize(); pd_initialize();
} }
......
...@@ -933,6 +933,12 @@ class JavaThread: public Thread { ...@@ -933,6 +933,12 @@ class JavaThread: public Thread {
// This is set to popframe_pending to signal that top Java frame should be popped immediately // This is set to popframe_pending to signal that top Java frame should be popped immediately
int _popframe_condition; int _popframe_condition;
// If reallocation of scalar replaced objects fails, we throw OOM
// and during exception propagation, pop the top
// _frames_to_pop_failed_realloc frames, the ones that reference
// failed reallocations.
int _frames_to_pop_failed_realloc;
#ifndef PRODUCT #ifndef PRODUCT
int _jmp_ring_index; int _jmp_ring_index;
struct { struct {
...@@ -1585,6 +1591,10 @@ public: ...@@ -1585,6 +1591,10 @@ public:
void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; } void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; }
#endif #endif
int frames_to_pop_failed_realloc() const { return _frames_to_pop_failed_realloc; }
void set_frames_to_pop_failed_realloc(int nb) { _frames_to_pop_failed_realloc = nb; }
void dec_frames_to_pop_failed_realloc() { _frames_to_pop_failed_realloc--; }
private: private:
// Saved incoming arguments to popped frame. // Saved incoming arguments to popped frame.
// Used only when popped interpreted frame returns to deoptimized frame. // Used only when popped interpreted frame returns to deoptimized frame.
......
...@@ -56,7 +56,7 @@ void vframeArrayElement::free_monitors(JavaThread* jt) { ...@@ -56,7 +56,7 @@ void vframeArrayElement::free_monitors(JavaThread* jt) {
} }
} }
void vframeArrayElement::fill_in(compiledVFrame* vf) { void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) {
// Copy the information from the compiled vframe to the // Copy the information from the compiled vframe to the
// interpreter frame we will be creating to replace vf // interpreter frame we will be creating to replace vf
...@@ -64,6 +64,9 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) { ...@@ -64,6 +64,9 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) {
_method = vf->method(); _method = vf->method();
_bci = vf->raw_bci(); _bci = vf->raw_bci();
_reexecute = vf->should_reexecute(); _reexecute = vf->should_reexecute();
#ifdef ASSERT
_removed_monitors = false;
#endif
int index; int index;
...@@ -81,11 +84,15 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) { ...@@ -81,11 +84,15 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) {
// Migrate the BasicLocks from the stack to the monitor chunk // Migrate the BasicLocks from the stack to the monitor chunk
for (index = 0; index < list->length(); index++) { for (index = 0; index < list->length(); index++) {
MonitorInfo* monitor = list->at(index); MonitorInfo* monitor = list->at(index);
assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already"); assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
BasicObjectLock* dest = _monitors->at(index); BasicObjectLock* dest = _monitors->at(index);
dest->set_obj(monitor->owner()); if (monitor->owner_is_scalar_replaced()) {
monitor->lock()->move_to(monitor->owner(), dest->lock()); dest->set_obj(NULL);
} else {
assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
dest->set_obj(monitor->owner());
monitor->lock()->move_to(monitor->owner(), dest->lock());
}
} }
} }
...@@ -110,7 +117,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) { ...@@ -110,7 +117,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) {
StackValue* value = locs->at(index); StackValue* value = locs->at(index);
switch(value->type()) { switch(value->type()) {
case T_OBJECT: case T_OBJECT:
assert(!value->obj_is_scalar_replaced(), "object should be reallocated already"); assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
// preserve object type // preserve object type
_locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT )); _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
break; break;
...@@ -135,7 +142,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) { ...@@ -135,7 +142,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) {
StackValue* value = exprs->at(index); StackValue* value = exprs->at(index);
switch(value->type()) { switch(value->type()) {
case T_OBJECT: case T_OBJECT:
assert(!value->obj_is_scalar_replaced(), "object should be reallocated already"); assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
// preserve object type // preserve object type
_expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT )); _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
break; break;
...@@ -286,7 +293,7 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, ...@@ -286,7 +293,7 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
_frame.patch_pc(thread, pc); _frame.patch_pc(thread, pc);
assert (!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors"); assert (!method()->is_synchronized() || locks > 0 || _removed_monitors, "synchronized methods must have monitors");
BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin(); BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
for (int index = 0; index < locks; index++) { for (int index = 0; index < locks; index++) {
...@@ -438,7 +445,8 @@ int vframeArrayElement::on_stack_size(int callee_parameters, ...@@ -438,7 +445,8 @@ int vframeArrayElement::on_stack_size(int callee_parameters,
vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
RegisterMap *reg_map, frame sender, frame caller, frame self) { RegisterMap *reg_map, frame sender, frame caller, frame self,
bool realloc_failures) {
// Allocate the vframeArray // Allocate the vframeArray
vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
...@@ -450,19 +458,20 @@ vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableA ...@@ -450,19 +458,20 @@ vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableA
result->_caller = caller; result->_caller = caller;
result->_original = self; result->_original = self;
result->set_unroll_block(NULL); // initialize it result->set_unroll_block(NULL); // initialize it
result->fill_in(thread, frame_size, chunk, reg_map); result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures);
return result; return result;
} }
void vframeArray::fill_in(JavaThread* thread, void vframeArray::fill_in(JavaThread* thread,
int frame_size, int frame_size,
GrowableArray<compiledVFrame*>* chunk, GrowableArray<compiledVFrame*>* chunk,
const RegisterMap *reg_map) { const RegisterMap *reg_map,
bool realloc_failures) {
// Set owner first, it is used when adding monitor chunks // Set owner first, it is used when adding monitor chunks
_frame_size = frame_size; _frame_size = frame_size;
for(int i = 0; i < chunk->length(); i++) { for(int i = 0; i < chunk->length(); i++) {
element(i)->fill_in(chunk->at(i)); element(i)->fill_in(chunk->at(i), realloc_failures);
} }
// Copy registers for callee-saved registers // Copy registers for callee-saved registers
......
...@@ -58,6 +58,9 @@ class vframeArrayElement : public _ValueObj { ...@@ -58,6 +58,9 @@ class vframeArrayElement : public _ValueObj {
MonitorChunk* _monitors; // active monitors for this vframe MonitorChunk* _monitors; // active monitors for this vframe
StackValueCollection* _locals; StackValueCollection* _locals;
StackValueCollection* _expressions; StackValueCollection* _expressions;
#ifdef ASSERT
bool _removed_monitors;
#endif
public: public:
...@@ -78,7 +81,7 @@ class vframeArrayElement : public _ValueObj { ...@@ -78,7 +81,7 @@ class vframeArrayElement : public _ValueObj {
StackValueCollection* expressions(void) const { return _expressions; } StackValueCollection* expressions(void) const { return _expressions; }
void fill_in(compiledVFrame* vf); void fill_in(compiledVFrame* vf, bool realloc_failures);
// Formerly part of deoptimizedVFrame // Formerly part of deoptimizedVFrame
...@@ -99,6 +102,12 @@ class vframeArrayElement : public _ValueObj { ...@@ -99,6 +102,12 @@ class vframeArrayElement : public _ValueObj {
bool is_bottom_frame, bool is_bottom_frame,
int exec_mode); int exec_mode);
#ifdef ASSERT
void set_removed_monitors() {
_removed_monitors = true;
}
#endif
#ifndef PRODUCT #ifndef PRODUCT
void print(outputStream* st); void print(outputStream* st);
#endif /* PRODUCT */ #endif /* PRODUCT */
...@@ -160,13 +169,14 @@ class vframeArray: public CHeapObj<mtCompiler> { ...@@ -160,13 +169,14 @@ class vframeArray: public CHeapObj<mtCompiler> {
int frames() const { return _frames; } int frames() const { return _frames; }
static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
RegisterMap* reg_map, frame sender, frame caller, frame self); RegisterMap* reg_map, frame sender, frame caller, frame self,
bool realloc_failures);
vframeArrayElement* element(int index) { assert(is_within_bounds(index), "Bad index"); return &_elements[index]; } vframeArrayElement* element(int index) { assert(is_within_bounds(index), "Bad index"); return &_elements[index]; }
// Allocates a new vframe in the array and fills the array with vframe information in chunk // Allocates a new vframe in the array and fills the array with vframe information in chunk
void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map); void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map, bool realloc_failures);
// Returns the owner of this vframeArray // Returns the owner of this vframeArray
JavaThread* owner_thread() const { return _owner_thread; } JavaThread* owner_thread() const { return _owner_thread; }
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8066900
* @summary FP registers are not properly restored by C1 when handling exceptions
* @run main/othervm -Xbatch SumTest
*
*/
public class SumTest {
private static class Sum {
double[] sums;
/**
* Construct empty Sum
*/
public Sum() {
sums = new double[0];
}
/**
* Return the sum of all numbers added to this Sum
*
* @return the sum
*/
final public double getSum() {
double sum = 0;
for (final double s : sums) {
sum += s;
}
return sum;
}
/**
* Add a new number to this Sum
*
* @param a number to be added.
*/
final public void add(double a) {
try {
sums[sums.length] = -1; // Cause IndexOutOfBoundsException
} catch (final IndexOutOfBoundsException e) {
final double[] oldSums = sums;
sums = new double[oldSums.length + 1]; // Extend sums
System.arraycopy(oldSums, 0, sums, 0, oldSums.length);
sums[oldSums.length] = a; // Append a
}
}
}
public static void main(String[] args) throws Exception {
final Sum sum = new Sum();
for (int i = 1; i <= 10000; ++i) {
sum.add(1);
double ii = sum.getSum();
if (i != ii) {
throw new Exception("Failure: computed = " + ii + ", expected = " + i);
}
}
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8066103
* @summary C2's range check smearing allows out of bound array accesses
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox /testlibrary/com/oracle/java/testlibrary
* @build TestRangeCheckSmearing
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
* @run main/othervm -ea -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearing
*
*/
import java.lang.annotation.*;
import java.lang.reflect.*;
import java.util.*;
import sun.hotspot.WhiteBox;
import sun.hotspot.code.NMethod;
import com.oracle.java.testlibrary.Platform;
public class TestRangeCheckSmearing {
private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
@Retention(RetentionPolicy.RUNTIME)
@interface Args { int[] value(); }
// first range check is i + max of all constants
@Args({0, 8})
static int m1(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+9];
if (allaccesses) {
res += array[i+8];
res += array[i+7];
res += array[i+6];
res += array[i+5];
res += array[i+4];
res += array[i+3];
res += array[i+2];
res += array[i+1];
}
return res;
}
// first range check is i + min of all constants
@Args({0, -9})
static int m2(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+1];
if (allaccesses) {
res += array[i+2];
res += array[i+3];
res += array[i+4];
res += array[i+5];
res += array[i+6];
res += array[i+7];
res += array[i+8];
res += array[i+9];
}
return res;
}
// first range check is not i + min/max of all constants
@Args({0, 8})
static int m3(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
if (allaccesses) {
res += array[i+2];
res += array[i+1];
res += array[i+4];
res += array[i+5];
res += array[i+6];
res += array[i+7];
res += array[i+8];
res += array[i+9];
}
return res;
}
@Args({0, -9})
static int m4(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
if (allaccesses) {
res += array[i+4];
res += array[i+1];
res += array[i+2];
res += array[i+5];
res += array[i+6];
res += array[i+7];
res += array[i+8];
res += array[i+9];
}
return res;
}
@Args({0, -3})
static int m5(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
res += array[i+2];
if (allaccesses) {
res += array[i+1];
res += array[i+4];
res += array[i+5];
res += array[i+6];
res += array[i+7];
res += array[i+8];
res += array[i+9];
}
return res;
}
@Args({0, 6})
static int m6(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
res += array[i+4];
if (allaccesses) {
res += array[i+2];
res += array[i+1];
res += array[i+5];
res += array[i+6];
res += array[i+7];
res += array[i+8];
res += array[i+9];
}
return res;
}
@Args({0, 6})
static int m7(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
res += array[i+2];
res += array[i+4];
if (allaccesses) {
res += array[i+1];
res += array[i+5];
res += array[i+6];
res += array[i+7];
res += array[i+8];
res += array[i+9];
}
return res;
}
@Args({0, -3})
static int m8(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
res += array[i+4];
res += array[i+2];
if (allaccesses) {
res += array[i+1];
res += array[i+5];
res += array[i+6];
res += array[i+7];
res += array[i+8];
res += array[i+9];
}
return res;
}
@Args({6, 15})
static int m9(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
if (allaccesses) {
res += array[i-2];
res += array[i-1];
res += array[i-4];
res += array[i-5];
res += array[i-6];
}
return res;
}
@Args({3, 12})
static int m10(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
if (allaccesses) {
res += array[i-2];
res += array[i-1];
res += array[i-3];
res += array[i+4];
res += array[i+5];
res += array[i+6];
}
return res;
}
@Args({3, -3})
static int m11(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
res += array[i-2];
if (allaccesses) {
res += array[i+5];
res += array[i+6];
}
return res;
}
@Args({3, 6})
static int m12(int[] array, int i, boolean allaccesses) {
int res = 0;
res += array[i+3];
res += array[i+6];
if (allaccesses) {
res += array[i-2];
res += array[i-3];
}
return res;
}
// check that identical range check is replaced by dominating one
// only when correct
@Args({0})
static int m13(int[] array, int i, boolean ignore) {
int res = 0;
res += array[i+3];
res += array[i+3];
return res;
}
@Args({2, 0})
static int m14(int[] array, int i, boolean ignore) {
int res = 0;
res += array[i];
res += array[i-2];
res += array[i]; // If the range check below were removed first, this one could not be considered identical to the first range check
res += array[i-1]; // range check removed so i-1 array access depends on previous check
return res;
}
static int[] m15_dummy = new int[10];
@Args({2, 0})
static int m15(int[] array, int i, boolean ignore) {
int res = 0;
res += array[i];
// When the loop is optimized out we don't want the
// array[i-1] access which is dependent on array[i]'s
// range check to become dependent on the identical range
// check above.
int[] array2 = m15_dummy;
int j = 0;
for (; j < 10; j++);
if (j == 10) {
array2 = array;
}
res += array2[i-2];
res += array2[i];
res += array2[i-1]; // range check removed so i-1 array access depends on previous check
return res;
}
@Args({2, 0})
static int m16(int[] array, int i, boolean ignore) {
int res = 0;
res += array[i];
res += array[i-1];
res += array[i-1];
res += array[i-2];
return res;
}
@Args({2, 0})
static int m17(int[] array, int i, boolean ignore) {
int res = 0;
res += array[i];
res += array[i-2];
res += array[i-2];
res += array[i+2];
res += array[i+2];
res += array[i-1];
res += array[i-1];
return res;
}
static public void main(String[] args) {
if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
throw new AssertionError("Background compilation enabled");
}
new TestRangeCheckSmearing().doTests();
}
boolean success = true;
boolean exception = false;
final int[] array = new int[10];
final HashMap<String,Method> tests = new HashMap<>();
{
final Class<?> TEST_PARAM_TYPES[] = { int[].class, int.class, boolean.class };
for (Method m : this.getClass().getDeclaredMethods()) {
if (m.getName().matches("m[0-9]+")) {
assert(Modifier.isStatic(m.getModifiers())) : m;
assert(m.getReturnType() == int.class) : m;
assert(Arrays.equals(m.getParameterTypes(), TEST_PARAM_TYPES)) : m;
tests.put(m.getName(), m);
}
}
}
void invokeTest(Method m, int[] array, int index, boolean z) {
try {
m.invoke(null, array, index, z);
} catch (ReflectiveOperationException roe) {
Throwable ex = roe.getCause();
if (ex instanceof ArrayIndexOutOfBoundsException)
throw (ArrayIndexOutOfBoundsException) ex;
throw new AssertionError(roe);
}
}
void doTest(String name) {
Method m = tests.get(name);
tests.remove(name);
int[] args = m.getAnnotation(Args.class).value();
int index0 = args[0], index1;
boolean exceptionRequired = true;
if (args.length == 2) {
index1 = args[1];
} else {
// no negative test for this one
assert(args.length == 1);
assert(name.equals("m13"));
exceptionRequired = false;
index1 = index0;
}
// Get the method compiled.
if (!WHITE_BOX.isMethodCompiled(m)) {
// If not, try to compile it with C2
if(!WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION)) {
// C2 compiler not available, try to compile with C1
WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_SIMPLE);
}
}
if (!WHITE_BOX.isMethodCompiled(m)) {
throw new RuntimeException(m + " not compiled");
}
// valid access
invokeTest(m, array, index0, true);
if (!WHITE_BOX.isMethodCompiled(m)) {
throw new RuntimeException(m + " deoptimized on valid array access");
}
exception = false;
boolean test_success = true;
try {
invokeTest(m, array, index1, false);
} catch(ArrayIndexOutOfBoundsException aioob) {
exception = true;
System.out.println("ArrayIndexOutOfBoundsException thrown in "+name);
}
if (!exception) {
System.out.println("ArrayIndexOutOfBoundsException was not thrown in "+name);
}
if (Platform.isServer()) {
if (exceptionRequired == WHITE_BOX.isMethodCompiled(m)) {
System.out.println((exceptionRequired?"Didn't deoptimized":"deoptimized") + " in "+name);
test_success = false;
}
}
if (exception != exceptionRequired) {
System.out.println((exceptionRequired?"exception required but not thrown":"not exception required but thrown") + " in "+name);
test_success = false;
}
if (!test_success) {
success = false;
System.out.println("TEST FAILED: "+name);
}
}
void doTests() {
doTest("m1");
doTest("m2");
doTest("m3");
doTest("m4");
doTest("m5");
doTest("m6");
doTest("m7");
doTest("m8");
doTest("m9");
doTest("m10");
doTest("m11");
doTest("m12");
doTest("m13");
doTest("m14");
doTest("m15");
doTest("m16");
doTest("m17");
if (!success) {
throw new RuntimeException("Some tests failed");
}
assert(tests.isEmpty()) : tests;
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8048170
* @summary Following range check smearing, range check cannot be replaced by dominating identical test.
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearingLoopOpts
*
*/
public class TestRangeCheckSmearingLoopOpts {
static int dummy;
static int m1(int[] array, int i) {
for (;;) {
for (;;) {
if (array[i] < 0) { // range check (i+0) dominates equivalent check below
break;
}
i++;
}
// A control flow that stops IfNode::up_one_dom()
if ((i % 2) == 0) {
if ((array[i] % 2) == 0) {
dummy = i;
}
}
// IfNode::Ideal will rewrite some range checks if Compile::allow_range_check_smearing
if (array[i-1] == 9) { // range check (i-1) unchanged
int res = array[i-3]; // range check (i-3) unchanged
res += array[i]; // range check (i+0) unchanged
res += array[i-2]; // removed redundant range check
// the previous access might be hoisted by
// PhaseIdealLoop::split_if_with_blocks_post because
// it appears to have the same guard, but it also
// depends on the previous guards
return res;
}
i++;
}
}
static public void main(String[] args) {
int[] array = { 0, 1, 2, -3, 4, 5, -2, 7, 8, 9, -1 };
for (int i = 0; i < 20000; i++) {
m1(array, 0);
}
array[0] = -1;
try {
m1(array, 0);
} catch(ArrayIndexOutOfBoundsException aioobe) {}
}
}
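For context, here is a minimal stand-alone sketch of the pattern bug 8048170 is about (a hypothetical illustration, not part of this commit): once C2 smears the range check at array[i] so that it also covers a neighboring index, the access at array[i-2] looks redundant, yet it must not be hoisted past the guards it still depends on.
// Hypothetical sketch of range check smearing (not part of this commit).
// After warm-up, C2 may widen the check for array[i] to also cover
// array[i - 2]; the test above verifies that such a "covered" access is
// not moved above the guards it still depends on.
public class RangeCheckSmearingSketch {
    static int sum(int[] array, int i) {
        int res = array[i];      // range check (i+0)
        res += array[i - 2];     // may be covered by the smeared check above
        return res;
    }
    public static void main(String[] args) {
        int[] a = { 1, 2, 3, 4, 5 };
        for (int n = 0; n < 20000; n++) { // warm up so the method compiles
            sum(a, 2);
        }
        System.out.println(sum(a, 2));
    }
}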
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 6898462
 * @summary failed reallocations of scalar-replaced objects during deoptimization cause a crash
* @run main/othervm -XX:-BackgroundCompilation -XX:CompileCommand=exclude,TestDeoptOOM::main -XX:CompileCommand=exclude,TestDeoptOOM::m9_1 -Xmx128M TestDeoptOOM
*
*/
public class TestDeoptOOM {
long f1;
long f2;
long f3;
long f4;
long f5;
static class LinkedList {
LinkedList l;
long[] array;
LinkedList(LinkedList l, int size) {
array = new long[size];
this.l = l;
}
}
static LinkedList ll;
static void consume_all_memory() {
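// Fill the heap with progressively smaller nodes: retry with half the
// array size after each OutOfMemoryError until even small allocations
// fail, so the next allocation elsewhere is very likely to throw.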
int size = 128 * 1024 * 1024;
while(size > 0) {
try {
while(true) {
ll = new LinkedList(ll, size);
}
} catch(OutOfMemoryError oom) {
}
size = size / 2;
}
}
static void free_memory() {
ll = null;
}
static TestDeoptOOM m1(boolean deopt) {
try {
TestDeoptOOM tdoom = new TestDeoptOOM();
if (deopt) {
return tdoom;
}
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m1");
}
return null;
}
static TestDeoptOOM m2_1(boolean deopt) {
try {
TestDeoptOOM tdoom = new TestDeoptOOM();
if (deopt) {
return tdoom;
}
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m2_1");
}
return null;
}
static TestDeoptOOM m2(boolean deopt) {
try {
return m2_1(deopt);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m2");
}
return null;
}
static TestDeoptOOM m3_3(boolean deopt) {
try {
TestDeoptOOM tdoom = new TestDeoptOOM();
if (deopt) {
return tdoom;
}
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m3_3");
}
return null;
}
static boolean m3_2(boolean deopt) {
try {
return m3_3(deopt) != null;
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m3_2");
}
return false;
}
static TestDeoptOOM m3_1(boolean deopt) {
try {
TestDeoptOOM tdoom = new TestDeoptOOM();
if (m3_2(deopt)) {
return tdoom;
}
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m3_1");
}
return null;
}
static TestDeoptOOM m3(boolean deopt) {
try {
return m3_1(deopt);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m3");
}
return null;
}
static TestDeoptOOM m4(boolean deopt) {
try {
TestDeoptOOM tdoom = new TestDeoptOOM();
if (deopt) {
tdoom.f1 = 1L;
tdoom.f2 = 2L;
tdoom.f3 = 3L;
return tdoom;
}
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m4");
}
return null;
}
static TestDeoptOOM m5(boolean deopt) {
try {
TestDeoptOOM tdoom = new TestDeoptOOM();
synchronized(tdoom) {
if (deopt) {
return tdoom;
}
}
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m5");
}
return null;
}
synchronized TestDeoptOOM m6_1(boolean deopt) {
if (deopt) {
return this;
}
return null;
}
static TestDeoptOOM m6(boolean deopt) {
try {
TestDeoptOOM tdoom = new TestDeoptOOM();
return tdoom.m6_1(deopt);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m6");
}
return null;
}
static TestDeoptOOM m7_1(boolean deopt, Object lock) {
try {
synchronized(lock) {
TestDeoptOOM tdoom = new TestDeoptOOM();
if (deopt) {
return tdoom;
}
}
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m7_1");
}
return null;
}
static TestDeoptOOM m7(boolean deopt, Object lock) {
try {
return m7_1(deopt, lock);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m7");
}
return null;
}
static class A {
long f1;
long f2;
long f3;
long f4;
long f5;
}
static class B {
long f1;
long f2;
long f3;
long f4;
long f5;
A a;
}
static B m8(boolean deopt) {
try {
A a = new A();
B b = new B();
b.a = a;
if (deopt) {
return b;
}
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m8");
}
return null;
}
static void m9_1(int i) {
if (i > 90000) {
consume_all_memory();
}
}
static TestDeoptOOM m9() {
try {
for (int i = 0; i < 100000; i++) {
TestDeoptOOM tdoom = new TestDeoptOOM();
m9_1(i);
if (i > 90000) {
return tdoom;
}
}
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in m1");
}
return null;
}
public static void main(String[] args) {
for (int i = 0; i < 20000; i++) {
m1(false);
}
consume_all_memory();
try {
m1(true);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in main " + oom.getMessage());
}
free_memory();
for (int i = 0; i < 20000; i++) {
m2(false);
}
consume_all_memory();
try {
m2(true);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in main");
}
free_memory();
for (int i = 0; i < 20000; i++) {
m3(false);
}
consume_all_memory();
try {
m3(true);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in main");
}
free_memory();
for (int i = 0; i < 20000; i++) {
m4(false);
}
consume_all_memory();
try {
m4(true);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in main");
}
free_memory();
for (int i = 0; i < 20000; i++) {
m5(false);
}
consume_all_memory();
try {
m5(true);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in main");
}
free_memory();
for (int i = 0; i < 20000; i++) {
m6(false);
}
consume_all_memory();
try {
m6(true);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in main");
}
free_memory();
final Object lock = new Object();
for (int i = 0; i < 20000; i++) {
m7(false, lock);
}
consume_all_memory();
try {
m7(true, lock);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in main");
}
free_memory();
Thread thread = new Thread() {
public void run() {
System.out.println("Acquiring lock");
synchronized(lock) {
System.out.println("Lock acquired");
}
System.out.println("Lock released");
}
};
thread.start();
try {
thread.join();
} catch(InterruptedException ie) {
}
for (int i = 0; i < 20000; i++) {
m8(false);
}
consume_all_memory();
try {
m8(true);
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in main");
}
free_memory();
try {
m9();
} catch(OutOfMemoryError oom) {
free_memory();
System.out.println("OOM caught in main");
}
free_memory();
}
}
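TestDeoptOOM leans on escape analysis: on the hot path the allocated object never escapes, so C2 can scalar-replace it; when the rare branch finally deoptimizes with the heap exhausted, the object has to be rematerialized on the heap, which is exactly the situation of bug 6898462. A minimal sketch of that shape (all names hypothetical, not part of this commit):
// Hypothetical sketch (not part of this commit) of the shape TestDeoptOOM
// stresses: a non-escaping allocation that C2 may scalar-replace, plus a
// rarely taken branch whose deoptimization forces the object to be
// reallocated on the heap.
public class ReallocOnDeoptSketch {
    long f1;
    static ReallocOnDeoptSketch m(boolean deopt) {
        ReallocOnDeoptSketch s = new ReallocOnDeoptSketch(); // scalar-replacement candidate
        s.f1 = 42L;
        if (deopt) {   // uncommon trap: on deopt, s must be rematerialized
            return s;
        }
        return null;
    }
    public static void main(String[] args) {
        for (int i = 0; i < 20000; i++) {
            m(false);  // warm up the common path so the method compiles
        }
        System.out.println(m(true).f1); // take the rare path once
    }
}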
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8067144
* @summary -XX:+TraceDeoptimization tries to print realloc'ed objects even when there are none
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:+IgnoreUnrecognizedVMOptions -XX:+TraceDeoptimization TraceDeoptimizationNoRealloc
*
*/
public class TraceDeoptimizationNoRealloc {
static void m(boolean some_condition) {
if (some_condition) {
return;
}
}
public static void main(String[] args) {
for (int i = 0; i < 20000; i++) {
m(false);
}
m(true);
}
}
...@@ -26,6 +26,7 @@
 * @bug 8064667
 * @summary Sanity test for -XX:+CheckEndorsedAndExtDirs
 * @library /testlibrary
 * @build com.oracle.java.testlibrary.*
 * @run main/othervm EndorsedExtDirs
 */
...
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8066670
* @summary Testing -XX:+PrintSharedArchiveAndExit option
* @library /testlibrary
*/
import com.oracle.java.testlibrary.*;
public class PrintSharedArchiveAndExit {
public static void main(String[] args) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
try {
output.shouldContain("Loading classes to share");
output.shouldHaveExitValue(0);
// (1) With a valid archive
pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
"-XX:+PrintSharedArchiveAndExit", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("archive is valid");
output.shouldNotContain("java version"); // Should not print JVM version
output.shouldHaveExitValue(0); // Should report success in error code.
pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
"-XX:+PrintSharedArchiveAndExit");
output = new OutputAnalyzer(pb.start());
output.shouldContain("archive is valid");
output.shouldNotContain("Usage:"); // Should not print JVM help message
output.shouldHaveExitValue(0); // Should report success in error code.
// (2) With an invalid archive (boot class path has been prepended)
pb = ProcessTools.createJavaProcessBuilder(
"-Xbootclasspath/p:foo.jar",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
"-XX:+PrintSharedArchiveAndExit", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("archive is invalid");
output.shouldNotContain("java version"); // Should not print JVM version
output.shouldHaveExitValue(1); // Should report failure in error code.
pb = ProcessTools.createJavaProcessBuilder(
"-Xbootclasspath/p:foo.jar",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
"-XX:+PrintSharedArchiveAndExit");
output = new OutputAnalyzer(pb.start());
output.shouldContain("archive is invalid");
output.shouldNotContain("Usage:"); // Should not print JVM help message
output.shouldHaveExitValue(1); // Should report failure in error code.
} catch (RuntimeException e) {
e.printStackTrace();
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(1);
}
}
}
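The first scenario above can also be reproduced without the test library; a hedged sketch using only ProcessBuilder (the class name and the run helper are hypothetical, the VM flags are the ones the test passes):
import java.io.File;

// Hypothetical stand-alone version of scenario (1), not part of this
// commit: dump an archive, then ask the VM to print it and exit.
// An exit code of 0 indicates the archive was reported valid.
public class PrintArchiveSketch {
    public static void main(String[] args) throws Exception {
        String java = System.getProperty("java.home")
                + File.separator + "bin" + File.separator + "java";
        run(java, "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
        int exit = run(java, "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./sample.jsa",
            "-XX:+PrintSharedArchiveAndExit", "-version");
        System.out.println("exit code: " + exit); // 0 for a valid archive
    }
    static int run(String... cmd) throws Exception {
        return new ProcessBuilder(cmd).inheritIO().start().waitFor();
    }
}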