Commit 704ab198 authored by asaha

Merge

@@ -904,4 +904,12 @@ e4525db272634b980738003eff99ac1588bb79d3 jdk8u111-b05
 019b22dd8128840ecdcd1bfebcf4447e28e45068 jdk8u111-b06
 3f337aaf090769653ee0a746fbe661d09055a883 jdk8u111-b07
 e180e364a40364a059a20c74b97ab4e928e2b676 jdk8u111-b08
+b09a69142dd3bf78ca66bb0c99046ca7cccbdda9 jdk8u112-b00
+cf1faa9100dd8c8df6e1a604aaf613d037f51ebf jdk8u112-b01
+f22b5be95347c669a1463d9e05ec3bf11420208e jdk8u112-b02
+f2f59d888427b1db336766bc8cd272f2b2d84e1a jdk8u112-b03
+c171546c49b5ff57bcb74bb54e8860647f83f268 jdk8u112-b04
+4b7af794466ba22461ed043a1394df43e4993c4f jdk8u112-b06
+55ed9b0a35e4ad4bb5ca3f393f6749e81ad9fef0 jdk8u112-b07
+670f8169b83c6af14339fe37b2a2b8384dc2149b jdk8u112-b08
 3b0e5f01891f5ebbf67797b1aae786196f1bb4f6 jdk8u121-b00
@@ -3,7 +3,7 @@ The GNU General Public License (GPL)
 Version 2, June 1991
 Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 Everyone is permitted to copy and distribute verbatim copies of this license
 document, but changing it is not allowed.
@@ -287,8 +287,8 @@ pointer to where the full notice is found.
 more details.
 You should have received a copy of the GNU General Public License along
-with this program; if not, write to the Free Software Foundation, Inc., 59
-Temple Place, Suite 330, Boston, MA 02111-1307 USA
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 Also add information on how to contact you by electronic and paper mail.
......
@@ -94,7 +94,7 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
 const int IC_pos_in_java_to_interp_stub = 8;
 #define __ _masm.
-void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 #ifdef COMPILER2
 // Get the mark within main instrs section which is set to the address of the call.
 address call_addr = cbuf.insts_mark();
@@ -106,8 +106,7 @@ void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 // Start the stub.
 address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
 if (stub == NULL) {
-Compile::current()->env()->record_out_of_memory_failure();
-return;
+return NULL; // CodeCache is full
 }
 // For java_to_interp stubs we use R11_scratch1 as scratch register
@@ -149,6 +148,7 @@ void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 // End the stub.
 __ end_a_stub();
+return stub;
 #else
 ShouldNotReachHere();
 #endif
......
@@ -1171,7 +1171,7 @@ void CallStubImpl::emit_trampoline_stub(MacroAssembler &_masm, int destination_t
 // Start the stub.
 address stub = __ start_a_stub(Compile::MAX_stubs_size/2);
 if (stub == NULL) {
-Compile::current()->env()->record_out_of_memory_failure();
+ciEnv::current()->record_failure("CodeCache is full");
 return;
 }
@@ -1249,7 +1249,7 @@ EmitCallOffsets emit_call_with_trampoline_stub(MacroAssembler &_masm, address en
 // Emit the trampoline stub which will be related to the branch-and-link below.
 CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
-if (Compile::current()->env()->failing()) { return offsets; } // Code cache may be full.
+if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
 __ relocate(rtype);
 }
@@ -3488,7 +3488,7 @@ encode %{
 // Emit the trampoline stub which will be related to the branch-and-link below.
 CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
-if (Compile::current()->env()->failing()) { return; } // Code cache may be full.
+if (ciEnv::current()->failing()) { return; } // Code cache may be full.
 __ relocate(_optimized_virtual ?
 relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
 }
@@ -3501,7 +3501,11 @@ encode %{
 __ bl(__ pc()); // Emits a relocation.
 // The stub for call to interpreter.
-CompiledStaticCall::emit_to_interp_stub(cbuf);
+address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+if (stub == NULL) {
+ciEnv::current()->record_failure("CodeCache is full");
+return;
+}
 }
 %}
@@ -3546,7 +3550,11 @@ encode %{
 assert(_method, "execute next statement conditionally");
 // The stub for call to interpreter.
-CompiledStaticCall::emit_to_interp_stub(cbuf);
+address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+if (stub == NULL) {
+ciEnv::current()->record_failure("CodeCache is full");
+return;
+}
 // Restore original sp.
 __ ld(R11_scratch1, 0, R1_SP); // Load caller sp.
@@ -5461,7 +5469,7 @@ instruct loadUI2L(iRegLdst dst, memory mem, immL_32bits mask) %{
 %}
 // Match loading integer and casting it to long.
-instruct loadI2L(iRegLdst dst, memory mem) %{
+instruct loadI2L(iRegLdst dst, memoryAlg4 mem) %{
 match(Set dst (ConvI2L (LoadI mem)));
 predicate(_kids[0]->_leaf->as_Load()->is_unordered());
 ins_cost(MEMORY_REF_COST);
@@ -5477,7 +5485,7 @@ instruct loadI2L(iRegLdst dst, memory mem) %{
 %}
 // Match loading integer and casting it to long - acquire.
-instruct loadI2L_ac(iRegLdst dst, memory mem) %{
+instruct loadI2L_ac(iRegLdst dst, memoryAlg4 mem) %{
 match(Set dst (ConvI2L (LoadI mem)));
 ins_cost(3*MEMORY_REF_COST);
......
@@ -431,6 +431,9 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
 __ mov(length()->as_register(), O4);
 ce->emit_static_call_stub();
+if (ce->compilation()->bailed_out()) {
+return; // CodeCache is full
+}
 __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
 __ delayed()->nop();
......
@@ -53,7 +53,7 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
 // ----------------------------------------------------------------------------
 #define __ _masm.
-void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 #ifdef COMPILER2
 // Stub is fixed up when the corresponding call is converted from calling
 // compiled code to calling interpreted code.
@@ -64,9 +64,10 @@ void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 MacroAssembler _masm(&cbuf);
-address base =
-__ start_a_stub(to_interp_stub_size()*2);
-if (base == NULL) return; // CodeBuffer::expand failed.
+address base = __ start_a_stub(to_interp_stub_size());
+if (base == NULL) {
+return NULL; // CodeBuffer::expand failed.
+}
 // Static stub relocation stores the instruction address of the call.
 __ relocate(static_stub_Relocation::spec(mark));
@@ -81,6 +82,7 @@ void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 // Update current stubs pointer and restore code_end.
 __ end_a_stub();
+return base;
 #else
 ShouldNotReachHere();
 #endif
......
@@ -1775,9 +1775,11 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
 AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
 MacroAssembler _masm(&cbuf);
-address base =
-__ start_a_stub(size_exception_handler());
-if (base == NULL) return 0; // CodeBuffer::expand failed
+address base = __ start_a_stub(size_exception_handler());
+if (base == NULL) {
+ciEnv::current()->record_failure("CodeCache is full");
+return 0; // CodeBuffer::expand failed
+}
 int offset = __ offset();
@@ -1798,9 +1800,11 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
 MacroAssembler _masm(&cbuf);
-address base =
-__ start_a_stub(size_deopt_handler());
-if (base == NULL) return 0; // CodeBuffer::expand failed
+address base = __ start_a_stub(size_deopt_handler());
+if (base == NULL) {
+ciEnv::current()->record_failure("CodeCache is full");
+return 0; // CodeBuffer::expand failed
+}
 int offset = __ offset();
 __ save_frame(0);
@@ -2601,7 +2605,12 @@ encode %{
 emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
 }
 if (_method) { // Emit stub for static call.
-CompiledStaticCall::emit_to_interp_stub(cbuf);
+address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+// Stub does not fit into scratch buffer if TraceJumps is enabled
+if (stub == NULL && !(TraceJumps && Compile::current()->in_scratch_emit_size())) {
+ciEnv::current()->record_failure("CodeCache is full");
+return;
+}
 }
 %}
......
@@ -502,6 +502,9 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
 ce->align_call(lir_static_call);
 ce->emit_static_call_stub();
+if (ce->compilation()->bailed_out()) {
+return; // CodeCache is full
+}
 AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
 relocInfo::static_call_type);
 __ call(resolve);
......
@@ -50,7 +50,7 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
 // ----------------------------------------------------------------------------
 #define __ _masm.
-void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 // Stub is fixed up when the corresponding call is converted from
 // calling compiled code to calling interpreted code.
 // movq rbx, 0
@@ -62,9 +62,10 @@ void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 // That's why we must use the macroassembler to generate a stub.
 MacroAssembler _masm(&cbuf);
-address base =
-__ start_a_stub(to_interp_stub_size()*2);
-if (base == NULL) return; // CodeBuffer::expand failed.
+address base = __ start_a_stub(to_interp_stub_size());
+if (base == NULL) {
+return NULL; // CodeBuffer::expand failed.
+}
 // Static stub relocation stores the instruction address of the call.
 __ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
 // Static stub relocation also tags the Method* in the code-stream.
@@ -74,6 +75,7 @@ void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 // Update current stubs pointer and restore insts_end.
 __ end_a_stub();
+return base;
 }
 #undef __
......
@@ -550,7 +550,10 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
 // That's why we must use the macroassembler to generate a handler.
 MacroAssembler _masm(&cbuf);
 address base = __ start_a_stub(size_exception_handler());
-if (base == NULL) return 0; // CodeBuffer::expand failed
+if (base == NULL) {
+ciEnv::current()->record_failure("CodeCache is full");
+return 0; // CodeBuffer::expand failed
+}
 int offset = __ offset();
 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
@@ -565,7 +568,10 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
 // That's why we must use the macroassembler to generate a handler.
 MacroAssembler _masm(&cbuf);
 address base = __ start_a_stub(size_deopt_handler());
-if (base == NULL) return 0; // CodeBuffer::expand failed
+if (base == NULL) {
+ciEnv::current()->record_failure("CodeCache is full");
+return 0; // CodeBuffer::expand failed
+}
 int offset = __ offset();
 #ifdef _LP64
......
@@ -1870,7 +1870,11 @@ encode %{
 static_call_Relocation::spec(), RELOC_IMM32 );
 }
 if (_method) { // Emit stub for static call.
-CompiledStaticCall::emit_to_interp_stub(cbuf);
+address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+if (stub == NULL) {
+ciEnv::current()->record_failure("CodeCache is full");
+return;
+}
 }
 %}
......
@@ -2125,7 +2125,11 @@ encode %{
 }
 if (_method) {
 // Emit stub for static call.
-CompiledStaticCall::emit_to_interp_stub(cbuf);
+address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+if (stub == NULL) {
+ciEnv::current()->record_failure("CodeCache is full");
+return;
+}
 }
 %}
......
@@ -60,8 +60,9 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
 // ----------------------------------------------------------------------------
-void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
 ShouldNotReachHere(); // Only needed for COMPILER2.
+return NULL;
 }
 int CompiledStaticCall::to_interp_stub_size() {
......
@@ -220,9 +220,16 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
 // Push our result
 for (int i = 0; i < result_slots; i++) {
 // Adjust result to smaller
-intptr_t res = result[-i];
+union {
+intptr_t res;
+jint res_jint;
+};
+res = result[-i];
 if (result_slots == 1) {
-res = narrow(method->result_type(), res);
+BasicType t = method->result_type();
+if (is_subword_type(t)) {
+res_jint = (jint)narrow(t, res_jint);
+}
 }
 stack->push(res);
 }
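Editor's note: "narrowing" here means coercing a sub-int result (boolean, byte, char, short) back to its canonical 32-bit form before the slot is pushed; the union in the new code lets that happen on the jint view of the slot without disturbing the full intptr_t. The following minimal standalone sketch illustrates the idea; narrow_to() is a hypothetical stand-in for the VM's narrow()/is_subword_type() helpers, not their actual definitions.

    #include <cstdint>
    #include <cassert>

    // Hypothetical stand-in for the VM's narrow(): coerce a value that was
    // computed in a full 32-bit register back to its declared subword type.
    static int32_t narrow_to(char kind, int32_t v) {
      switch (kind) {
        case 'Z': return v & 1;        // boolean: keep only the low bit
        case 'B': return (int8_t)v;    // byte:    sign-extend from 8 bits
        case 'C': return (uint16_t)v;  // char:    zero-extend from 16 bits
        case 'S': return (int16_t)v;   // short:   sign-extend from 16 bits
        default:  return v;            // int and wider are left as-is
      }
    }

    int main() {
      assert(narrow_to('Z', 2) == 0);        // only bit 0 survives
      assert(narrow_to('B', 0x1FF) == -1);   // 0xFF sign-extends to -1
      assert(narrow_to('C', -1) == 0xFFFF);  // char is unsigned 16-bit
      return 0;
    }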
......
@@ -3047,6 +3047,48 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
 return addr == MAP_FAILED ? NULL : addr;
 }
+// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
+// (req_addr != NULL) or with a given alignment.
+// - bytes shall be a multiple of alignment.
+// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
+// - alignment sets the alignment at which memory shall be allocated.
+// It must be a multiple of allocation granularity.
+// Returns address of memory or NULL. If req_addr was not NULL, will only return
+// req_addr or NULL.
+static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
+size_t extra_size = bytes;
+if (req_addr == NULL && alignment > 0) {
+extra_size += alignment;
+}
+char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
+MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
+-1, 0);
+if (start == MAP_FAILED) {
+start = NULL;
+} else {
+if (req_addr != NULL) {
+if (start != req_addr) {
+::munmap(start, extra_size);
+start = NULL;
+}
+} else {
+char* const start_aligned = (char*) align_ptr_up(start, alignment);
+char* const end_aligned = start_aligned + bytes;
+char* const end = start + extra_size;
+if (start_aligned > start) {
+::munmap(start, start_aligned - start);
+}
+if (end_aligned < end) {
+::munmap(end_aligned, end - end_aligned);
+}
+start = start_aligned;
+}
+}
+return start;
+}
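Editor's note: anon_mmap_aligned() works by over-reserving bytes + alignment, rounding the start up, and unmapping the unused head and tail. A minimal standalone sketch of the same trick, assuming Linux and with a local align_up() standing in for the VM's align_ptr_up():

    #include <sys/mman.h>
    #include <stdint.h>
    #include <stdio.h>

    // Stand-in for align_ptr_up(): round p up to the next multiple of alignment
    // (alignment must be a power of two).
    static char* align_up(char* p, size_t alignment) {
      uintptr_t v = (uintptr_t)p;
      return (char*)((v + alignment - 1) & ~(alignment - 1));
    }

    int main() {
      const size_t bytes = 1024 * 1024;      // 1 MB payload
      const size_t alignment = 256 * 1024;   // want a 256 KB aligned start
      size_t extra = bytes + alignment;      // over-reserve to guarantee a fit

      char* raw = (char*)mmap(NULL, extra, PROT_NONE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return 1;

      char* start = align_up(raw, alignment);           // aligned window inside the raw mapping
      char* end   = start + bytes;
      if (start > raw)       munmap(raw, start - raw);           // trim the head
      if (end < raw + extra) munmap(end, (raw + extra) - end);   // trim the tail

      printf("aligned reservation at %p\n", (void*)start);
      munmap(start, bytes);
      return 0;
    }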
 // Don't update _highest_vm_reserved_address, because there might be memory
 // regions above addr + size. If so, releasing a memory region only creates
 // a hole in the address space, it doesn't help prevent heap-stack collision.
@@ -3331,54 +3373,133 @@ void os::large_page_init() {
 #define SHM_HUGETLB 04000
 #endif
+#define shm_warning_format(format, ...) \
+do { \
+if (UseLargePages && \
+(!FLAG_IS_DEFAULT(UseLargePages) || \
+!FLAG_IS_DEFAULT(UseSHM) || \
+!FLAG_IS_DEFAULT(LargePageSizeInBytes))) { \
+warning(format, __VA_ARGS__); \
+} \
+} while (0)
+#define shm_warning(str) shm_warning_format("%s", str)
+#define shm_warning_with_errno(str) \
+do { \
+int err = errno; \
+shm_warning_format(str " (error = %d)", err); \
+} while (0)
+static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
+assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");
+if (!is_size_aligned(alignment, SHMLBA)) {
+assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
+return NULL;
+}
+// To ensure that we get 'alignment' aligned memory from shmat,
+// we pre-reserve aligned virtual memory and then attach to that.
+char* pre_reserved_addr = anon_mmap_aligned(bytes, alignment, NULL);
+if (pre_reserved_addr == NULL) {
+// Couldn't pre-reserve aligned memory.
+shm_warning("Failed to pre-reserve aligned memory for shmat.");
+return NULL;
+}
+// SHM_REMAP is needed to allow shmat to map over an existing mapping.
+char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);
+if ((intptr_t)addr == -1) {
+int err = errno;
+shm_warning_with_errno("Failed to attach shared memory.");
+assert(err != EACCES, "Unexpected error");
+assert(err != EIDRM, "Unexpected error");
+assert(err != EINVAL, "Unexpected error");
+// Since we don't know if the kernel unmapped the pre-reserved memory area
+// we can't unmap it, since that would potentially unmap memory that was
+// mapped from other threads.
+return NULL;
+}
+return addr;
+}
+static char* shmat_at_address(int shmid, char* req_addr) {
+if (!is_ptr_aligned(req_addr, SHMLBA)) {
+assert(false, "Requested address needs to be SHMLBA aligned");
+return NULL;
+}
+char* addr = (char*)shmat(shmid, req_addr, 0);
+if ((intptr_t)addr == -1) {
+shm_warning_with_errno("Failed to attach shared memory.");
+return NULL;
+}
+return addr;
+}
+static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
+// If a req_addr has been provided, we assume that the caller has already aligned the address.
+if (req_addr != NULL) {
+assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
+assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
+return shmat_at_address(shmid, req_addr);
+}
+// Since shmid has been setup with SHM_HUGETLB, shmat will automatically
+// return large page size aligned memory addresses when req_addr == NULL.
+// However, if the alignment is larger than the large page size, we have
+// to manually ensure that the memory returned is 'alignment' aligned.
+if (alignment > os::large_page_size()) {
+assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
+return shmat_with_alignment(shmid, bytes, alignment);
+} else {
+return shmat_at_address(shmid, NULL);
+}
+}
 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
 // "exec" is passed in but not used. Creating the shared image for
 // the code cache doesn't have an SHM_X executable permission to check.
 assert(UseLargePages && UseSHM, "only for SHM large pages");
 assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");
-if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+if (!is_size_aligned(bytes, os::large_page_size())) {
 return NULL; // Fallback to small pages.
 }
-key_t key = IPC_PRIVATE;
-char *addr;
-bool warn_on_failure = UseLargePages &&
-(!FLAG_IS_DEFAULT(UseLargePages) ||
-!FLAG_IS_DEFAULT(UseSHM) ||
-!FLAG_IS_DEFAULT(LargePageSizeInBytes)
-);
-char msg[128];
 // Create a large shared memory region to attach to based on size.
-// Currently, size is the total size of the heap
-int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
+// Currently, size is the total size of the heap.
+int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
 if (shmid == -1) {
 // Possible reasons for shmget failure:
 // 1. shmmax is too small for Java heap.
 // > check shmmax value: cat /proc/sys/kernel/shmmax
 // > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
 // 2. not enough large page memory.
 // > check available large pages: cat /proc/meminfo
 // > increase amount of large pages:
 // echo new_value > /proc/sys/vm/nr_hugepages
 // Note 1: different Linux may use different name for this property,
 // e.g. on Redhat AS-3 it is "hugetlb_pool".
 // Note 2: it's possible there's enough physical memory available but
 // they are so fragmented after a long run that they can't
 // coalesce into large pages. Try to reserve large pages when
 // the system is still "fresh".
-if (warn_on_failure) {
-jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
-warning("%s", msg);
-}
-return NULL;
+shm_warning_with_errno("Failed to reserve shared memory.");
+return NULL;
 }
-// attach to the region
-addr = (char*)shmat(shmid, req_addr, 0);
-int err = errno;
+// Attach to the region.
+char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);
 // Remove shmid. If shmat() is successful, the actual shared memory segment
 // will be deleted when it's detached by shmdt() or when the process
@@ -3386,14 +3507,6 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char
 // segment immediately.
 shmctl(shmid, IPC_RMID, NULL);
-if ((intptr_t)addr == -1) {
-if (warn_on_failure) {
-jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
-warning("%s", msg);
-}
-return NULL;
-}
 return addr;
 }
@@ -3433,22 +3546,22 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_
 return addr;
 }
+// Reserve memory using mmap(MAP_HUGETLB).
+// - bytes shall be a multiple of alignment.
+// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
+// - alignment sets the alignment at which memory shall be allocated.
+// It must be a multiple of allocation granularity.
+// Returns address of memory or NULL. If req_addr was not NULL, will only return
+// req_addr or NULL.
 char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
 size_t large_page_size = os::large_page_size();
 assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
-// Allocate small pages.
-char* start;
-if (req_addr != NULL) {
-assert(is_ptr_aligned(req_addr, alignment), "Must be");
-assert(is_size_aligned(bytes, alignment), "Must be");
-start = os::reserve_memory(bytes, req_addr);
-assert(start == NULL || start == req_addr, "Must be");
-} else {
-start = os::reserve_memory_aligned(bytes, alignment);
-}
+assert(is_ptr_aligned(req_addr, alignment), "Must be");
+assert(is_size_aligned(bytes, alignment), "Must be");
+// First reserve - but not commit - the address range in small pages.
+char* const start = anon_mmap_aligned(bytes, alignment, req_addr);
 if (start == NULL) {
 return NULL;
@@ -3456,13 +3569,6 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
 assert(is_ptr_aligned(start, alignment), "Must be");
-if (MemTracker::tracking_level() > NMT_minimal) {
-// os::reserve_memory_special will record this memory area.
-// Need to release it here to prevent overlapping reservations.
-Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-tkr.record((address)start, bytes);
-}
 char* end = start + bytes;
 // Find the regions of the allocated chunk that can be promoted to large pages.
@@ -3482,9 +3588,9 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
 void* result;
+// Commit small-paged leading area.
 if (start != lp_start) {
 result = ::mmap(start, lp_start - start, prot,
 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
@@ -3495,11 +3601,12 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
 }
 }
+// Commit large-paged area.
 result = ::mmap(lp_start, lp_bytes, prot,
 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
 -1, 0);
 if (result == MAP_FAILED) {
-warn_on_large_pages_failure(req_addr, bytes, errno);
+warn_on_large_pages_failure(lp_start, lp_bytes, errno);
 // If the mmap above fails, the large pages region will be unmapped and we
 // have regions before and after with small pages. Release these regions.
 //
@@ -3512,6 +3619,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
 return NULL;
 }
+// Commit small-paged trailing area.
 if (lp_end != end) {
 result = ::mmap(lp_end, end - lp_end, prot,
 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
@@ -3528,7 +3636,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
 char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
 assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
 assert(is_ptr_aligned(req_addr, alignment), "Must be");
-assert(is_power_of_2(alignment), "Must be");
+assert(is_size_aligned(alignment, os::vm_allocation_granularity()), "Must be");
 assert(is_power_of_2(os::large_page_size()), "Must be");
 assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
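Editor's note: the mixed-mapping path above splits the reservation into a small-paged head, a large-paged middle, and a small-paged tail, based on where large-page boundaries fall inside [start, end). A small hedged sketch of that arithmetic; align_up/align_down are local stand-ins for the VM's alignment helpers, and the page size and addresses are illustrative only.

    #include <cstdint>
    #include <cstdio>

    static uintptr_t align_up(uintptr_t p, uintptr_t a)   { return (p + a - 1) & ~(a - 1); }
    static uintptr_t align_down(uintptr_t p, uintptr_t a) { return p & ~(a - 1); }

    int main() {
      const uintptr_t large_page = 2 * 1024 * 1024;      // assume 2 MB huge pages
      uintptr_t start = 0x7f0000100000;                   // example small-page aligned start
      uintptr_t end   = start + 5 * 1024 * 1024;          // 5 MB reservation

      uintptr_t lp_start = align_up(start, large_page);   // first large-page boundary in range
      uintptr_t lp_end   = align_down(end, large_page);   // last large-page boundary in range

      printf("head (small pages): %zu bytes\n", (size_t)(lp_start - start));
      printf("body (large pages): %zu bytes\n", (size_t)(lp_end - lp_start));
      printf("tail (small pages): %zu bytes\n", (size_t)(end - lp_end));
      return 0;
    }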
@@ -6102,47 +6210,100 @@ class TestReserveMemorySpecial : AllStatic {
 }
 }
-static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
-if (!UseHugeTLBFS) {
-return;
-}
-test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
-size, alignment);
-assert(size >= os::large_page_size(), "Incorrect input to test");
-char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
-if (addr != NULL) {
-small_page_write(addr, size);
-os::Linux::release_memory_special_huge_tlbfs(addr, size);
-}
-}
-static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
-size_t lp = os::large_page_size();
-size_t ag = os::vm_allocation_granularity();
-for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
-test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
-}
-}
-static void test_reserve_memory_special_huge_tlbfs_mixed() {
-size_t lp = os::large_page_size();
-size_t ag = os::vm_allocation_granularity();
-test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
-test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
-test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
-test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
-test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
-test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
-test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
-test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
-test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
-}
+static void test_reserve_memory_special_huge_tlbfs_mixed() {
+size_t lp = os::large_page_size();
+size_t ag = os::vm_allocation_granularity();
+// sizes to test
+const size_t sizes[] = {
+lp, lp + ag, lp + lp / 2, lp * 2,
+lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
+lp * 10, lp * 10 + lp / 2
+};
+const int num_sizes = sizeof(sizes) / sizeof(size_t);
+// For each size/alignment combination, we test three scenarios:
+// 1) with req_addr == NULL
+// 2) with a non-null req_addr at which we expect to successfully allocate
+// 3) with a non-null req_addr which contains a pre-existing mapping, at which we
+// expect the allocation to either fail or to ignore req_addr
+// Pre-allocate two areas; they shall be as large as the largest allocation
+// and aligned to the largest alignment we will be testing.
+const size_t mapping_size = sizes[num_sizes - 1] * 2;
+char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
+PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
+-1, 0);
+assert(mapping1 != MAP_FAILED, "should work");
+char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
+PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
+-1, 0);
+assert(mapping2 != MAP_FAILED, "should work");
+// Unmap the first mapping, but leave the second mapping intact: the first
+// mapping will serve as a value for a "good" req_addr (case 2). The second
+// mapping, still intact, as "bad" req_addr (case 3).
+::munmap(mapping1, mapping_size);
+// Case 1
+test_log("%s, req_addr NULL:", __FUNCTION__);
+test_log("size align result");
+for (int i = 0; i < num_sizes; i++) {
+const size_t size = sizes[i];
+for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " -> " PTR_FORMAT " %s",
+size, alignment, p, (p != NULL ? "" : "(failed)"));
+if (p != NULL) {
+assert(is_ptr_aligned(p, alignment), "must be");
+small_page_write(p, size);
+os::Linux::release_memory_special_huge_tlbfs(p, size);
+}
+}
+}
+// Case 2
+test_log("%s, req_addr non-NULL:", __FUNCTION__);
+test_log("size align req_addr result");
+for (int i = 0; i < num_sizes; i++) {
+const size_t size = sizes[i];
+for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+char* const req_addr = (char*) align_ptr_up(mapping1, alignment);
+char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
+test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
+size, alignment, req_addr, p,
+((p != NULL ? (p == req_addr ? "(exact match)" : "") : "(failed)")));
+if (p != NULL) {
+assert(p == req_addr, "must be");
+small_page_write(p, size);
+os::Linux::release_memory_special_huge_tlbfs(p, size);
+}
+}
+}
+// Case 3
+test_log("%s, req_addr non-NULL with preexisting mapping:", __FUNCTION__);
+test_log("size align req_addr result");
+for (int i = 0; i < num_sizes; i++) {
+const size_t size = sizes[i];
+for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+char* const req_addr = (char*) align_ptr_up(mapping2, alignment);
+char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
+test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
+size, alignment, req_addr, p,
+((p != NULL ? "" : "(failed)")));
+// as the area around req_addr contains already existing mappings, the API should always
+// return NULL (as per contract, it cannot return another address)
+assert(p == NULL, "must be");
+}
+}
+::munmap(mapping2, mapping_size);
+}
 static void test_reserve_memory_special_huge_tlbfs() {
......
@@ -464,6 +464,7 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
 // emit the static call stub stuff out of line
 emit_static_call_stub();
+CHECK_BAILOUT();
 switch (op->code()) {
 case lir_static_call:
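Editor's note: the added CHECK_BAILOUT() stops the emitter as soon as the compilation has bailed out (here, because the static call stub could not be emitted into a full code cache). The following is a hypothetical illustration of that guard pattern only; the real CHECK_BAILOUT() macro in C1 may be defined differently.

    // Illustration of the guard pattern: stop emitting once the compilation
    // has bailed out (e.g. because the code cache is full).
    struct Compilation {
      bool _bailed_out = false;
      bool bailed_out() const { return _bailed_out; }
    };

    struct Emitter {
      Compilation* _comp;
      Compilation* compilation() const { return _comp; }

      void emit_static_call_stub() { /* may set _comp->_bailed_out on failure */ }

      void emit_call() {
        emit_static_call_stub();
        if (compilation()->bailed_out()) return;  // what the guard amounts to, conceptually
        // ... emit the actual call ...
      }
    };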
......
@@ -537,6 +537,9 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
 int name_index = cp->name_ref_index_at(index);
 Symbol* name = cp->symbol_at(name_index);
 Symbol* sig = cp->symbol_at(sig_index);
+guarantee_property(sig->utf8_length() != 0,
+"Illegal zero length constant pool entry at %d in class %s",
+sig_index, CHECK_(nullHandle));
 if (sig->byte_at(0) == JVM_SIGNATURE_FUNC) {
 verify_legal_method_signature(name, sig, CHECK_(nullHandle));
 } else {
@@ -560,8 +563,9 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
 verify_legal_field_name(name, CHECK_(nullHandle));
 if (_need_verify && _major_version >= JAVA_7_VERSION) {
 // Signature is verified above, when iterating NameAndType_info.
-// Need only to be sure it's the right type.
-if (signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
+// Need only to be sure it's non-zero length and the right type.
+if (signature->utf8_length() == 0 ||
+signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
 throwIllegalSignature(
 "Field", name, signature, CHECK_(nullHandle));
 }
@@ -572,8 +576,9 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
 verify_legal_method_name(name, CHECK_(nullHandle));
 if (_need_verify && _major_version >= JAVA_7_VERSION) {
 // Signature is verified above, when iterating NameAndType_info.
-// Need only to be sure it's the right type.
-if (signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
+// Need only to be sure it's non-zero length and the right type.
+if (signature->utf8_length() == 0 ||
+signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
 throwIllegalSignature(
 "Method", name, signature, CHECK_(nullHandle));
 }
@@ -584,8 +589,7 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
 // 4509014: If a class method name begins with '<', it must be "<init>".
 assert(name != NULL, "method name in constant pool is null");
 unsigned int name_len = name->utf8_length();
-assert(name_len > 0, "bad method name"); // already verified as legal name
-if (name->byte_at(0) == '<') {
+if (name_len != 0 && name->byte_at(0) == '<') {
 if (name != vmSymbols::object_initializer_name()) {
 classfile_parse_error(
 "Bad method name at constant pool index %u in class file %s",
......
@@ -813,6 +813,12 @@ void ClassLoaderDataGraph::free_deallocate_lists() {
 // called on all alive classes. See the comment in ClassLoaderDataGraph::clean_metaspaces.
 cld->free_deallocate_list();
 }
+// In some rare cases items added to the unloading list will not be freed elsewhere.
+// To keep it simple, walk the _unloading list also.
+for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
+cld->free_deallocate_list();
+}
 }
 // CDS support
......
@@ -40,7 +40,7 @@ inline ClassLoaderData* ClassLoaderData::class_loader_data(oop loader) {
 inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader, TRAPS) {
-assert(loader() != NULL,"Must be a class loader");
+guarantee(loader() != NULL && loader()->is_oop(), "Loader must be oop");
 // Gets the class loader data out of the java/lang/ClassLoader object, if non-null
 // it's already in the loader_data, so no need to add
 ClassLoaderData* loader_data= java_lang_ClassLoader::loader_data(loader());
......
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -517,6 +517,7 @@
 template(int_StringBuffer_signature, "(I)Ljava/lang/StringBuffer;") \
 template(char_StringBuffer_signature, "(C)Ljava/lang/StringBuffer;") \
 template(int_String_signature, "(I)Ljava/lang/String;") \
+template(codesource_permissioncollection_signature, "(Ljava/security/CodeSource;Ljava/security/PermissionCollection;)V") \
 /* signature symbols needed by intrinsics */ \
 VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, template, VM_ALIAS_IGNORE) \
 \
......
@@ -320,7 +320,7 @@ class CompiledStaticCall: public NativeCall {
 friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);
 // Code
-static void emit_to_interp_stub(CodeBuffer &cbuf);
+static address emit_to_interp_stub(CodeBuffer &cbuf);
 static int to_interp_stub_size();
 static int reloc_to_interp_stub();
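Editor's note: with emit_to_interp_stub() now returning an address, the platform callers in this commit all follow the same pattern: capture the stub address, and when the emitter returns NULL (no room left for stubs), record a compile failure and stop emitting. A condensed sketch of that pattern, using only the ciEnv and CompiledStaticCall calls shown in this diff; it is not a verbatim copy of any one platform's encode block.

    // Sketch of the caller-side pattern introduced by this change.
    void emit_static_java_call(CodeBuffer& cbuf) {
      // ... emit the call instruction itself ...
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        // The stub section of the code buffer could not be grown: treat it as
        // a full code cache and abandon this compile.
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      // ... continue with relocations etc. ...
    }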
......
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,10 +133,6 @@ static double percent_of(size_t numerator, size_t denominator) {
 }
 }
-static size_t round_to_K(size_t value) {
-return value / K;
-}
 class RegionTypeCounter VALUE_OBJ_CLASS_SPEC {
 private:
 const char* _name;
@@ -187,8 +183,10 @@ public:
 size_t code_root_elems() const { return _code_root_elems; }
 void print_rs_mem_info_on(outputStream * out, size_t total) {
-out->print_cr(" "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
-round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
+out->print_cr(" "SIZE_FORMAT_W(8) "%s (%5.1f%%) by "SIZE_FORMAT" %s regions",
+byte_size_in_proper_unit(rs_mem_size()),
+proper_unit_for_byte_size(rs_mem_size()),
+rs_mem_size_percent_of(total), amount(), _name);
 }
 void print_cards_occupied_info_on(outputStream * out, size_t total) {
@@ -197,8 +195,10 @@ public:
 }
 void print_code_root_mem_info_on(outputStream * out, size_t total) {
-out->print_cr(" "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
-round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
+out->print_cr(" "SIZE_FORMAT_W(8) "%s (%5.1f%%) by "SIZE_FORMAT" %s regions",
+byte_size_in_proper_unit(code_root_mem_size()),
+proper_unit_for_byte_size(code_root_mem_size()),
+code_root_mem_size_percent_of(total), amount(), _name);
 }
 void print_code_root_elems_info_on(outputStream * out, size_t total) {
@@ -280,17 +280,23 @@ public:
 RegionTypeCounter* counters[] = { &_young, &_humonguous, &_free, &_old, NULL };
 out->print_cr("\n Current rem set statistics");
-out->print_cr(" Total per region rem sets sizes = "SIZE_FORMAT"K."
-" Max = "SIZE_FORMAT"K.",
-round_to_K(total_rs_mem_sz()), round_to_K(max_rs_mem_sz()));
+out->print_cr(" Total per region rem sets sizes = " SIZE_FORMAT "%s."
+" Max = " SIZE_FORMAT "%s.",
+byte_size_in_proper_unit(total_rs_mem_sz()),
+proper_unit_for_byte_size(total_rs_mem_sz()),
+byte_size_in_proper_unit(max_rs_mem_sz()),
+proper_unit_for_byte_size(max_rs_mem_sz()));
 for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
 (*current)->print_rs_mem_info_on(out, total_rs_mem_sz());
 }
-out->print_cr(" Static structures = "SIZE_FORMAT"K,"
-" free_lists = "SIZE_FORMAT"K.",
-round_to_K(HeapRegionRemSet::static_mem_size()),
-round_to_K(HeapRegionRemSet::fl_mem_size()));
+out->print_cr(" Static structures = " SIZE_FORMAT "%s,"
+" free_lists = " SIZE_FORMAT "%s.",
+byte_size_in_proper_unit(HeapRegionRemSet::static_mem_size()),
+proper_unit_for_byte_size(HeapRegionRemSet::static_mem_size()),
+byte_size_in_proper_unit(HeapRegionRemSet::fl_mem_size()),
+proper_unit_for_byte_size(HeapRegionRemSet::fl_mem_size()));
 out->print_cr(" "SIZE_FORMAT" occupied cards represented.",
 total_cards_occupied());
@@ -301,17 +307,21 @@ public:
 // Largest sized rem set region statistics
 HeapRegionRemSet* rem_set = max_rs_mem_sz_region()->rem_set();
 out->print_cr(" Region with largest rem set = "HR_FORMAT", "
-"size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
+"size = "SIZE_FORMAT "%s, occupied = "SIZE_FORMAT "%s.",
 HR_FORMAT_PARAMS(max_rs_mem_sz_region()),
-round_to_K(rem_set->mem_size()),
-round_to_K(rem_set->occupied()));
+byte_size_in_proper_unit(rem_set->mem_size()),
+proper_unit_for_byte_size(rem_set->mem_size()),
+byte_size_in_proper_unit(rem_set->occupied()),
+proper_unit_for_byte_size(rem_set->occupied()));
 // Strong code root statistics
 HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region()->rem_set();
-out->print_cr(" Total heap region code root sets sizes = "SIZE_FORMAT"K."
-" Max = "SIZE_FORMAT"K.",
-round_to_K(total_code_root_mem_sz()),
-round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()));
+out->print_cr(" Total heap region code root sets sizes = " SIZE_FORMAT "%s."
+" Max = " SIZE_FORMAT "%s.",
+byte_size_in_proper_unit(total_code_root_mem_sz()),
+proper_unit_for_byte_size(total_code_root_mem_sz()),
+byte_size_in_proper_unit(max_code_root_rem_set->strong_code_roots_mem_size()),
+proper_unit_for_byte_size(max_code_root_rem_set->strong_code_roots_mem_size()));
 for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
 (*current)->print_code_root_mem_info_on(out, total_code_root_mem_sz());
 }
@@ -323,10 +333,12 @@ public:
 }
 out->print_cr(" Region with largest amount of code roots = "HR_FORMAT", "
-"size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
+"size = "SIZE_FORMAT "%s, num_elems = "SIZE_FORMAT".",
 HR_FORMAT_PARAMS(max_code_root_mem_sz_region()),
-round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()),
-round_to_K(max_code_root_rem_set->strong_code_roots_list_length()));
+byte_size_in_proper_unit(max_code_root_rem_set->strong_code_roots_mem_size()),
+proper_unit_for_byte_size(max_code_root_rem_set->strong_code_roots_mem_size()),
+max_code_root_rem_set->strong_code_roots_list_length());
 }
 };
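Editor's note: the formatting change above replaces the fixed "K" suffix with a value/unit pair, so every size now needs two print arguments: byte_size_in_proper_unit() scales the number and proper_unit_for_byte_size() supplies the matching unit string for the "%s" placeholder. A standalone sketch of that pairing; the local helpers only approximate the VM's, and the exact thresholds may differ.

    #include <cstdio>
    #include <cstddef>

    // Local approximations of the VM helpers: scale a byte count and pick a unit.
    static size_t in_proper_unit(size_t bytes) {
      if (bytes >= 1024UL * 1024 * 1024) return bytes / (1024UL * 1024 * 1024);
      if (bytes >= 1024UL * 1024)        return bytes / (1024UL * 1024);
      if (bytes >= 1024UL)               return bytes / 1024UL;
      return bytes;
    }
    static const char* unit_for(size_t bytes) {
      if (bytes >= 1024UL * 1024 * 1024) return "G";
      if (bytes >= 1024UL * 1024)        return "M";
      if (bytes >= 1024UL)               return "K";
      return "B";
    }

    int main() {
      size_t sizes[] = { 512, 8 * 1024, 3 * 1024 * 1024, 2UL * 1024 * 1024 * 1024 };
      for (size_t s : sizes) {
        // Same shape as the diff: one numeric argument plus one unit-string argument.
        printf("rem set size = %zu%s\n", in_proper_unit(s), unit_for(s));
      }
      return 0;
    }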
......
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,22 +28,23 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1StringDedupTable.hpp"
+#include "gc_implementation/shared/concurrentGCThread.hpp"
 #include "memory/gcLocker.hpp"
 #include "memory/padded.inline.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "runtime/mutexLocker.hpp"
 //
-// Freelist in the deduplication table entry cache. Links table
+// List of deduplication table entries. Links table
 // entries together using their _next fields.
 //
-class G1StringDedupEntryFreeList : public CHeapObj<mtGC> {
+class G1StringDedupEntryList : public CHeapObj<mtGC> {
 private:
 G1StringDedupEntry* _list;
 size_t _length;
 public:
-G1StringDedupEntryFreeList() :
+G1StringDedupEntryList() :
 _list(NULL),
 _length(0) {
 }
@@ -63,6 +64,12 @@ public:
 return entry;
 }
+G1StringDedupEntry* remove_all() {
+G1StringDedupEntry* list = _list;
+_list = NULL;
+return list;
+}
 size_t length() {
 return _length;
 }
@@ -84,43 +91,53 @@ public:
 //
 class G1StringDedupEntryCache : public CHeapObj<mtGC> {
 private:
-// One freelist per GC worker to allow lock less freeing of
-// entries while doing a parallel scan of the table. Using
-// PaddedEnd to avoid false sharing.
-PaddedEnd<G1StringDedupEntryFreeList>* _lists;
-size_t _nlists;
+// One cache/overflow list per GC worker to allow lock less freeing of
+// entries while doing a parallel scan of the table. Using PaddedEnd to
+// avoid false sharing.
+size_t _nlists;
+size_t _max_list_length;
+PaddedEnd<G1StringDedupEntryList>* _cached;
+PaddedEnd<G1StringDedupEntryList>* _overflowed;
 public:
-G1StringDedupEntryCache();
+G1StringDedupEntryCache(size_t max_size);
 ~G1StringDedupEntryCache();
-// Get a table entry from the cache freelist, or allocate a new
-// entry if the cache is empty.
+// Set max number of table entries to cache.
+void set_max_size(size_t max_size);
+// Get a table entry from the cache, or allocate a new entry if the cache is empty.
 G1StringDedupEntry* alloc();
-// Insert a table entry into the cache freelist.
+// Insert a table entry into the cache.
 void free(G1StringDedupEntry* entry, uint worker_id);
 // Returns current number of entries in the cache.
 size_t size();
-// If the cache has grown above the given max size, trim it down
-// and deallocate the memory occupied by trimmed of entries.
-void trim(size_t max_size);
+// Deletes overflowed entries.
+void delete_overflowed();
 };
-G1StringDedupEntryCache::G1StringDedupEntryCache() {
-_nlists = MAX2(ParallelGCThreads, (size_t)1);
-_lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
+G1StringDedupEntryCache::G1StringDedupEntryCache(size_t max_size) :
+_nlists(MAX2(ParallelGCThreads, (size_t)1)),
+_max_list_length(0),
+_cached(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)),
+_overflowed(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)) {
+set_max_size(max_size);
 }
 G1StringDedupEntryCache::~G1StringDedupEntryCache() {
 ShouldNotReachHere();
 }
+void G1StringDedupEntryCache::set_max_size(size_t size) {
+_max_list_length = size / _nlists;
+}
 G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
 for (size_t i = 0; i < _nlists; i++) {
-G1StringDedupEntry* entry = _lists[i].remove();
+G1StringDedupEntry* entry = _cached[i].remove();
 if (entry != NULL) {
 return entry;
 }
@@ -131,31 +148,55 @@ G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
 void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
 assert(entry->obj() != NULL, "Double free");
 assert(worker_id < _nlists, "Invalid worker id");
 entry->set_obj(NULL);
 entry->set_hash(0);
-_lists[worker_id].add(entry);
+if (_cached[worker_id].length() < _max_list_length) {
+// Cache is not full
+_cached[worker_id].add(entry);
+} else {
+// Cache is full, add to overflow list for later deletion
+_overflowed[worker_id].add(entry);
+}
 }
 size_t G1StringDedupEntryCache::size() {
 size_t size = 0;
 for (size_t i = 0; i < _nlists; i++) {
size += _lists[i].length(); size += _cached[i].length();
} }
return size; return size;
} }
void G1StringDedupEntryCache::trim(size_t max_size) { void G1StringDedupEntryCache::delete_overflowed() {
size_t cache_size = 0; double start = os::elapsedTime();
uintx count = 0;
for (size_t i = 0; i < _nlists; i++) { for (size_t i = 0; i < _nlists; i++) {
G1StringDedupEntryFreeList* list = &_lists[i]; G1StringDedupEntry* entry;
cache_size += list->length();
while (cache_size > max_size) { {
G1StringDedupEntry* entry = list->remove(); // The overflow list can be modified during safepoints, therefore
assert(entry != NULL, "Should not be null"); // we temporarily join the suspendible thread set while removing
cache_size--; // all entries from the list.
SuspendibleThreadSetJoiner sts_join;
entry = _overflowed[i].remove_all();
}
// Delete all entries
while (entry != NULL) {
G1StringDedupEntry* next = entry->next();
delete entry; delete entry;
entry = next;
count++;
} }
} }
double end = os::elapsedTime();
if (PrintStringDeduplicationStatistics) {
gclog_or_tty->print_cr("[GC concurrent-string-deduplication, deleted " UINTX_FORMAT " entries, " G1_STRDEDUP_TIME_FORMAT "]", count, end - start);
}
} }
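The free()/delete_overflowed() pair above replaces the old trim-under-lock scheme with a bounded per-worker cache plus an overflow list: entries freed during a pause are kept for reuse up to _max_list_length, while the remainder are parked on the overflow list and deleted later by the deduplication thread outside the pause. A minimal Java sketch of that bounded-cache-with-overflow pattern (class and method names here are hypothetical, not HotSpot code):

    import java.util.ArrayDeque;
    import java.util.Deque;

    // Illustrative sketch only: a bounded cache that diverts excess entries
    // to an overflow list so they can be deleted off the critical path.
    class BoundedEntryCache<E> {
        private final Deque<E> cached = new ArrayDeque<>();
        private final Deque<E> overflowed = new ArrayDeque<>();
        private final int maxLength;

        BoundedEntryCache(int maxLength) { this.maxLength = maxLength; }

        void free(E entry) {
            if (cached.size() < maxLength) {
                cached.push(entry);        // keep for reuse
            } else {
                overflowed.push(entry);    // delete later, outside the pause
            }
        }

        E alloc() { return cached.poll(); } // null => caller allocates a new entry

        int deleteOverflowed() {            // the deferred cleanup step
            int n = overflowed.size();
            overflowed.clear();
            return n;
        }
    }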
G1StringDedupTable* G1StringDedupTable::_table = NULL; G1StringDedupTable* G1StringDedupTable::_table = NULL;
...@@ -192,7 +233,7 @@ G1StringDedupTable::~G1StringDedupTable() { ...@@ -192,7 +233,7 @@ G1StringDedupTable::~G1StringDedupTable() {
void G1StringDedupTable::create() { void G1StringDedupTable::create() {
assert(_table == NULL, "One string deduplication table allowed"); assert(_table == NULL, "One string deduplication table allowed");
_entry_cache = new G1StringDedupEntryCache(); _entry_cache = new G1StringDedupEntryCache((size_t)(_min_size * _max_cache_factor));
_table = new G1StringDedupTable(_min_size); _table = new G1StringDedupTable(_min_size);
} }
...@@ -375,6 +416,9 @@ G1StringDedupTable* G1StringDedupTable::prepare_resize() { ...@@ -375,6 +416,9 @@ G1StringDedupTable* G1StringDedupTable::prepare_resize() {
// Update statistics // Update statistics
_resize_count++; _resize_count++;
// Update max cache size
_entry_cache->set_max_size((size_t)(size * _max_cache_factor));
// Allocate the new table. The new table will be populated by workers // Allocate the new table. The new table will be populated by workers
// calling unlink_or_oops_do() and finally installed by finish_resize(). // calling unlink_or_oops_do() and finally installed by finish_resize().
return new G1StringDedupTable(size, _table->_hash_seed); return new G1StringDedupTable(size, _table->_hash_seed);
...@@ -427,7 +471,7 @@ void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* c ...@@ -427,7 +471,7 @@ void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* c
removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id); removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id);
} }
// Delayed update avoid contention on the table lock // Delayed update to avoid contention on the table lock
if (removed > 0) { if (removed > 0) {
MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
_table->_entries -= removed; _table->_entries -= removed;
...@@ -545,10 +589,8 @@ void G1StringDedupTable::verify() { ...@@ -545,10 +589,8 @@ void G1StringDedupTable::verify() {
} }
} }
void G1StringDedupTable::trim_entry_cache() { void G1StringDedupTable::clean_entry_cache() {
MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag); _entry_cache->delete_overflowed();
size_t max_cache_size = (size_t)(_table->_size * _max_cache_factor);
_entry_cache->trim(max_cache_size);
} }
void G1StringDedupTable::print_statistics(outputStream* st) { void G1StringDedupTable::print_statistics(outputStream* st) {
......
/* /*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -218,8 +218,8 @@ public: ...@@ -218,8 +218,8 @@ public:
// and deletes the previously active table. // and deletes the previously active table.
static void finish_rehash(G1StringDedupTable* rehashed_table); static void finish_rehash(G1StringDedupTable* rehashed_table);
// If the table entry cache has grown too large, trim it down according to policy // If the table entry cache has grown too large, delete overflowed entries.
static void trim_entry_cache(); static void clean_entry_cache();
static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id); static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id);
......
...@@ -100,14 +100,14 @@ void G1StringDedupThread::run() { ...@@ -100,14 +100,14 @@ void G1StringDedupThread::run() {
} }
} }
G1StringDedupTable::trim_entry_cache();
stat.mark_done(); stat.mark_done();
// Print statistics // Print statistics
total_stat.add(stat); total_stat.add(stat);
print(gclog_or_tty, stat, total_stat); print(gclog_or_tty, stat, total_stat);
} }
G1StringDedupTable::clean_entry_cache();
} }
terminate(); terminate();
......
...@@ -593,8 +593,9 @@ BytecodeInterpreter::run(interpreterState istate) { ...@@ -593,8 +593,9 @@ BytecodeInterpreter::run(interpreterState istate) {
/* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, /* 0xE4 */ &&opc_default, &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w,
/* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default, /* 0xE8 */ &&opc_return_register_finalizer,
&&opc_invokehandle, &&opc_default, &&opc_default,
/* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
......
...@@ -1806,7 +1806,7 @@ class JNIMethodBlock : public CHeapObj<mtClass> { ...@@ -1806,7 +1806,7 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
void clear_all_methods() { void clear_all_methods() {
for (JNIMethodBlock* b = this; b != NULL; b = b->_next) { for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
for (int i = 0; i< number_of_methods; i++) { for (int i = 0; i< number_of_methods; i++) {
_methods[i] = NULL; b->_methods[i] = NULL;
} }
} }
} }
...@@ -1816,7 +1816,7 @@ class JNIMethodBlock : public CHeapObj<mtClass> { ...@@ -1816,7 +1816,7 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
int count = 0; int count = 0;
for (JNIMethodBlock* b = this; b != NULL; b = b->_next) { for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
for (int i = 0; i< number_of_methods; i++) { for (int i = 0; i< number_of_methods; i++) {
if (_methods[i] != _free_method) count++; if (b->_methods[i] != _free_method) count++;
} }
} }
return count; return count;
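Both hunks above fix the same slip: the loops advance b along the block chain but then index _methods on the implicit this, so only the first block was ever cleared or counted. A hypothetical Java analogue of the bug and the corrected indexing:

    // Hypothetical analogue of the JNIMethodBlock fix: when walking a linked
    // chain of blocks, fields must be read from the current block "b", not
    // from "this" (which is only the first block in the chain).
    class MethodBlock {
        Object[] methods = new Object[8];
        MethodBlock next;

        int countUsed() {
            int count = 0;
            for (MethodBlock b = this; b != null; b = b.next) {
                for (int i = 0; i < b.methods.length; i++) {
                    // The buggy version read methods[i] (i.e. this.methods[i]).
                    if (b.methods[i] != null) {
                        count++;
                    }
                }
            }
            return count;
        }
    }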
...@@ -1874,6 +1874,9 @@ bool Method::is_method_id(jmethodID mid) { ...@@ -1874,6 +1874,9 @@ bool Method::is_method_id(jmethodID mid) {
Method* m = resolve_jmethod_id(mid); Method* m = resolve_jmethod_id(mid);
assert(m != NULL, "should be called with non-null method"); assert(m != NULL, "should be called with non-null method");
InstanceKlass* ik = m->method_holder(); InstanceKlass* ik = m->method_holder();
if (ik == NULL) {
return false;
}
ClassLoaderData* cld = ik->class_loader_data(); ClassLoaderData* cld = ik->class_loader_data();
if (cld->jmethod_ids() == NULL) return false; if (cld->jmethod_ids() == NULL) return false;
return (cld->jmethod_ids()->contains((Method**)mid)); return (cld->jmethod_ids()->contains((Method**)mid));
...@@ -1881,6 +1884,9 @@ bool Method::is_method_id(jmethodID mid) { ...@@ -1881,6 +1884,9 @@ bool Method::is_method_id(jmethodID mid) {
Method* Method::checked_resolve_jmethod_id(jmethodID mid) { Method* Method::checked_resolve_jmethod_id(jmethodID mid) {
if (mid == NULL) return NULL; if (mid == NULL) return NULL;
if (!Method::is_method_id(mid)) {
return NULL;
}
Method* o = resolve_jmethod_id(mid); Method* o = resolve_jmethod_id(mid);
if (o == NULL || o == JNIMethodBlock::_free_method || !((Metadata*)o)->is_method()) { if (o == NULL || o == JNIMethodBlock::_free_method || !((Metadata*)o)->is_method()) {
return NULL; return NULL;
......
...@@ -608,6 +608,10 @@ uint Compile::scratch_emit_size(const Node* n) { ...@@ -608,6 +608,10 @@ uint Compile::scratch_emit_size(const Node* n) {
n->as_MachBranch()->label_set(&fakeL, 0); n->as_MachBranch()->label_set(&fakeL, 0);
} }
n->emit(buf, this->regalloc()); n->emit(buf, this->regalloc());
// Emitting into the scratch buffer should not fail
assert (!failing(), err_msg_res("Must not have pending failure. Reason is: %s", failure_reason()));
if (is_branch) // Restore label. if (is_branch) // Restore label.
n->as_MachBranch()->label_set(saveL, save_bnum); n->as_MachBranch()->label_set(saveL, save_bnum);
......
...@@ -446,7 +446,9 @@ uint CastIINode::size_of() const { ...@@ -446,7 +446,9 @@ uint CastIINode::size_of() const {
} }
uint CastIINode::cmp(const Node &n) const { uint CastIINode::cmp(const Node &n) const {
return TypeNode::cmp(n) && ((CastIINode&)n)._carry_dependency == _carry_dependency; return TypeNode::cmp(n) &&
((CastIINode&)n)._carry_dependency == _carry_dependency &&
((CastIINode&)n)._range_check_dependency == _range_check_dependency;
} }
Node *CastIINode::Identity(PhaseTransform *phase) { Node *CastIINode::Identity(PhaseTransform *phase) {
...@@ -523,7 +525,7 @@ const Type *CastIINode::Value(PhaseTransform *phase) const { ...@@ -523,7 +525,7 @@ const Type *CastIINode::Value(PhaseTransform *phase) const {
} }
Node *CastIINode::Ideal_DU_postCCP(PhaseCCP *ccp) { Node *CastIINode::Ideal_DU_postCCP(PhaseCCP *ccp) {
if (_carry_dependency) { if (_carry_dependency || _range_check_dependency) {
return NULL; return NULL;
} }
return ConstraintCastNode::Ideal_DU_postCCP(ccp); return ConstraintCastNode::Ideal_DU_postCCP(ccp);
......
...@@ -1502,6 +1502,13 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) { ...@@ -1502,6 +1502,13 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
n->emit(*cb, _regalloc); n->emit(*cb, _regalloc);
current_offset = cb->insts_size(); current_offset = cb->insts_size();
// Above we only verified that there is enough space in the instruction section.
// However, the instruction may emit stubs that cause code buffer expansion.
// Bail out here if expansion failed due to a lack of code cache space.
if (failing()) {
return;
}
#ifdef ASSERT #ifdef ASSERT
if (n->size(_regalloc) < (current_offset-instr_offset)) { if (n->size(_regalloc) < (current_offset-instr_offset)) {
n->dump(); n->dump();
...@@ -1630,11 +1637,14 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) { ...@@ -1630,11 +1637,14 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
if (_method) { if (_method) {
// Emit the exception handler code. // Emit the exception handler code.
_code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(*cb)); _code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(*cb));
if (failing()) {
return; // CodeBuffer::expand failed
}
// Emit the deopt handler code. // Emit the deopt handler code.
_code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(*cb)); _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(*cb));
// Emit the MethodHandle deopt handler code (if required). // Emit the MethodHandle deopt handler code (if required).
if (has_method_handle_invokes()) { if (has_method_handle_invokes() && !failing()) {
// We can use the same code as for the normal deopt handler, we // We can use the same code as for the normal deopt handler, we
// just need a different entry point address. // just need a different entry point address.
_code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(*cb)); _code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(*cb));
......
...@@ -1641,16 +1641,11 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) { ...@@ -1641,16 +1641,11 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
} }
kit.store_String_value(kit.control(), result, char_array); kit.store_String_value(kit.control(), result, char_array);
// Do not let stores that initialize this object be reordered with // The value field is final. Emit a barrier here to ensure that the effect
// a subsequent store that would make this object accessible by // of the initialization is committed to memory before any code publishes
// other threads. // a reference to the newly constructed object (see Parse::do_exits()).
// Record what AllocateNode this StoreStore protects so that assert(AllocateNode::Ideal_allocation(result, _gvn) != NULL, "should be newly allocated");
// escape analysis can go from the MemBarStoreStoreNode to the kit.insert_mem_bar(Op_MemBarRelease, result);
// AllocateNode and eliminate the MemBarStoreStoreNode if possible
// based on the escape status of the AllocateNode.
AllocateNode* alloc = AllocateNode::Ideal_allocation(result, _gvn);
assert(alloc != NULL, "should be newly allocated");
kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
} else { } else {
result = C->top(); result = C->top();
} }
......
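The hunk above swaps the StoreStore barrier tied to the allocation for a release barrier on the result, on the grounds that String.value is a final field: the Java memory model already requires the constructor's writes to be visible to any thread that later sees a reference to the finished object. A small Java illustration of that final-field guarantee (not the C2 code itself):

    // Illustration of the final-field publication guarantee the barrier preserves.
    class Box {
        private final int value;           // frozen when the constructor completes
        Box(int v) { this.value = v; }     // (and "this" does not escape here)
        int value() { return value; }
    }

    class Publisher {
        static Box shared;                 // published with no synchronization

        static void writer() { shared = new Box(42); }

        static void reader() {
            Box b = shared;
            if (b != null) {
                // JLS 17.5: a thread that sees the published reference must
                // also see 42 in the final field, even without volatile/locks.
                assert b.value() == 42;    // run with -ea to check
            }
        }
    }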
...@@ -547,8 +547,12 @@ const Type *CmpUNode::sub( const Type *t1, const Type *t2 ) const { ...@@ -547,8 +547,12 @@ const Type *CmpUNode::sub( const Type *t1, const Type *t2 ) const {
// All unsigned values are LE -1 and GE 0. // All unsigned values are LE -1 and GE 0.
if (lo0 == 0 && hi0 == 0) { if (lo0 == 0 && hi0 == 0) {
return TypeInt::CC_LE; // 0 <= bot return TypeInt::CC_LE; // 0 <= bot
} else if ((jint)lo0 == -1 && (jint)hi0 == -1) {
return TypeInt::CC_GE; // -1 >= bot
} else if (lo1 == 0 && hi1 == 0) { } else if (lo1 == 0 && hi1 == 0) {
return TypeInt::CC_GE; // bot >= 0 return TypeInt::CC_GE; // bot >= 0
} else if ((jint)lo1 == -1 && (jint)hi1 == -1) {
return TypeInt::CC_LE; // bot <= -1
} }
} else { } else {
// We can use ranges of the form [lo..hi] if signs are the same. // We can use ranges of the form [lo..hi] if signs are the same.
......
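The two added branches handle the unsigned analogue of the existing zero cases: viewed as an unsigned 32-bit value, -1 is the maximum, so any value compares <= -1 and -1 compares >= any value, just as any value compares >= 0. A quick self-check of that fact with Integer.compareUnsigned (available since Java 8):

    public class UnsignedBoundsDemo {
        public static void main(String[] args) {
            final int minusOne = -1;        // 0xFFFFFFFF, the unsigned maximum
            int[] samples = {0, 1, 42, Integer.MAX_VALUE, Integer.MIN_VALUE, -7};
            for (int x : samples) {
                // Every value is <= -1 and >= 0 under unsigned comparison.
                assert Integer.compareUnsigned(x, minusOne) <= 0;
                assert Integer.compareUnsigned(minusOne, x) >= 0;
                assert Integer.compareUnsigned(x, 0) >= 0;
            }
            System.out.println("unsigned bounds hold for all samples (run with -ea)");
        }
    }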
...@@ -461,16 +461,11 @@ oop jniCheck::validate_handle(JavaThread* thr, jobject obj) { ...@@ -461,16 +461,11 @@ oop jniCheck::validate_handle(JavaThread* thr, jobject obj) {
Method* jniCheck::validate_jmethod_id(JavaThread* thr, jmethodID method_id) { Method* jniCheck::validate_jmethod_id(JavaThread* thr, jmethodID method_id) {
ASSERT_OOPS_ALLOWED; ASSERT_OOPS_ALLOWED;
// do the fast jmethodID check first // Do the jmethodID check
Method* moop = Method::checked_resolve_jmethod_id(method_id); Method* moop = Method::checked_resolve_jmethod_id(method_id);
if (moop == NULL) { if (moop == NULL) {
ReportJNIFatalError(thr, fatal_wrong_class_or_method); ReportJNIFatalError(thr, fatal_wrong_class_or_method);
} }
// jmethodIDs are supposed to be weak handles in the class loader data,
// but that can be expensive so check it last
else if (!Method::is_method_id(method_id)) {
ReportJNIFatalError(thr, fatal_non_weak_method);
}
return moop; return moop;
} }
......
...@@ -1290,18 +1290,22 @@ static bool is_authorized(Handle context, instanceKlassHandle klass, TRAPS) { ...@@ -1290,18 +1290,22 @@ static bool is_authorized(Handle context, instanceKlassHandle klass, TRAPS) {
// and null permissions - which gives no permissions. // and null permissions - which gives no permissions.
oop create_dummy_access_control_context(TRAPS) { oop create_dummy_access_control_context(TRAPS) {
InstanceKlass* pd_klass = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass()); InstanceKlass* pd_klass = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass());
// new ProtectionDomain(null,null); Handle obj = pd_klass->allocate_instance_handle(CHECK_NULL);
oop null_protection_domain = pd_klass->allocate_instance(CHECK_NULL); // Call constructor ProtectionDomain(null, null);
Handle null_pd(THREAD, null_protection_domain); JavaValue result(T_VOID);
JavaCalls::call_special(&result, obj, KlassHandle(THREAD, pd_klass),
vmSymbols::object_initializer_name(),
vmSymbols::codesource_permissioncollection_signature(),
Handle(), Handle(), CHECK_NULL);
// new ProtectionDomain[] {pd}; // new ProtectionDomain[] {pd};
objArrayOop context = oopFactory::new_objArray(pd_klass, 1, CHECK_NULL); objArrayOop context = oopFactory::new_objArray(pd_klass, 1, CHECK_NULL);
context->obj_at_put(0, null_pd()); context->obj_at_put(0, obj());
// new AccessControlContext(new ProtectionDomain[] {pd}) // new AccessControlContext(new ProtectionDomain[] {pd})
objArrayHandle h_context(THREAD, context); objArrayHandle h_context(THREAD, context);
oop result = java_security_AccessControlContext::create(h_context, false, Handle(), CHECK_NULL); oop acc = java_security_AccessControlContext::create(h_context, false, Handle(), CHECK_NULL);
return result; return acc;
} }
JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, jobject context, jboolean wrapException)) JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, jobject context, jboolean wrapException))
......
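The rewritten code no longer just allocates a ProtectionDomain instance; it runs the (CodeSource, PermissionCollection) constructor with two nulls and then wraps the domain in an AccessControlContext. The Java-level shape of the object graph being built is roughly the following (standard java.security API, shown only for orientation):

    import java.security.AccessControlContext;
    import java.security.ProtectionDomain;

    public class DummyAccSketch {
        public static void main(String[] args) {
            // new ProtectionDomain(null, null): no code source, no permissions.
            ProtectionDomain pd = new ProtectionDomain(null, null);
            // new AccessControlContext(new ProtectionDomain[] { pd })
            AccessControlContext acc =
                new AccessControlContext(new ProtectionDomain[] { pd });
            System.out.println("built dummy context: " + acc);
        }
    }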
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
/** /**
* @test * @test
* @bug 8058828 * @bug 8058828
* @run main/bootclasspath -Xbatch VMAnonymousClasses * @run main/bootclasspath/othervm -Xbatch VMAnonymousClasses
*/ */
import jdk.internal.org.objectweb.asm.ClassWriter; import jdk.internal.org.objectweb.asm.ClassWriter;
......
...@@ -30,20 +30,19 @@ ...@@ -30,20 +30,19 @@
## @run shell/timeout=30 TestDirtyInt.sh ## @run shell/timeout=30 TestDirtyInt.sh
## ##
if [ "${TESTSRC}" = "" ] if [ -z "${TESTSRC}" ]; then
then TESTSRC="${PWD}"
TESTSRC=${PWD} echo "TESTSRC not set. Using "${TESTSRC}" as default"
echo "TESTSRC not set. Using "${TESTSRC}" as default"
fi fi
echo "TESTSRC=${TESTSRC}" echo "TESTSRC=${TESTSRC}"
## Adding common setup Variables for running shell tests. ## Adding common setup Variables for running shell tests.
. ${TESTSRC}/../../test_env.sh . ${TESTSRC}/../../test_env.sh
# set platform-dependent variables # set platform-dependent variables
if [ $VM_OS == "linux" -a $VM_CPU == "sparcv9" ]; then if [ "$VM_OS" = "linux" -a "$VM_CPU" = "sparcv9" ]; then
echo "Testing on linux-sparc" echo "Testing on linux-sparc"
gcc_cmd=`which gcc` gcc_cmd=`which gcc`
if [ "x$gcc_cmd" == "x" ]; then if [ -z "$gcc_cmd" ]; then
echo "WARNING: gcc not found. Cannot execute test." 2>&1 echo "WARNING: gcc not found. Cannot execute test." 2>&1
exit 0; exit 0;
fi fi
...@@ -68,10 +67,9 @@ cmd="${TESTJAVA}${FS}bin${FS}java \ ...@@ -68,10 +67,9 @@ cmd="${TESTJAVA}${FS}bin${FS}java \
-Djava.library.path=${TESTSRC}${FS} TestDirtyInt" -Djava.library.path=${TESTSRC}${FS} TestDirtyInt"
echo "$cmd" echo "$cmd"
eval $cmd eval $cmd
if [ $? = 0 ] if [ $? = 0 ]; then
then
echo "Test Passed" echo "Test Passed"
exit 0 exit 0
fi fi
......
...@@ -27,6 +27,7 @@ import java.util.Arrays; ...@@ -27,6 +27,7 @@ import java.util.Arrays;
/* /*
* @test * @test
* @bug 8159244 * @bug 8159244
* @requires vm.gc == "Parallel" | vm.gc == "null"
* @summary Verifies that no partially initialized String object escapes from * @summary Verifies that no partially initialized String object escapes from
* C2's String concat optimization in a highly concurrent setting. * C2's String concat optimization in a highly concurrent setting.
* This test triggers the bug in about 1 out of 10 runs. * This test triggers the bug in about 1 out of 10 runs.
......
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @bug 8042660
* @summary Constant pool NameAndType entries must point to non-zero length Utf8 strings
* @compile emptySigUtf8.jcod
* @compile emptyNameUtf8.jcod
* @run main/othervm -Xverify:all BadNameAndType
*/
// Test that a constant pool NameAndType descriptor_index and/or name_index
// that points to a zero length Utf8 string causes a ClassFormatError.
public class BadNameAndType {
public static void main(String args[]) throws Throwable {
System.out.println("Regression test for bug 8042660");
// Test descriptor_index pointing to zero-length string.
try {
Class newClass = Class.forName("emptySigUtf8");
throw new RuntimeException("Expected ClassFormatError exception not thrown");
} catch (java.lang.ClassFormatError e) {
System.out.println("Test BadNameAndType passed test case emptySigUtf8");
}
// Test name_index pointing to zero-length string.
try {
Class newClass = Class.forName("emptyNameUtf8");
throw new RuntimeException("Expected ClassFormatError exception not thrown");
} catch (java.lang.ClassFormatError e) {
System.out.println("Test BadNameAndType passed test case emptyNameUtf8");
}
}
}
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// This class has an illegal NameAndType at constant pool #4. It's illegal because
// the Utf8 that it points to at #27 is a zero length string which is not a valid
// name. Loading this class should cause a ClassFormatError exception.
class emptyNameUtf8 {
0xCAFEBABE;
0; // minor version
52; // version
[29] { // Constant Pool
; // first element is empty
Method #6 #15; // #1 at 0x0A
Field #16 #17; // #2 at 0x0F
String #18; // #3 at 0x14
NameAndType #27 #28; // #4 at 0x9F
class #21; // #5 at 0x1C
class #22; // #6 at 0x1F
Utf8 "<init>"; // #7 at 0x22
Utf8 "()V"; // #8 at 0x2B
Utf8 "Code"; // #9 at 0x2E
Utf8 "LineNumberTable"; // #10 at 0x35
Utf8 "main"; // #11 at 0x47
Utf8 "([Ljava/lang/String;)V"; // #12 at 0x4E
Utf8 "SourceFile"; // #13 at 0x67
Utf8 "emptyNameUtf8.java"; // #14 at 0x74
NameAndType #7 #8; // #15 at 0x81
class #23; // #16 at 0x86
NameAndType #24 #25; // #17 at 0x89
Utf8 "Hello World"; // #18 at 0x8E
class #26; // #19 at 0x9C
Method #19 #4; // #20 at 0x17
Utf8 "emptyNameUtf8"; // #21 at 0xA4
Utf8 "java/lang/Object"; // #22 at 0xAC
Utf8 "java/lang/System"; // #23 at 0xBF
Utf8 "out"; // #24 at 0xD2
Utf8 "Ljava/io/PrintStream;"; // #25 at 0xD8
Utf8 "java/io/PrintStream"; // #26 at 0xF0
Utf8 ""; // #27 at 0x0106
Utf8 "()V"; // #28 at 0x0110
} // Constant Pool
0x0021; // access
#5;// this_cpx
#6;// super_cpx
[0] { // Interfaces
} // Interfaces
[0] { // fields
} // fields
[2] { // methods
{ // Member at 0x0134
0x0001; // access
#7; // name_cpx
#8; // sig_cpx
[1] { // Attributes
Attr(#9, 29) { // Code at 0x013C
1; // max_stack
1; // max_locals
Bytes[5]{
0x2AB70001B1;
};
[0] { // Traps
} // end Traps
[1] { // Attributes
Attr(#10, 6) { // LineNumberTable at 0x0153
[1] { // LineNumberTable
0 2; // at 0x015F
}
} // end LineNumberTable
} // Attributes
} // end Code
} // Attributes
} // Member
;
{ // Member at 0x015F
0x0009; // access
#11; // name_cpx
#12; // sig_cpx
[1] { // Attributes
Attr(#9, 37) { // Code at 0x0167
2; // max_stack
1; // max_locals
Bytes[9]{
0xB200021203B60004;
0xB1;
};
[0] { // Traps
} // end Traps
[1] { // Attributes
Attr(#10, 10) { // LineNumberTable at 0x0182
[2] { // LineNumberTable
0 4; // at 0x018E
8 5; // at 0x0192
}
} // end LineNumberTable
} // Attributes
} // end Code
} // Attributes
} // Member
} // methods
[1] { // Attributes
Attr(#13, 2) { // SourceFile at 0x0194
#14;
} // end SourceFile
} // Attributes
} // end class emptyNameUtf8
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// This class has an illegal NameAndType at constant pool #4. It's illegal because
// the type that it points to at #28 is a zero length Utf8 string which is not a
// valid signature. Loading this class should cause a ClassFormatError exception.
class emptySigUtf8 {
0xCAFEBABE;
0; // minor version
52; // version
[29] { // Constant Pool
; // first element is empty
Method #6 #15; // #1 at 0x0A
Field #16 #17; // #2 at 0x0F
String #18; // #3 at 0x14
NameAndType #27 #28; // #4 at 0x9F
class #21; // #5 at 0x1C
class #22; // #6 at 0x1F
Utf8 "<init>"; // #7 at 0x22
Utf8 "()V"; // #8 at 0x2B
Utf8 "Code"; // #9 at 0x2E
Utf8 "LineNumberTable"; // #10 at 0x35
Utf8 "main"; // #11 at 0x47
Utf8 "([Ljava/lang/String;)V"; // #12 at 0x4E
Utf8 "SourceFile"; // #13 at 0x67
Utf8 "emptySigUtf8.java"; // #14 at 0x74
NameAndType #7 #8; // #15 at 0x81
class #23; // #16 at 0x86
NameAndType #24 #25; // #17 at 0x89
Utf8 "Hello World"; // #18 at 0x8E
class #26; // #19 at 0x9C
Method #19 #4; // #20 at 0x17
Utf8 "emptySigUtf8"; // #21 at 0xA4
Utf8 "java/lang/Object"; // #22 at 0xAC
Utf8 "java/lang/System"; // #23 at 0xBF
Utf8 "out"; // #24 at 0xD2
Utf8 "Ljava/io/PrintStream;"; // #25 at 0xD8
Utf8 "java/io/PrintStream"; // #26 at 0xF0
Utf8 "hi"; // #27 at 0x0106
Utf8 ""; // #28 at 0x0110
} // Constant Pool
0x0021; // access
#5;// this_cpx
#6;// super_cpx
[0] { // Interfaces
} // Interfaces
[0] { // fields
} // fields
[2] { // methods
{ // Member at 0x0134
0x0001; // access
#7; // name_cpx
#8; // sig_cpx
[1] { // Attributes
Attr(#9, 29) { // Code at 0x013C
1; // max_stack
1; // max_locals
Bytes[5]{
0x2AB70001B1;
};
[0] { // Traps
} // end Traps
[1] { // Attributes
Attr(#10, 6) { // LineNumberTable at 0x0153
[1] { // LineNumberTable
0 2; // at 0x015F
}
} // end LineNumberTable
} // Attributes
} // end Code
} // Attributes
} // Member
;
{ // Member at 0x015F
0x0009; // access
#11; // name_cpx
#12; // sig_cpx
[1] { // Attributes
Attr(#9, 37) { // Code at 0x0167
2; // max_stack
1; // max_locals
Bytes[9]{
0xB200021203B60004;
0xB1;
};
[0] { // Traps
} // end Traps
[1] { // Attributes
Attr(#10, 10) { // LineNumberTable at 0x0182
[2] { // LineNumberTable
0 4; // at 0x018E
8 5; // at 0x0192
}
} // end LineNumberTable
} // Attributes
} // end Code
} // Attributes
} // Member
} // methods
[1] { // Attributes
Attr(#13, 2) { // SourceFile at 0x0194
#14;
} // end SourceFile
} // Attributes
} // end class emptySigUtf8
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
package com.oracle.java.testlibrary; package com.oracle.java.testlibrary;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher; import java.util.regex.Matcher;
import java.util.regex.Pattern; import java.util.regex.Pattern;
...@@ -69,6 +71,58 @@ public final class OutputAnalyzer { ...@@ -69,6 +71,58 @@ public final class OutputAnalyzer {
} }
/** /**
* Verify that the stdout contents of output buffer is empty
*
* @throws RuntimeException
* If stdout was not empty
*/
public void stdoutShouldBeEmpty() {
if (!getStdout().isEmpty()) {
reportDiagnosticSummary();
throw new RuntimeException("stdout was not empty");
}
}
/**
* Verify that the stderr contents of output buffer is empty
*
* @throws RuntimeException
* If stderr was not empty
*/
public void stderrShouldBeEmpty() {
if (!getStderr().isEmpty()) {
reportDiagnosticSummary();
throw new RuntimeException("stderr was not empty");
}
}
/**
* Verify that the stdout contents of output buffer is not empty
*
* @throws RuntimeException
* If stdout was empty
*/
public void stdoutShouldNotBeEmpty() {
if (getStdout().isEmpty()) {
reportDiagnosticSummary();
throw new RuntimeException("stdout was empty");
}
}
/**
* Verify that the stderr contents of output buffer is not empty
*
* @throws RuntimeException
* If stderr was empty
*/
public void stderrShouldNotBeEmpty() {
if (getStderr().isEmpty()) {
reportDiagnosticSummary();
throw new RuntimeException("stderr was empty");
}
}
/**
* Verify that the stdout and stderr contents of output buffer contains the string * Verify that the stdout and stderr contents of output buffer contains the string
* *
* @param expectedString String that buffer should contain * @param expectedString String that buffer should contain
...@@ -365,4 +419,18 @@ public final class OutputAnalyzer { ...@@ -365,4 +419,18 @@ public final class OutputAnalyzer {
public int getExitValue() { public int getExitValue() {
return exitValue; return exitValue;
} }
/**
* Get the contents of the output buffer (stdout and stderr) as list of strings.
* Output will be split by newlines.
*
* @return Contents of the output buffer as list of strings
*/
public List<String> asLines() {
return asLines(getOutput());
}
private List<String> asLines(String buffer) {
return Arrays.asList(buffer.split("(\\r\\n|\\n|\\r)"));
}
} }
...@@ -187,23 +187,36 @@ public final class ProcessTools { ...@@ -187,23 +187,36 @@ public final class ProcessTools {
return executeProcess(pb); return executeProcess(pb);
} }
/** /**
* Executes a process, waits for it to finish and returns the process output. * Executes a process, waits for it to finish and returns the process output.
* @param pb The ProcessBuilder to execute. * The process will have exited before this method returns.
* @return The output from the process. * @param pb The ProcessBuilder to execute.
*/ * @return The {@linkplain OutputAnalyzer} instance wrapping the process.
public static OutputAnalyzer executeProcess(ProcessBuilder pb) throws Throwable { */
OutputAnalyzer output = null; public static OutputAnalyzer executeProcess(ProcessBuilder pb) throws Exception {
try { OutputAnalyzer output = null;
output = new OutputAnalyzer(pb.start()); Process p = null;
return output; boolean failed = false;
} catch (Throwable t) { try {
System.out.println("executeProcess() failed: " + t); p = pb.start();
throw t; output = new OutputAnalyzer(p);
} finally { p.waitFor();
System.out.println(getProcessLog(pb, output));
return output;
} catch (Throwable t) {
if (p != null) {
p.destroyForcibly().waitFor();
}
failed = true;
System.out.println("executeProcess() failed: " + t);
throw t;
} finally {
if (failed) {
System.err.println(getProcessLog(pb, output));
}
}
} }
}
/** /**
* Executes a process, waits for it to finish and returns the process output. * Executes a process, waits for it to finish and returns the process output.
......
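executeProcess() now waits for the child, forcibly destroys it if anything goes wrong, and only logs the process output on failure. A hypothetical test-side usage together with the new OutputAnalyzer checks (createJavaProcessBuilder and shouldHaveExitValue are assumed to be the existing testlibrary helpers):

    import com.oracle.java.testlibrary.OutputAnalyzer;
    import com.oracle.java.testlibrary.ProcessTools;

    public class ExecuteProcessUsage {
        public static void main(String[] args) throws Exception {
            // Assumed helper: builds a java command line for the JDK under test.
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-version");
            OutputAnalyzer out = ProcessTools.executeProcess(pb);
            out.shouldHaveExitValue(0);
            out.stdoutShouldBeEmpty();      // "java -version" writes to stderr
            out.stderrShouldNotBeEmpty();
            for (String line : out.asLines()) {
                System.out.println("> " + line);
            }
        }
    }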
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
import com.oracle.java.testlibrary.Platform; import com.oracle.java.testlibrary.Platform;
public class UintxTest { public class UintxTest {
private static final String FLAG_NAME = "TypeProfileLevel"; private static final String FLAG_NAME = "VerifyGCStartAt";
private static final Long[] TESTS = {0L, 100L, (long) Integer.MAX_VALUE, private static final Long[] TESTS = {0L, 100L, (long) Integer.MAX_VALUE,
(1L << 32L) - 1L, 1L << 32L}; (1L << 32L) - 1L, 1L << 32L};
private static final Long[] EXPECTED_64 = TESTS; private static final Long[] EXPECTED_64 = TESTS;
......
...@@ -55,9 +55,7 @@ public final class VmFlagTest<T> { ...@@ -55,9 +55,7 @@ public final class VmFlagTest<T> {
} }
private T getValue() { private T getValue() {
T t = get.apply(flagName); return get.apply(flagName);
System.out.println("T = " + t);
return t;
} }
protected static <T> void runTest(String existentFlag, T[] tests, protected static <T> void runTest(String existentFlag, T[] tests,
......