diff --git a/.hgtags b/.hgtags
index cd22a2778113d81c82e7f8aac111be3a3ac5a798..eb90bd9ba243aacb4b7bd047baabfd0cdb53f0da 100644
--- a/.hgtags
+++ b/.hgtags
@@ -453,3 +453,5 @@ c3d92e04873788275eeebec6bcd2948cdbd143a7 jdk8u20-b06
 f0ea4d3df1299b6c958e1a72f892c695fca055ad jdk8u20-b07
 2627c7be4279478b880d7f643a252d185e4915ec hs25.20-b08
 e9ffa408f7af28205a7114ca78bce29846f5a8df jdk8u20-b08
+5186bc5047c1725888ed99f423bdfaa116e05abe hs25.20-b09
+4d73f1e99f97d1444e16ee5ef4634eb2129969ad jdk8u20-b09
diff --git a/make/hotspot_version b/make/hotspot_version
index 265f3d19713c108dddd5e80317a6c3de800d7704..53dcdecd2d47d059211a4ec7e2912cc697228084 100644
--- a/make/hotspot_version
+++ b/make/hotspot_version
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=20
-HS_BUILD_NUMBER=08
+HS_BUILD_NUMBER=09
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
diff --git a/make/linux/Makefile b/make/linux/Makefile
index 8321f8e15db1f6737fa0424e8b1833b8d8684c10..df450934810f2c227fb83c8c52d87c6066ddc7f1 100644
--- a/make/linux/Makefile
+++ b/make/linux/Makefile
@@ -66,8 +66,8 @@ ifndef CC_INTERP
     FORCE_TIERED=1
   endif
 endif
-# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
-ifneq (,$(filter $(ARCH),ppc64 pp64le))
+# C1 is not ported on ppc64, so we cannot build a tiered VM:
+ifeq ($(ARCH),ppc64)
   FORCE_TIERED=0
 endif
 
diff --git a/make/linux/makefiles/defs.make b/make/linux/makefiles/defs.make
index 377d4f9c59fbeb184b6181fdaeff792f6e0dad83..8922fdd8728e5139c4b9ccc7c87fa13190cf33ba 100644
--- a/make/linux/makefiles/defs.make
+++ b/make/linux/makefiles/defs.make
@@ -33,6 +33,11 @@ SLASH_JAVA ?= /java
 # ARCH can be set explicitly in spec.gmk
 ifndef ARCH
   ARCH := $(shell uname -m)
+  # Fold little endian PowerPC64 into big-endian (if ARCH is set in
+  # hotspot-spec.gmk, this will be done by the configure script).
+  ifeq ($(ARCH),ppc64le)
+    ARCH := ppc64
+  endif
 endif
 
 PATH_SEP ?= :
diff --git a/make/linux/makefiles/ppc64.make b/make/linux/makefiles/ppc64.make
index b3e6f27b65e243dab4cfda2e39015e19d98b38d5..f3392de49a63fedbf28f567725798fa810b88e4b 100644
--- a/make/linux/makefiles/ppc64.make
+++ b/make/linux/makefiles/ppc64.make
@@ -26,14 +26,26 @@
 # make c code know it is on a 64 bit platform.
 CFLAGS += -D_LP64=1
 
-# fixes `relocation truncated to fit' error for gcc 4.1.
-CFLAGS += -mminimal-toc
+ifeq ($(origin OPENJDK_TARGET_CPU_ENDIAN),undefined)
+  # This can happen during hotspot standalone build. Set endianness from
+  # uname. We assume build and target machines are the same.
+  OPENJDK_TARGET_CPU_ENDIAN:=$(if $(filter ppc64le,$(shell uname -m)),little,big)
+endif
 
-# finds use ppc64 instructions, but schedule for power5
-CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+ifeq ($(filter $(OPENJDK_TARGET_CPU_ENDIAN),big little),)
+  $(error OPENJDK_TARGET_CPU_ENDIAN value should be 'big' or 'little')
+endif
 
-# let linker find external 64 bit libs.
-LFLAGS_VM += -L/lib64
+ifeq ($(OPENJDK_TARGET_CPU_ENDIAN),big)
+  # fixes `relocation truncated to fit' error for gcc 4.1.
+  CFLAGS += -mminimal-toc
 
-# specify lib format.
-LFLAGS_VM += -Wl,-melf64ppc
+  # finds use ppc64 instructions, but schedule for power5
+  CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+else
+  # Little endian machine uses ELFv2 ABI.
+  CFLAGS += -DVM_LITTLE_ENDIAN -DABI_ELFv2
+
+  # Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
+  CFLAGS += -mcpu=power7 -mtune=power8 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+endif
diff --git a/make/windows/makefiles/defs.make b/make/windows/makefiles/defs.make
index acf4d24d7e0cff83ce383ed8d162c390b1cc2abf..fe4b6c72fbf503e4717ee678b34f4310a122b98d 100644
--- a/make/windows/makefiles/defs.make
+++ b/make/windows/makefiles/defs.make
@@ -260,7 +260,6 @@ ifeq ($(JVM_VARIANT_SERVER),true)
       EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.map
     endif
   endif
-  EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
 endif
 ifeq ($(JVM_VARIANT_CLIENT),true)
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
@@ -275,6 +274,8 @@ ifeq ($(JVM_VARIANT_CLIENT),true)
     endif
   endif
 endif
 
+EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
+
 ifeq ($(BUILD_WIN_SA), 1)
   EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.$(LIBRARY_SUFFIX)
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
diff --git a/src/cpu/ppc/vm/assembler_ppc.hpp b/src/cpu/ppc/vm/assembler_ppc.hpp
index 56b0b92db736106fd14cdf1d5046a92f6c9ce95d..5a1c0f1d9c8fbc7d72543eeb039049478d85a9f5 100644
--- a/src/cpu/ppc/vm/assembler_ppc.hpp
+++ b/src/cpu/ppc/vm/assembler_ppc.hpp
@@ -1025,15 +1025,14 @@ class Assembler : public AbstractAssembler {
   }
 
   static void set_imm(int* instr, short s) {
-    short* p = ((short *)instr) + 1;
-    *p = s;
+    // imm is always in the lower 16 bits of the instruction,
+    // so this is endian-neutral. Same for the get_imm below.
+    uint32_t w = *(uint32_t *)instr;
+    *instr = (int)((w & ~0x0000FFFF) | (s & 0x0000FFFF));
   }
 
   static int get_imm(address a, int instruction_number) {
-    short imm;
-    short *p =((short *)a)+2*instruction_number+1;
-    imm = *p;
-    return (int)imm;
+    return (short)((int *)a)[instruction_number];
   }
 
   static inline int hi16_signed(  int x) { return (int)(int16_t)(x >> 16); }
diff --git a/src/cpu/ppc/vm/bytes_ppc.hpp b/src/cpu/ppc/vm/bytes_ppc.hpp
index 872aa6b26240d449fa50a7d16fc16ec0aa813440..2a8fc59c8921aaaa4bbfed3a29a4a39faea56419 100644
--- a/src/cpu/ppc/vm/bytes_ppc.hpp
+++ b/src/cpu/ppc/vm/bytes_ppc.hpp
@@ -35,6 +35,126 @@ class Bytes: AllStatic {
   // Can I count on address always being a pointer to an unsigned char? Yes.
 
+#if defined(VM_LITTLE_ENDIAN)
+
+  // Returns true, if the byte ordering used by Java is different from the native byte ordering
+  // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
+  static inline bool is_Java_byte_ordering_different() { return true; }
+
+  // Forward declarations of the compiler-dependent implementation
+  static inline u2 swap_u2(u2 x);
+  static inline u4 swap_u4(u4 x);
+  static inline u8 swap_u8(u8 x);
+
+  static inline u2 get_native_u2(address p) {
+    return (intptr_t(p) & 1) == 0
+             ?   *(u2*)p
+             :   ( u2(p[1]) << 8 )
+               | ( u2(p[0]) );
+  }
+
+  static inline u4 get_native_u4(address p) {
+    switch (intptr_t(p) & 3) {
+    case 0:  return *(u4*)p;
+
+    case 2:  return ( u4( ((u2*)p)[1] ) << 16 )
+                  | ( u4( ((u2*)p)[0] ) );
+
+    default: return ( u4(p[3]) << 24 )
+                  | ( u4(p[2]) << 16 )
+                  | ( u4(p[1]) <<  8 )
+                  |   u4(p[0]);
+    }
+  }
+
+  static inline u8 get_native_u8(address p) {
+    switch (intptr_t(p) & 7) {
+    case 0:  return *(u8*)p;
+
+    case 4:  return ( u8( ((u4*)p)[1] ) << 32 )
+                  | ( u8( ((u4*)p)[0] ) );
+
+    case 2:  return ( u8( ((u2*)p)[3] ) << 48 )
+                  | ( u8( ((u2*)p)[2] ) << 32 )
+                  | ( u8( ((u2*)p)[1] ) << 16 )
+                  | ( u8( ((u2*)p)[0] ) );
+
+    default: return ( u8(p[7]) << 56 )
+                  | ( u8(p[6]) << 48 )
+                  | ( u8(p[5]) << 40 )
+                  | ( u8(p[4]) << 32 )
+                  | ( u8(p[3]) << 24 )
+                  | ( u8(p[2]) << 16 )
+                  | ( u8(p[1]) <<  8 )
+                  |   u8(p[0]);
+    }
+  }
+
+
+
+  static inline void put_native_u2(address p, u2 x) {
+    if ( (intptr_t(p) & 1) == 0 )  *(u2*)p = x;
+    else {
+      p[1] = x >> 8;
+      p[0] = x;
+    }
+  }
+
+  static inline void put_native_u4(address p, u4 x) {
+    switch ( intptr_t(p) & 3 ) {
+    case 0:  *(u4*)p = x;
+             break;
+
+    case 2:  ((u2*)p)[1] = x >> 16;
+             ((u2*)p)[0] = x;
+             break;
+
+    default: ((u1*)p)[3] = x >> 24;
+             ((u1*)p)[2] = x >> 16;
+             ((u1*)p)[1] = x >>  8;
+             ((u1*)p)[0] = x;
+             break;
+    }
+  }
+
+  static inline void put_native_u8(address p, u8 x) {
+    switch ( intptr_t(p) & 7 ) {
+    case 0:  *(u8*)p = x;
+             break;
+
+    case 4:  ((u4*)p)[1] = x >> 32;
+             ((u4*)p)[0] = x;
+             break;
+
+    case 2:  ((u2*)p)[3] = x >> 48;
+             ((u2*)p)[2] = x >> 32;
+             ((u2*)p)[1] = x >> 16;
+             ((u2*)p)[0] = x;
+             break;
+
+    default: ((u1*)p)[7] = x >> 56;
+             ((u1*)p)[6] = x >> 48;
+             ((u1*)p)[5] = x >> 40;
+             ((u1*)p)[4] = x >> 32;
+             ((u1*)p)[3] = x >> 24;
+             ((u1*)p)[2] = x >> 16;
+             ((u1*)p)[1] = x >>  8;
+             ((u1*)p)[0] = x;
+    }
+  }
+
+  // Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
+  // (byte-order reversal is needed since this machine is little-endian).
+  static inline u2 get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
+  static inline u4 get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
+  static inline u8 get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }
+
+  static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, swap_u2(x)); }
+  static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, swap_u4(x)); }
+  static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, swap_u8(x)); }
+
+#else // !defined(VM_LITTLE_ENDIAN)
+
   // Returns true, if the byte ordering used by Java is different from the nativ byte ordering
   // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
   static inline bool is_Java_byte_ordering_different() { return false; }
 
@@ -150,6 +270,12 @@ class Bytes: AllStatic {
   static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, x); }
   static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, x); }
   static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, x); }
+
+#endif // VM_LITTLE_ENDIAN
 };
 
+#if defined(TARGET_OS_ARCH_linux_ppc)
+#include "bytes_linux_ppc.inline.hpp"
+#endif
+
 #endif // CPU_PPC_VM_BYTES_PPC_HPP
diff --git a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
index bf9c934738051937872765fc5ac7e56bfb8f7a8d..ed1a7637a5430e1fd0f470c76bc95e852770cd72 100644
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
@@ -801,7 +801,13 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
       if (UseCompressedOops && !wide) {
         __ movl(as_Address(addr), (int32_t)NULL_WORD);
       } else {
+#ifdef _LP64
+        __ xorptr(rscratch1, rscratch1);
+        null_check_here = code_offset();
+        __ movptr(as_Address(addr), rscratch1);
+#else
         __ movptr(as_Address(addr), NULL_WORD);
+#endif
       }
     } else {
       if (is_literal_address(addr)) {
diff --git a/src/cpu/x86/vm/vm_version_x86.cpp b/src/cpu/x86/vm/vm_version_x86.cpp
index 28b79b784f264b8d056b483f98a7b46066079d09..ba5fcb383c62f4de01ec49ecc87a1153d64c8fd7 100644
--- a/src/cpu/x86/vm/vm_version_x86.cpp
+++ b/src/cpu/x86/vm/vm_version_x86.cpp
@@ -59,9 +59,9 @@ static BufferBlob* stub_blob;
 static const int stub_size = 600;
 
 extern "C" {
-  typedef void (*getPsrInfo_stub_t)(void*);
+  typedef void (*get_cpu_info_stub_t)(void*);
 }
-static getPsrInfo_stub_t getPsrInfo_stub = NULL;
+static get_cpu_info_stub_t get_cpu_info_stub = NULL;
 
 
 class VM_Version_StubGenerator: public StubCodeGenerator {
@@ -69,7 +69,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
 
   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
 
-  address generate_getPsrInfo() {
+  address generate_get_cpu_info() {
     // Flags to test CPU type.
     const uint32_t HS_EFL_AC = 0x40000;
     const uint32_t HS_EFL_ID = 0x200000;
@@ -81,13 +81,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
     Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;
 
-    StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
+    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
 #   define __ _masm->
 
     address start = __ pc();
 
     //
-    // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
+    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
     //
     // LP64: rcx and rdx are first and second argument registers on windows
 
@@ -385,6 +385,14 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
 };
 
+void VM_Version::get_cpu_info_wrapper() {
+  get_cpu_info_stub(&_cpuid_info);
+}
+
+#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
+  #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
+#endif
+
 void VM_Version::get_processor_features() {
 
   _cpu = 4; // 486 by default
@@ -395,7 +403,11 @@ void VM_Version::get_processor_features() {
 
   if (!Use486InstrsOnly) {
     // Get raw processor info
-    getPsrInfo_stub(&_cpuid_info);
+
+    // Some platforms (like Win*) need a wrapper around here
+    // in order to properly handle SEGV for YMM registers test.
+    CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper);
+
     assert_is_initialized();
     _cpu = extended_cpu_family();
     _model = extended_cpu_model();
@@ -986,14 +998,14 @@ void VM_Version::initialize() {
   ResourceMark rm;
   // Making this stub must be FIRST use of assembler
 
-  stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
+  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
   if (stub_blob == NULL) {
-    vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
+    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
   }
   CodeBuffer c(stub_blob);
   VM_Version_StubGenerator g(&c);
-  getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
-                                   g.generate_getPsrInfo());
+  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
+                                     g.generate_get_cpu_info());
 
   get_processor_features();
 }
diff --git a/src/cpu/x86/vm/vm_version_x86.hpp b/src/cpu/x86/vm/vm_version_x86.hpp
index a3be0995ae13d90761de4bff00a70d2bbdc1016c..51f6e4f2f4a30ea0933451c2a840d41191e15571 100644
--- a/src/cpu/x86/vm/vm_version_x86.hpp
+++ b/src/cpu/x86/vm/vm_version_x86.hpp
@@ -507,6 +507,7 @@ public:
   // The value used to check ymm register after signal handle
   static int ymm_test_value()    { return 0xCAFEBABE; }
 
+  static void get_cpu_info_wrapper();
   static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
   static bool is_cpuinfo_segv_addr(address pc)  { return _cpuinfo_segv_addr == pc; }
   static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
diff --git a/src/os/linux/vm/os_linux.cpp b/src/os/linux/vm/os_linux.cpp
index 1d7437a730af9cde45d980fcc34c5d5b97dc845c..aa8522389c0e97fb7992f06f325c47afbd9c4505 100644
--- a/src/os/linux/vm/os_linux.cpp
+++ b/src/os/linux/vm/os_linux.cpp
@@ -1963,7 +1963,11 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
     {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
     {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
     {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
+#if defined(VM_LITTLE_ENDIAN)
+    {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"},
+#else
     {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
+#endif
     {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
     {EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
     {EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
diff --git a/src/os/windows/vm/os_windows.cpp b/src/os/windows/vm/os_windows.cpp
index b973c32663c6b02a6ac96659ba749eec132f3e1a..360643c8340f737d16c27623c51b8dd5c7fa85fa 100644
--- a/src/os/windows/vm/os_windows.cpp
+++ b/src/os/windows/vm/os_windows.cpp
@@ -2716,7 +2716,6 @@ address os::win32::fast_jni_accessor_wrapper(BasicType type) {
 }
 #endif
 
-#ifndef PRODUCT
 void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
   // Install a win32 structured exception handler around the test
   // function call so the VM can generate an error dump if needed.
@@ -2727,7 +2726,6 @@ void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
     // Nothing to do.
   }
 }
-#endif
 
 // Virtual Memory
 
diff --git a/src/os/windows/vm/os_windows.hpp b/src/os/windows/vm/os_windows.hpp
index c9c4840bb6cfca416495690496c7be67f6c594ae..f7c3790ff2ceae589234b79814747f1de9f629a3 100644
--- a/src/os/windows/vm/os_windows.hpp
+++ b/src/os/windows/vm/os_windows.hpp
@@ -97,9 +97,7 @@ class win32 {
   static address fast_jni_accessor_wrapper(BasicType);
 #endif
 
-#ifndef PRODUCT
   static void call_test_func_with_wrapper(void (*funcPtr)(void));
-#endif
 
   // filter function to ignore faults on serializations page
   static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
diff --git a/src/os/windows/vm/os_windows.inline.hpp b/src/os/windows/vm/os_windows.inline.hpp
index 5303743aee4415c3211a79e384ca0d8b7fcc5263..a824b84fae15495b73d67314069b4d3f15ac766b 100644
--- a/src/os/windows/vm/os_windows.inline.hpp
+++ b/src/os/windows/vm/os_windows.inline.hpp
@@ -107,9 +107,7 @@ inline int os::close(int fd) {
   return ::close(fd);
 }
 
-#ifndef PRODUCT
-  #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
-            os::win32::call_test_func_with_wrapper(f)
-#endif
+#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
+          os::win32::call_test_func_with_wrapper(f)
 
 #endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
diff --git a/src/os_cpu/linux_ppc/vm/bytes_linux_ppc.inline.hpp b/src/os_cpu/linux_ppc/vm/bytes_linux_ppc.inline.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d1a9e98d677b7ee3b461307e6efc078ff33ff2b1
--- /dev/null
+++ b/src/os_cpu/linux_ppc/vm/bytes_linux_ppc.inline.hpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2014 Google Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+#define OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+
+#if defined(VM_LITTLE_ENDIAN)
+#include <byteswap.h>
+
+// Efficient swapping of data bytes from Java byte
+// ordering to native byte ordering and vice versa.
+inline u2 Bytes::swap_u2(u2 x) { return bswap_16(x); }
+inline u4 Bytes::swap_u4(u4 x) { return bswap_32(x); }
+inline u8 Bytes::swap_u8(u8 x) { return bswap_64(x); }
+#endif // VM_LITTLE_ENDIAN
+
+#endif // OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
diff --git a/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp b/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp
index 7bdb98f161f4d100e36213af3fc62053bbd5ec17..ddebcf530880ffc28edfceebfed32437a6faee47 100644
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/g1/dirtyCardQueue.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index c13eec97499662475d45d38312e8da54c2f78b47..f73eab90b0c1c9133aeca1f7fd30fd44489257fa 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -3547,6 +3547,29 @@ public:
   }
 };
 
+bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
+                                       const HeapRegion* hr,
+                                       const VerifyOption vo) const {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
+  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  default:                            ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
+bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
+                                       const VerifyOption vo) const {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
+  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  default:                            ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
 void G1CollectedHeap::print_on(outputStream* st) const {
   st->print(" %-20s", "garbage-first heap");
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index d50897d35ab46c725150d05f8d4fb060dde30e51..a18436eec9203a8ae17eab5c266cd1033ec65325 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -706,19 +706,7 @@ public:
   // This is a fast test on whether a reference points into the
   // collection set or not. Assume that the reference
   // points into the heap.
-  bool in_cset_fast_test(oop obj) {
-    assert(_in_cset_fast_test != NULL, "sanity");
-    assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
-    // no need to subtract the bottom of the heap from obj,
-    // _in_cset_fast_test is biased
-    uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-    bool ret = _in_cset_fast_test[index];
-    // let's make sure the result is consistent with what the slower
-    // test returns
-    assert( ret || !obj_in_cs(obj), "sanity");
-    assert(!ret ||  obj_in_cs(obj), "sanity");
-    return ret;
-  }
+  inline bool in_cset_fast_test(oop obj);
 
   void clear_cset_fast_test() {
     assert(_in_cset_fast_test_base != NULL, "sanity");
@@ -1264,9 +1252,7 @@ public:
     }
   }
 
-  void old_set_remove(HeapRegion* hr) {
-    _old_set.remove(hr);
-  }
+  inline void old_set_remove(HeapRegion* hr);
 
   size_t non_young_capacity_bytes() {
     return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
@@ -1357,7 +1343,7 @@ public:
   void heap_region_iterate(HeapRegionClosure* blk) const;
 
   // Return the region with the given index. It assumes the index is valid.
-  HeapRegion* region_at(uint index) const { return _hrs.at(index); }
+  inline HeapRegion* region_at(uint index) const;
 
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
@@ -1486,10 +1472,7 @@ public:
     return true;
   }
 
-  bool is_in_young(const oop obj) {
-    HeapRegion* hr = heap_region_containing(obj);
-    return hr != NULL && hr->is_young();
-  }
+  inline bool is_in_young(const oop obj);
 
 #ifdef ASSERT
   virtual bool is_in_partial_collection(const void* p);
@@ -1502,9 +1485,7 @@ public:
   // pre-value that needs to be remembered; for the remembered-set
   // update logging post-barrier, we don't maintain remembered set
   // information for young gen objects.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
-    return is_in_young(new_obj);
-  }
+  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
 
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
@@ -1598,23 +1579,9 @@ public:
 
   // Added if it is NULL it isn't dead.
-  bool is_obj_dead(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_dead(obj, hr);
-  }
+  inline bool is_obj_dead(const oop obj) const;
 
-  bool is_obj_ill(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_ill(obj, hr);
-  }
+  inline bool is_obj_ill(const oop obj) const;
 
   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
@@ -1708,26 +1675,10 @@ public:
 
   bool is_obj_dead_cond(const oop obj,
                         const HeapRegion* hr,
-                        const VerifyOption vo) const {
-    switch (vo) {
-    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
-    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
-    default:                            ShouldNotReachHere();
-    }
-    return false; // keep some compilers happy
-  }
+                        const VerifyOption vo) const;
 
   bool is_obj_dead_cond(const oop obj,
-                        const VerifyOption vo) const {
-    switch (vo) {
-    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
-    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
-    default:                            ShouldNotReachHere();
-    }
-    return false; // keep some compilers happy
-  }
+                        const VerifyOption vo) const;
 
   // Printing
 
@@ -1821,11 +1772,7 @@ protected:
   DirtyCardQueue& dirty_card_queue() { return _dcq; }
   G1SATBCardTableModRefBS* ctbs()    { return _ct_bs; }
 
-  template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
-    if (!from->is_survivor()) {
-      _g1_rem->par_write_ref(from, p, tid);
-    }
-  }
+  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
 
   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
     // If the new value of the field points to the same region or
@@ -1867,13 +1814,7 @@ public:
     refs()->push(ref);
   }
 
-  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
-    if (G1DeferredRSUpdate) {
-      deferred_rs_update(from, p, tid);
-    } else {
-      immediate_rs_update(from, p, tid);
-    }
-  }
+  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
 
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
     HeapWord* obj = NULL;
@@ -1997,54 +1938,7 @@ private:
     return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
   }
 
-  void do_oop_partial_array(oop* p) {
-    assert(has_partial_array_mask(p), "invariant");
-    oop from_obj = clear_partial_array_mask(p);
-
-    assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-    assert(from_obj->is_objArray(), "must be obj array");
-    objArrayOop from_obj_array = objArrayOop(from_obj);
-    // The from-space object contains the real length.
-    int length = from_obj_array->length();
-
-    assert(from_obj->is_forwarded(), "must be forwarded");
-    oop to_obj = from_obj->forwardee();
-    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-    objArrayOop to_obj_array = objArrayOop(to_obj);
-    // We keep track of the next start index in the length field of the
-    // to-space object.
-    int next_index = to_obj_array->length();
-    assert(0 <= next_index && next_index < length,
-           err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-    int start = next_index;
-    int end = length;
-    int remainder = end - start;
-    // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-    if (remainder > 2 * ParGCArrayScanChunk) {
-      end = start + ParGCArrayScanChunk;
-      to_obj_array->set_length(end);
-      // Push the remainder before we process the range in case another
-      // worker has run out of things to do and can steal it.
-      oop* from_obj_p = set_partial_array_mask(from_obj);
-      push_on_queue(from_obj_p);
-    } else {
-      assert(length == end, "sanity");
-      // We'll process the final range for this object. Restore the length
-      // so that the heap remains parsable in case of evacuation failure.
-      to_obj_array->set_length(end);
-    }
-    _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
-    // Process indexes [start,end). It will also process the header
-    // along with the first chunk (i.e., the chunk with start == 0).
-    // Note that at this point the length field of to_obj_array is not
-    // correct given that we are using it to keep track of the next
-    // start index. oop_iterate_range() (thankfully!) ignores the length
-    // field and only relies on the start / end parameters. It does
-    // however return the size of the object which will be incorrect. So
-    // we have to ignore it even if we wanted to use it.
-    to_obj_array->oop_iterate_range(&_scanner, start, end);
-  }
+  inline void do_oop_partial_array(oop* p);
 
   // This method is applied to the fields of the objects that have just been copied.
   template <class T> void do_oop_evac(T* p, HeapRegion* from) {
@@ -2074,26 +1968,9 @@ public:
 
   oop copy_to_survivor_space(oop const obj);
 
-  template <class T> void deal_with_reference(T* ref_to_scan) {
-    if (!has_partial_array_mask(ref_to_scan)) {
-      // Note: we can use "raw" versions of "region_containing" because
-      // "obj_to_scan" is definitely in the heap, and is not in a
-      // humongous region.
-      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-      do_oop_evac(ref_to_scan, r);
-    } else {
-      do_oop_partial_array((oop*)ref_to_scan);
-    }
-  }
+  template <class T> inline void deal_with_reference(T* ref_to_scan);
 
-  void deal_with_reference(StarTask ref) {
-    assert(verify_task(ref), "sanity");
-    if (ref.is_narrow()) {
-      deal_with_reference((narrowOop*)ref);
-    } else {
-      deal_with_reference((oop*)ref);
-    }
-  }
+  inline void deal_with_reference(StarTask ref);
 
 public:
   void trim_queue();
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
index 91289d649c31e2ac6dd2ec98092bfe163bd8caee..e3b8fd061a3b9ae5d6c8743a87c30dacd19a0505 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@@ -36,6 +37,9 @@
 
 // Inline functions for G1CollectedHeap
 
+// Return the region with the given index. It assumes the index is valid.
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
+
 template <class T>
 inline HeapRegion*
 G1CollectedHeap::heap_region_containing(const T addr) const {
@@ -55,6 +59,10 @@ G1CollectedHeap::heap_region_containing_raw(const T addr) const {
   return res;
 }
 
+inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
+  _old_set.remove(hr);
+}
+
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
@@ -151,6 +159,24 @@ inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
   return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
+
+// This is a fast test on whether a reference points into the
+// collection set or not. Assume that the reference
+// points into the heap.
+inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
+  assert(_in_cset_fast_test != NULL, "sanity");
+  assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
+  // no need to subtract the bottom of the heap from obj,
+  // _in_cset_fast_test is biased
+  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
+  bool ret = _in_cset_fast_test[index];
+  // let's make sure the result is consistent with what the slower
+  // test returns
+  assert( ret || !obj_in_cs(obj), "sanity");
+  assert(!ret ||  obj_in_cs(obj), "sanity");
+  return ret;
+}
+
 #ifndef PRODUCT
 // Support for G1EvacuationFailureALot
 
@@ -224,4 +250,121 @@ inline void G1CollectedHeap::reset_evacuation_should_fail() {
 }
 #endif  // #ifndef PRODUCT
 
+inline bool G1CollectedHeap::is_in_young(const oop obj) {
+  HeapRegion* hr = heap_region_containing(obj);
+  return hr != NULL && hr->is_young();
+}
+
+// We don't need barriers for initializing stores to objects
+// in the young gen: for the SATB pre-barrier, there is no
+// pre-value that needs to be remembered; for the remembered-set
+// update logging post-barrier, we don't maintain remembered set
+// information for young gen objects.
+inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
+  return is_in_young(new_obj);
+}
+
+inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
+  const HeapRegion* hr = heap_region_containing(obj);
+  if (hr == NULL) {
+    if (obj == NULL) return false;
+    else return true;
+  }
+  else return is_obj_dead(obj, hr);
+}
+
+inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
+  const HeapRegion* hr = heap_region_containing(obj);
+  if (hr == NULL) {
+    if (obj == NULL) return false;
+    else return true;
+  }
+  else return is_obj_ill(obj, hr);
+}
+
+template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
+  if (!from->is_survivor()) {
+    _g1_rem->par_write_ref(from, p, tid);
+  }
+}
+
+template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
+  if (G1DeferredRSUpdate) {
+    deferred_rs_update(from, p, tid);
+  } else {
+    immediate_rs_update(from, p, tid);
+  }
+}
+
+
+inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
+  assert(has_partial_array_mask(p), "invariant");
+  oop from_obj = clear_partial_array_mask(p);
+
+  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+  assert(from_obj->is_objArray(), "must be obj array");
+  objArrayOop from_obj_array = objArrayOop(from_obj);
+  // The from-space object contains the real length.
+  int length = from_obj_array->length();
+
+  assert(from_obj->is_forwarded(), "must be forwarded");
+  oop to_obj = from_obj->forwardee();
+  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
+  objArrayOop to_obj_array = objArrayOop(to_obj);
+  // We keep track of the next start index in the length field of the
+  // to-space object.
+  int next_index = to_obj_array->length();
+  assert(0 <= next_index && next_index < length,
+         err_msg("invariant, next index: %d, length: %d", next_index, length));
+
+  int start = next_index;
+  int end = length;
+  int remainder = end - start;
+  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
+  if (remainder > 2 * ParGCArrayScanChunk) {
+    end = start + ParGCArrayScanChunk;
+    to_obj_array->set_length(end);
+    // Push the remainder before we process the range in case another
+    // worker has run out of things to do and can steal it.
+    oop* from_obj_p = set_partial_array_mask(from_obj);
+    push_on_queue(from_obj_p);
+  } else {
+    assert(length == end, "sanity");
+    // We'll process the final range for this object. Restore the length
+    // so that the heap remains parsable in case of evacuation failure.
+    to_obj_array->set_length(end);
+  }
+  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+  // Process indexes [start,end). It will also process the header
+  // along with the first chunk (i.e., the chunk with start == 0).
+  // Note that at this point the length field of to_obj_array is not
+  // correct given that we are using it to keep track of the next
+  // start index. oop_iterate_range() (thankfully!) ignores the length
+  // field and only relies on the start / end parameters. It does
+  // however return the size of the object which will be incorrect. So
+  // we have to ignore it even if we wanted to use it.
+  to_obj_array->oop_iterate_range(&_scanner, start, end);
+}
+
+template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
+  if (!has_partial_array_mask(ref_to_scan)) {
+    // Note: we can use "raw" versions of "region_containing" because
+    // "obj_to_scan" is definitely in the heap, and is not in a
+    // humongous region.
+    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+    do_oop_evac(ref_to_scan, r);
+  } else {
+    do_oop_partial_array((oop*)ref_to_scan);
+  }
+}
+
+inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
+  assert(verify_task(ref), "sanity");
+  if (ref.is_narrow()) {
+    deal_with_reference((narrowOop*)ref);
+  } else {
+    deal_with_reference((oop*)ref);
+  }
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
diff --git a/src/share/vm/gc_implementation/g1/sparsePRT.hpp b/src/share/vm/gc_implementation/g1/sparsePRT.hpp
index 86d5db16298c2a68fcebd7fd4643bd371759d36d..5cc884621a608c22bd4e5d19f8d9886f49386b1b 100644
--- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
 
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "memory/allocation.hpp"
 #include "memory/cardTableModRefBS.hpp"
diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp
index 6ee15100612bfc91ba8c3a03ddf871055c83379f..eb7de64f4cdae126662a4bf6c45ac5aad1446b93 100644
--- a/src/share/vm/prims/jni.cpp
+++ b/src/share/vm/prims/jni.cpp
@@ -5208,7 +5208,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
 }
 
 #ifndef PRODUCT
-  #ifndef TARGET_OS_FAMILY_windows
+  #ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
     #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
   #endif
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index 9f28fc2f1bfbd852317600009e792c8e58d412d5..7441f26100e029fdb2bf1b4ed45068497960090a 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1880,24 +1880,22 @@ static bool verify_serial_gc_flags() {
 // check if do gclog rotation
 // +UseGCLogFileRotation is a must,
 // no gc log rotation when log file not supplied or
-// NumberOfGCLogFiles is 0, or GCLogFileSize is 0
+// NumberOfGCLogFiles is 0
 void check_gclog_consistency() {
   if (UseGCLogFileRotation) {
-    if ((Arguments::gc_log_filename() == NULL) ||
-        (NumberOfGCLogFiles == 0)  ||
-        (GCLogFileSize == 0)) {
+    if ((Arguments::gc_log_filename() == NULL) || (NumberOfGCLogFiles == 0)) {
       jio_fprintf(defaultStream::output_stream(),
-                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>[k|K|m|M|g|G]\n"
-                  "where num_of_file > 0 and num_of_size > 0\n"
+                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files>\n"
+                  "where num_of_file > 0\n"
                   "GC log rotation is turned off\n");
       UseGCLogFileRotation = false;
     }
   }
 
-  if (UseGCLogFileRotation && GCLogFileSize < 8*K) {
-        FLAG_SET_CMDLINE(uintx, GCLogFileSize, 8*K);
-        jio_fprintf(defaultStream::output_stream(),
-                    "GCLogFileSize changed to minimum 8K\n");
+  if (UseGCLogFileRotation && (GCLogFileSize != 0) && (GCLogFileSize < 8*K)) {
+    FLAG_SET_CMDLINE(uintx, GCLogFileSize, 8*K);
+    jio_fprintf(defaultStream::output_stream(),
+                "GCLogFileSize changed to minimum 8K\n");
   }
 }
diff --git a/src/share/vm/runtime/globals.cpp b/src/share/vm/runtime/globals.cpp
index 74f7acdbab424cc0e81c5b73aa9b88702ae1a913..801f7b89addf54a48dc5e7f4376122aa75426f5a 100644
--- a/src/share/vm/runtime/globals.cpp
+++ b/src/share/vm/runtime/globals.cpp
@@ -325,7 +325,7 @@ void Flag::print_on(outputStream* st, bool withComments) {
     else
       st->print("%-16s", "");
   }
-  st->print("%-20");
+  st->print("%-20s", " ");
   print_kind(st);
 
   if (withComments) {
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index ff05111e904eb60d3542030affcccc64dec4cb16..9927eb79fba4b94ccd389582ec4d687c23d870e3 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -2422,9 +2422,9 @@ class CommandLineFlags {
           "Number of gclog files in rotation "                             \
           "(default: 0, no rotation)")                                     \
                                                                            \
-  product(uintx, GCLogFileSize, 0,                                         \
-          "GC log file size (default: 0 bytes, no rotation). "             \
-          "It requires UseGCLogFileRotation")                              \
+  product(uintx, GCLogFileSize, 8*K,                                       \
+          "GC log file size, requires UseGCLogFileRotation. "              \
+          "Set to 0 to only trigger rotation via jcmd")                    \
                                                                            \
   /* JVMTI heap profiling */                                               \
                                                                            \
diff --git a/src/share/vm/runtime/safepoint.cpp b/src/share/vm/runtime/safepoint.cpp
index 48e523f5aa0deece0a5f93825f247d65649f9e57..cac23e97e906cceff28845b63c970291ab7a4218 100644
--- a/src/share/vm/runtime/safepoint.cpp
+++ b/src/share/vm/runtime/safepoint.cpp
@@ -535,7 +535,7 @@ void SafepointSynchronize::do_cleanup_tasks() {
 
   // rotate log files?
   if (UseGCLogFileRotation) {
-    gclog_or_tty->rotate_log();
+    gclog_or_tty->rotate_log(false);
   }
 
   if (MemTracker::is_on()) {
diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp
index ca616a52cf4c6887828fa3e33641026cd1440526..0cb3a18f972b3afe2d78432680b5e4ac8718088c 100644
--- a/src/share/vm/runtime/vm_operations.hpp
+++ b/src/share/vm/runtime/vm_operations.hpp
@@ -94,6 +94,7 @@
   template(JFRCheckpoint)                         \
   template(Exit)                                  \
   template(LinuxDllLoad)                          \
+  template(RotateGCLog)                           \
 
 class VM_Operation: public CHeapObj<mtInternal> {
  public:
@@ -397,4 +398,15 @@ class VM_Exit: public VM_Operation {
   void doit();
 };
 
+
+class VM_RotateGCLog: public VM_Operation {
+ private:
+  outputStream* _out;
+
+ public:
+  VM_RotateGCLog(outputStream* st) : _out(st) {}
+  VMOp_Type type() const { return VMOp_RotateGCLog; }
+  void doit() { gclog_or_tty->rotate_log(true, _out); }
+};
+
 #endif // SHARE_VM_RUNTIME_VM_OPERATIONS_HPP
diff --git a/src/share/vm/services/diagnosticCommand.cpp b/src/share/vm/services/diagnosticCommand.cpp
index 71b552801aa587ed71b7219f18e74ad663b17d99..1245b5aaf885b3ead1e2c2f78267b4cba5e47801 100644
--- a/src/share/vm/services/diagnosticCommand.cpp
+++ b/src/share/vm/services/diagnosticCommand.cpp
@@ -53,6 +53,7 @@ void DCmdRegistrant::register_dcmds(){
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassStatsDCmd>(full_export, true, false));
 #endif // INCLUDE_SERVICES
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RotateGCLogDCmd>(full_export, true, false));
 
   // Enhanced JMX Agent Support
   // These commands won't be exported via the DiagnosticCommandMBean until an
@@ -650,3 +651,11 @@ void JMXStopRemoteDCmd::execute(DCmdSource source, TRAPS) {
     JavaCalls::call_static(&result, ik, vmSymbols::stopRemoteAgent_name(), vmSymbols::void_method_signature(), CHECK);
 }
 
+void RotateGCLogDCmd::execute(DCmdSource source, TRAPS) {
+  if (UseGCLogFileRotation) {
+    VM_RotateGCLog rotateop(output());
+    VMThread::execute(&rotateop);
+  } else {
+    output()->print_cr("Target VM does not support GC log file rotation.");
+  }
+}
diff --git a/src/share/vm/services/diagnosticCommand.hpp b/src/share/vm/services/diagnosticCommand.hpp
index 5485b119decc46e2ab50ae5cd34c76173d3a21fa..2ce4109986c03d67807ded93a2348411547ed732 100644
--- a/src/share/vm/services/diagnosticCommand.hpp
+++ b/src/share/vm/services/diagnosticCommand.hpp
@@ -360,4 +360,21 @@ public:
   virtual void execute(DCmdSource source, TRAPS);
 };
 
+class RotateGCLogDCmd : public DCmd {
+public:
+  RotateGCLogDCmd(outputStream* output, bool heap) : DCmd(output, heap) {}
+  static const char* name() { return "GC.rotate_log"; }
+  static const char* description() {
+    return "Force the GC log file to be rotated.";
+  }
+  static const char* impact() { return "Low"; }
+  virtual void execute(DCmdSource source, TRAPS);
+  static int num_arguments() { return 0; }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "control", NULL};
+    return p;
+  }
+};
+
 #endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_HPP
diff --git a/src/share/vm/utilities/ostream.cpp b/src/share/vm/utilities/ostream.cpp
index 90b3559e48c6ad81837da9d174fbb775be881299..e09dfccf123468d5ed8509979c2947e7a7c8af22 100644
--- a/src/share/vm/utilities/ostream.cpp
+++ b/src/share/vm/utilities/ostream.cpp
@@ -662,13 +662,13 @@ void gcLogFileStream::write(const char* s, size_t len) {
 // write to gc log file at safepoint. If in future, changes made for mutator threads or
 // concurrent GC threads to run parallel with VMThread at safepoint, write and rotate_log
 // must be synchronized.
-void gcLogFileStream::rotate_log() {
+void gcLogFileStream::rotate_log(bool force, outputStream* out) {
   char time_msg[FILENAMEBUFLEN];
   char time_str[EXTRACHARLEN];
   char current_file_name[FILENAMEBUFLEN];
   char renamed_file_name[FILENAMEBUFLEN];
 
-  if (_bytes_written < (jlong)GCLogFileSize) {
+  if (!should_rotate(force)) {
     return;
   }
@@ -685,6 +685,11 @@ void gcLogFileStream::rotate_log(bool force, outputStream* out) {
     jio_snprintf(time_msg, sizeof(time_msg), "File  %s rotated at %s\n",
                  _file_name, os::local_time_string((char *)time_str, sizeof(time_str)));
     write(time_msg, strlen(time_msg));
+
+    if (out != NULL) {
+      out->print(time_msg);
+    }
+
     dump_loggc_header();
     return;
   }
@@ -706,12 +711,18 @@ void gcLogFileStream::rotate_log(bool force, outputStream* out) {
                  _file_name, _cur_file_num);
     jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
                  _file_name, _cur_file_num);
-    jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file has reached the"
-                           " maximum size. Saved as %s\n",
-                           os::local_time_string((char *)time_str, sizeof(time_str)),
-                           renamed_file_name);
+
+    const char* msg = force ? "GC log rotation request has been received."
+                            : "GC log file has reached the maximum size.";
+    jio_snprintf(time_msg, sizeof(time_msg), "%s %s Saved as %s\n",
+                 os::local_time_string((char *)time_str, sizeof(time_str)),
+                 msg, renamed_file_name);
     write(time_msg, strlen(time_msg));
 
+    if (out != NULL) {
+      out->print(time_msg);
+    }
+
     fclose(_file);
     _file = NULL;
@@ -752,6 +763,11 @@ void gcLogFileStream::rotate_log(bool force, outputStream* out) {
              os::local_time_string((char *)time_str, sizeof(time_str)),
              current_file_name);
   write(time_msg, strlen(time_msg));
+
+  if (out != NULL) {
+    out->print(time_msg);
+  }
+
   dump_loggc_header();
   // remove the existing file
   if (access(current_file_name, F_OK) == 0) {
diff --git a/src/share/vm/utilities/ostream.hpp b/src/share/vm/utilities/ostream.hpp
index 20e6f30bf37d7d536a80323e39d59eb7d79a4e64..b783787e02a6dadd8480a6957b0aabab192db4a4 100644
--- a/src/share/vm/utilities/ostream.hpp
+++ b/src/share/vm/utilities/ostream.hpp
@@ -115,7 +115,7 @@ class outputStream : public ResourceObj {
    // flushing
    virtual void flush() {}
    virtual void write(const char* str, size_t len) = 0;
-   virtual void rotate_log() {} // GC log rotation
+   virtual void rotate_log(bool force, outputStream* out = NULL) {} // GC log rotation
    virtual ~outputStream() {}   // close properly on deletion
 
    void dec_cr() { dec(); cr(); }
@@ -240,8 +240,15 @@ class gcLogFileStream : public fileStream {
   gcLogFileStream(const char* file_name);
   ~gcLogFileStream();
   virtual void write(const char* c, size_t len);
-  virtual void rotate_log();
+  virtual void rotate_log(bool force, outputStream* out = NULL);
   void dump_loggc_header();
+
+  /* If "force" is true, force log file rotation from outside the JVM */
+  bool should_rotate(bool force) {
+    return force ||
+           ((GCLogFileSize != 0) && ((uintx)_bytes_written >= GCLogFileSize));
+  }
+
 };
diff --git a/test/compiler/codegen/C1NullCheckOfNullStore.java b/test/compiler/codegen/C1NullCheckOfNullStore.java
new file mode 100644
index 0000000000000000000000000000000000000000..0bec2c1ab42ac9d7e714f627022788d694aefb46
--- /dev/null
+++ b/test/compiler/codegen/C1NullCheckOfNullStore.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +/* + * @test + * @bug 8039043 + * @summary Null check is placed in a wrong place when storing a null to an object field on x64 with compressed oops off + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CompileCommand=compileonly,C1NullCheckOfNullStore::test -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-UseCompressedOops C1NullCheckOfNullStore + * + */ + +public class C1NullCheckOfNullStore { + private static class Foo { + Object bar; + } + static private void test(Foo x) { + x.bar = null; + } + static public void main(String args[]) { + Foo x = new Foo(); + for (int i = 0; i < 10000; i++) { + test(x); + } + boolean gotNPE = false; + try { + for (int i = 0; i < 10000; i++) { + test(null); + } + } + catch(NullPointerException e) { + gotNPE = true; + } + if (!gotNPE) { + throw new Error("Expecting a NullPointerException"); + } + } +} diff --git a/test/gc/TestGCLogRotationViaJcmd.java b/test/gc/TestGCLogRotationViaJcmd.java new file mode 100644 index 0000000000000000000000000000000000000000..fd71d368b887e1a87880867f1efe50d0e4be7bef --- /dev/null +++ b/test/gc/TestGCLogRotationViaJcmd.java @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test TestGCLogRotationViaJcmd.java + * @bug 7090324 + * @summary test for gc log rotation via jcmd + * @library /testlibrary + * @run main/othervm -Xloggc:test.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=3 TestGCLogRotationViaJcmd + * + */ +import com.oracle.java.testlibrary.*; +import java.io.File; +import java.io.FilenameFilter; + +public class TestGCLogRotationViaJcmd { + + static final File currentDirectory = new File("."); + static final String LOG_FILE_NAME = "test.log"; + static final int NUM_LOGS = 3; + + static FilenameFilter logFilter = new FilenameFilter() { + @Override + public boolean accept(File dir, String name) { + return name.startsWith(LOG_FILE_NAME); + } + }; + + public static void main(String[] args) throws Exception { + // Grab the pid from the current java process + String pid = Integer.toString(ProcessTools.getProcessId()); + + // Create a JDKToolLauncher + JDKToolLauncher jcmd = JDKToolLauncher.create("jcmd") + .addToolArg(pid) + .addToolArg("GC.rotate_log"); + + for (int times = 1; times < NUM_LOGS; times++) { + // Run jcmd GC.rotate_log + ProcessBuilder pb = new ProcessBuilder(jcmd.getCommand()); + + // Make sure we didn't crash + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + // GC log check + File[] logs = currentDirectory.listFiles(logFilter); + if (logs.length != NUM_LOGS) { + throw new Error("There are only " + logs.length + + " logs instead " + NUM_LOGS); + } + + } + +} +