From 376db8da44f0c965ff3b4027ee5fba2be08fdb03 Mon Sep 17 00:00:00 2001
From: stefank
Date: Mon, 25 Apr 2016 11:36:14 +0200
Subject: [PATCH] 8017629: G1: UseSHM in combination with a G1HeapRegionSize >
 os::large_page_size() falls back to use small pages

Reviewed-by: pliden, sjohanss, stuefe
---
 src/os/linux/vm/os_linux.cpp | 243 ++++++++++++++++++++++-------------
 1 file changed, 156 insertions(+), 87 deletions(-)

diff --git a/src/os/linux/vm/os_linux.cpp b/src/os/linux/vm/os_linux.cpp
index 49b8dc1fc..af661443d 100644
--- a/src/os/linux/vm/os_linux.cpp
+++ b/src/os/linux/vm/os_linux.cpp
@@ -3047,6 +3047,48 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
   return addr == MAP_FAILED ? NULL : addr;
 }
 
+// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
+// (req_addr != NULL) or with a given alignment.
+//  - bytes shall be a multiple of alignment.
+//  - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
+//  - alignment sets the alignment at which memory shall be allocated.
+//    It must be a multiple of allocation granularity.
+// Returns address of memory or NULL. If req_addr was not NULL, will only return
+// req_addr or NULL.
+static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
+
+  size_t extra_size = bytes;
+  if (req_addr == NULL && alignment > 0) {
+    extra_size += alignment;
+  }
+
+  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
+                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
+                               -1, 0);
+  if (start == MAP_FAILED) {
+    start = NULL;
+  } else {
+    if (req_addr != NULL) {
+      if (start != req_addr) {
+        ::munmap(start, extra_size);
+        start = NULL;
+      }
+    } else {
+      char* const start_aligned = (char*) align_ptr_up(start, alignment);
+      char* const end_aligned = start_aligned + bytes;
+      char* const end = start + extra_size;
+      if (start_aligned > start) {
+        ::munmap(start, start_aligned - start);
+      }
+      if (end_aligned < end) {
+        ::munmap(end_aligned, end - end_aligned);
+      }
+      start = start_aligned;
+    }
+  }
+  return start;
+}
+
 // Don't update _highest_vm_reserved_address, because there might be memory
 // regions above addr + size. If so, releasing a memory region only creates
 // a hole in the address space, it doesn't help prevent heap-stack collision.
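
The over-reserve-and-trim technique behind anon_mmap_aligned() is worth seeing in isolation: reserve bytes + alignment of PROT_NONE address space, round the result up to the requested alignment, then munmap the misaligned head and the unused tail. The following minimal standalone sketch shows the same idea outside the VM; reserve_aligned() is an illustrative name, not part of the patch:

#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

// Reserve 'bytes' of PROT_NONE address space aligned to 'alignment'
// (a power of two, multiple of the page size): over-reserve by
// 'alignment' extra bytes, then unmap the head and tail slack.
static char* reserve_aligned(size_t bytes, size_t alignment) {
  size_t extra = bytes + alignment;
  char* start = (char*)mmap(NULL, extra, PROT_NONE,
                            MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (start == MAP_FAILED) {
    return NULL;
  }
  uintptr_t mask = (uintptr_t)alignment - 1;
  char* aligned = (char*)(((uintptr_t)start + mask) & ~mask);
  char* end = start + extra;
  if (aligned > start) {
    munmap(start, aligned - start);                   // trim misaligned head
  }
  if (aligned + bytes < end) {
    munmap(aligned + bytes, end - (aligned + bytes)); // trim unused tail
  }
  return aligned;
}

int main() {
  const size_t M = 1024 * 1024;
  char* p = reserve_aligned(32 * M, 16 * M);
  printf("16M-aligned reservation at %p\n", (void*)p);
  return 0;
}
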
@@ -3331,54 +3373,133 @@ void os::large_page_init() {
   #define SHM_HUGETLB 04000
 #endif
 
+#define shm_warning_format(format, ...)              \
+  do {                                               \
+    if (UseLargePages &&                             \
+        (!FLAG_IS_DEFAULT(UseLargePages) ||          \
+         !FLAG_IS_DEFAULT(UseSHM) ||                 \
+         !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {  \
+      warning(format, __VA_ARGS__);                  \
+    }                                                \
+  } while (0)
+
+#define shm_warning(str) shm_warning_format("%s", str)
+
+#define shm_warning_with_errno(str)                \
+  do {                                             \
+    int err = errno;                               \
+    shm_warning_format(str " (error = %d)", err);  \
+  } while (0)
+
+static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
+  assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");
+
+  if (!is_size_aligned(alignment, SHMLBA)) {
+    assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
+    return NULL;
+  }
+
+  // To ensure that we get 'alignment' aligned memory from shmat,
+  // we pre-reserve aligned virtual memory and then attach to that.
+
+  char* pre_reserved_addr = anon_mmap_aligned(bytes, alignment, NULL);
+  if (pre_reserved_addr == NULL) {
+    // Couldn't pre-reserve aligned memory.
+    shm_warning("Failed to pre-reserve aligned memory for shmat.");
+    return NULL;
+  }
+
+  // SHM_REMAP is needed to allow shmat to map over an existing mapping.
+  char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);
+
+  if ((intptr_t)addr == -1) {
+    int err = errno;
+    shm_warning_with_errno("Failed to attach shared memory.");
+
+    assert(err != EACCES, "Unexpected error");
+    assert(err != EIDRM, "Unexpected error");
+    assert(err != EINVAL, "Unexpected error");
+
+    // Since we don't know if the kernel unmapped the pre-reserved memory area,
+    // we can't unmap it, since that would potentially unmap memory that was
+    // mapped from other threads.
+    return NULL;
+  }
+
+  return addr;
+}
+
+static char* shmat_at_address(int shmid, char* req_addr) {
+  if (!is_ptr_aligned(req_addr, SHMLBA)) {
+    assert(false, "Requested address needs to be SHMLBA aligned");
+    return NULL;
+  }
+
+  char* addr = (char*)shmat(shmid, req_addr, 0);
+
+  if ((intptr_t)addr == -1) {
+    shm_warning_with_errno("Failed to attach shared memory.");
+    return NULL;
+  }
+
+  return addr;
+}
+
+static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
+  // If a req_addr has been provided, we assume that the caller has already aligned the address.
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
+    assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
+    return shmat_at_address(shmid, req_addr);
+  }
+
+  // Since shmid has been set up with SHM_HUGETLB, shmat will automatically
+  // return large page size aligned memory addresses when req_addr == NULL.
+  // However, if the alignment is larger than the large page size, we have
+  // to manually ensure that the memory returned is 'alignment' aligned.
+  if (alignment > os::large_page_size()) {
+    assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
+    return shmat_with_alignment(shmid, bytes, alignment);
+  } else {
+    return shmat_at_address(shmid, NULL);
+  }
+}
+
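
shmat_with_alignment() above is the heart of the fix: when the requested alignment exceeds the huge page size, it pre-reserves an aligned range with small pages and lets shmat(SHM_REMAP) map the SysV huge-page segment over it. A hypothetical standalone demo of that sequence follows, assuming 2 MB huge pages are configured and using a 4 MB alignment; all names and constants here are illustrative:

#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#ifndef SHM_HUGETLB
  #define SHM_HUGETLB 04000   // from <linux/shm.h>
#endif
#ifndef SHM_REMAP
  #define SHM_REMAP 040000    // from <linux/shm.h>
#endif

int main() {
  const size_t alignment = 4 * 1024 * 1024;  // larger than an assumed 2 MB huge page
  const size_t bytes     = alignment;        // bytes must be a multiple of alignment

  // Step 1: pre-reserve an 'alignment'-aligned PROT_NONE range with small
  // pages, using the same over-reserve-and-trim trick as anon_mmap_aligned().
  size_t extra = bytes + alignment;
  char* start = (char*)mmap(NULL, extra, PROT_NONE,
                            MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (start == MAP_FAILED) { perror("mmap"); return 1; }
  uintptr_t mask = (uintptr_t)alignment - 1;
  char* aligned = (char*)(((uintptr_t)start + mask) & ~mask);
  if (aligned > start) {
    munmap(start, aligned - start);
  }
  if (aligned + bytes < start + extra) {
    munmap(aligned + bytes, (start + extra) - (aligned + bytes));
  }

  // Step 2: create a huge-page segment and attach it over the reservation.
  // SHM_REMAP allows shmat to replace the existing small-page mapping.
  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
  if (shmid == -1) { perror("shmget (no huge pages configured?)"); return 1; }

  char* addr = (char*)shmat(shmid, aligned, SHM_REMAP);
  shmctl(shmid, IPC_RMID, NULL);  // segment is reclaimed after the last detach
  if (addr == (char*)-1) { fprintf(stderr, "shmat: %s\n", strerror(errno)); return 1; }

  printf("attached %zu bytes of huge pages at %p\n", bytes, (void*)addr);
  shmdt(addr);
  return 0;
}

Calling shmctl(IPC_RMID) immediately after shmat() mirrors reserve_memory_special_shm() below: the segment is marked for deletion and is reclaimed once the last attachment is gone, so a crashing process cannot leak it.
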
 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
                                             char* req_addr, bool exec) {
   // "exec" is passed in but not used. Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
   assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+  assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");
 
-  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+  if (!is_size_aligned(bytes, os::large_page_size())) {
     return NULL; // Fallback to small pages.
   }
 
-  key_t key = IPC_PRIVATE;
-  char *addr;
-
-  bool warn_on_failure = UseLargePages &&
-                        (!FLAG_IS_DEFAULT(UseLargePages) ||
-                         !FLAG_IS_DEFAULT(UseSHM) ||
-                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
-                        );
-  char msg[128];
-
   // Create a large shared memory region to attach to based on size.
-  // Currently, size is the total size of the heap
-  int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
+  // Currently, size is the total size of the heap.
+  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
   if (shmid == -1) {
-    // Possible reasons for shmget failure:
-    // 1. shmmax is too small for Java heap.
-    //    > check shmmax value: cat /proc/sys/kernel/shmmax
-    //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
-    // 2. not enough large page memory.
-    //    > check available large pages: cat /proc/meminfo
-    //    > increase amount of large pages:
-    //          echo new_value > /proc/sys/vm/nr_hugepages
-    //      Note 1: different Linux may use different name for this property,
-    //            e.g. on Redhat AS-3 it is "hugetlb_pool".
-    //      Note 2: it's possible there's enough physical memory available but
-    //            they are so fragmented after a long run that they can't
-    //            coalesce into large pages. Try to reserve large pages when
-    //            the system is still "fresh".
-    if (warn_on_failure) {
-      jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
-      warning("%s", msg);
-    }
-    return NULL;
+    // Possible reasons for shmget failure:
+    // 1. shmmax is too small for the Java heap.
+    //    > check shmmax value: cat /proc/sys/kernel/shmmax
+    //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
+    // 2. not enough large page memory.
+    //    > check available large pages: cat /proc/meminfo
+    //    > increase amount of large pages:
+    //          echo new_value > /proc/sys/vm/nr_hugepages
+    //      Note 1: different Linux versions may use a different name for this property,
+    //            e.g. on Redhat AS-3 it is "hugetlb_pool".
+    //      Note 2: it's possible there's enough physical memory available but
+    //            it is so fragmented after a long run that it can't be
+    //            coalesced into large pages. Try to reserve large pages when
+    //            the system is still "fresh".
+    shm_warning_with_errno("Failed to reserve shared memory.");
+    return NULL;
   }
 
-  // attach to the region
-  addr = (char*)shmat(shmid, req_addr, 0);
-  int err = errno;
+  // Attach to the region.
+  char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);
 
   // Remove shmid. If shmat() is successful, the actual shared memory segment
   // will be deleted when it's detached by shmdt() or when the process
@@ -3386,14 +3507,6 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char
   // segment immediately.
   shmctl(shmid, IPC_RMID, NULL);
 
-  if ((intptr_t)addr == -1) {
-    if (warn_on_failure) {
-      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
-      warning("%s", msg);
-    }
-    return NULL;
-  }
-
   return addr;
 }
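
The comment block above recommends inspecting /proc/meminfo by hand when shmget() fails. The same check can be scripted; the helper below is an illustrative sketch, not part of the patch:

#include <stdio.h>

// Programmatic version of the 'cat /proc/meminfo' check suggested above:
// report how many huge pages are currently free, or -1 if unknown.
static long free_huge_pages() {
  FILE* f = fopen("/proc/meminfo", "r");
  if (f == NULL) {
    return -1;
  }
  char line[128];
  long pages = -1;
  while (fgets(line, sizeof(line), f) != NULL) {
    if (sscanf(line, "HugePages_Free: %ld", &pages) == 1) {
      break;
    }
  }
  fclose(f);
  return pages;
}

int main() {
  printf("HugePages_Free: %ld\n", free_huge_pages());
  return 0;
}
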
@@ -3433,50 +3546,6 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_
   return addr;
 }
 
-// Helper for os::Linux::reserve_memory_special_huge_tlbfs_mixed().
-// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
-// (req_addr != NULL) or with a given alignment.
-//  - bytes shall be a multiple of alignment.
-//  - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
-//  - alignment sets the alignment at which memory shall be allocated.
-//    It must be a multiple of allocation granularity.
-// Returns address of memory or NULL. If req_addr was not NULL, will only return
-// req_addr or NULL.
-static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
-
-  size_t extra_size = bytes;
-  if (req_addr == NULL && alignment > 0) {
-    extra_size += alignment;
-  }
-
-  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
-                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
-                               -1, 0);
-  if (start == MAP_FAILED) {
-    start = NULL;
-  } else {
-    if (req_addr != NULL) {
-      if (start != req_addr) {
-        ::munmap(start, extra_size);
-        start = NULL;
-      }
-    } else {
-      char* const start_aligned = (char*) align_ptr_up(start, alignment);
-      char* const end_aligned = start_aligned + bytes;
-      char* const end = start + extra_size;
-      if (start_aligned > start) {
-        ::munmap(start, start_aligned - start);
-      }
-      if (end_aligned < end) {
-        ::munmap(end_aligned, end - end_aligned);
-      }
-      start = start_aligned;
-    }
-  }
-  return start;
-
-}
-
 // Reserve memory using mmap(MAP_HUGETLB).
 //  - bytes shall be a multiple of alignment.
 //  - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
-- 
GitLab
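
Because the bug fixed here was a silent fallback to small pages, it can be useful to verify independently that a reservation really is huge-page backed. One way, sketched below for a Linux /proc filesystem, is to read the KernelPageSize field of the smaps entry covering the address: hugetlb-backed mappings report the huge page size (e.g. 2048 kB) rather than 4 kB. The helper name is illustrative:

#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

// Return the KernelPageSize (in kB) of the /proc/self/smaps entry that
// contains 'addr', or -1 if it cannot be determined.
static long kernel_page_size_kb(void* addr) {
  FILE* f = fopen("/proc/self/smaps", "r");
  if (f == NULL) {
    return -1;
  }
  char line[256];
  bool in_target = false;
  long kb = -1;
  while (fgets(line, sizeof(line), f) != NULL) {
    unsigned long lo, hi;
    if (sscanf(line, "%lx-%lx", &lo, &hi) == 2) {
      // A new mapping header: check whether it covers 'addr'.
      in_target = ((uintptr_t)addr >= lo && (uintptr_t)addr < hi);
    } else if (in_target && sscanf(line, "KernelPageSize: %ld kB", &kb) == 1) {
      break;
    }
  }
  fclose(f);
  return kb;
}

int main() {
  // An ordinary small-page mapping should report 4 kB here; a mapping
  // attached via shmget(SHM_HUGETLB)/shmat would report e.g. 2048 kB.
  void* p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  printf("KernelPageSize = %ld kB\n", kernel_page_size_kb(p));
  return 0;
}
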