Commit cead20d1 authored by ctornqvi

Merge

......@@ -2074,6 +2074,13 @@ void bsd_wrap_code(char* base, size_t size) {
}
}
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, size, exec,
strerror(err), err);
}
// NOTE: Bsd kernel does not really reserve the pages for us.
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
......@@ -2082,18 +2089,45 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
#ifdef __OpenBSD__
// XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
return ::mprotect(addr, size, prot) == 0;
if (::mprotect(addr, size, prot) == 0) {
return true;
}
#else
uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
return res != (uintptr_t) MAP_FAILED;
if (res != (uintptr_t) MAP_FAILED) {
return true;
}
#endif
}
// Warn about any commit errors we see in non-product builds just
// in case mmap() doesn't work as described on the man page.
NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)
return false;
}
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return commit_memory(addr, size, exec);
// alignment_hint is ignored on this OS
return pd_commit_memory(addr, size, exec);
}
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
if (!pd_commit_memory(addr, size, exec)) {
// add extra info in product mode for vm_exit_out_of_memory():
PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
}
}
void os::pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint, bool exec,
const char* mesg) {
// alignment_hint is ignored on this OS
pd_commit_memory_or_exit(addr, size, exec, mesg);
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
......@@ -2148,7 +2182,7 @@ bool os::pd_uncommit_memory(char* addr, size_t size) {
}
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size);
return os::commit_memory(addr, size, !ExecMem);
}
// If this is a growable mapping, remove the guard pages entirely by
......@@ -3512,7 +3546,7 @@ jint os::init_2(void)
if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
#ifndef PRODUCT
......
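The BSD hunks above change pd_commit_memory() so that a failing mprotect()/mmap() no longer returns directly but falls through to a diagnostic built from the saved errno. As a point of reference, here is a minimal standalone sketch of the same reserve-then-commit idiom and errno reporting, written against plain POSIX rather than the HotSpot os:: layer; all names in it are illustrative, not part of the patch.

// Illustrative only -- plain POSIX, not HotSpot code. Address space is reserved
// inaccessibly, then "committed" by remapping with MAP_FIXED, and a failure is
// reported with the saved errno, in the spirit of warn_fail_commit_memory().
#include <sys/mman.h>
#include <cstddef>
#include <cerrno>
#include <cstdio>
#include <cstring>

// Reserve address space without making it usable (PROT_NONE).
static char* reserve_region(size_t size) {
  void* p = ::mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? NULL : (char*)p;
}

// Commit a range inside the reservation by remapping it read/write.
static bool commit_region(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ | PROT_WRITE | PROT_EXEC : PROT_READ | PROT_WRITE;
  void* res = ::mmap(addr, size, prot, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
  if (res != MAP_FAILED) {
    return true;
  }
  int err = errno;  // save errno before any later call can overwrite it
  fprintf(stderr, "commit(%p, %zu, %d) failed; error='%s' (errno=%d)\n",
          (void*)addr, size, exec, strerror(err), err);
  return false;
}

int main() {
  const size_t sz = 16 * 4096;
  char* base = reserve_region(sz);
  if (base != NULL && commit_region(base, 4096, false)) {
    base[0] = 1;  // the committed page is now writable
  }
  if (base != NULL) ::munmap(base, sz);
  return 0;
}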
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -60,7 +60,7 @@ static char* create_standard_memory(size_t size) {
}
// commit memory
if (!os::commit_memory(mapAddress, size)) {
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
......
......@@ -2612,11 +2612,49 @@ void linux_wrap_code(char* base, size_t size) {
}
}
static bool recoverable_mmap_error(int err) {
// See if the error is one we can let the caller handle. This
// list of errno values comes from JBS-6843484. I can't find a
// Linux man page that documents this specific set of errno
// values so while this list currently matches Solaris, it may
// change as we gain experience with this failure mode.
switch (err) {
case EBADF:
case EINVAL:
case ENOTSUP:
// let the caller deal with these errors
return true;
default:
// Any remaining errors on this OS can cause our reserved mapping
// to be lost. That can cause confusion where different data
// structures think they have the same memory mapped. The worst
// scenario is if both the VM and a library think they have the
// same memory mapped.
return false;
}
}
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, size, exec,
strerror(err), err);
}
static void warn_fail_commit_memory(char* addr, size_t size,
size_t alignment_hint, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size,
alignment_hint, exec, strerror(err), err);
}
// NOTE: Linux kernel does not really reserve the pages for us.
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
// problem.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
......@@ -2624,9 +2662,32 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
if (UseNUMAInterleaving) {
numa_make_global(addr, size);
}
return true;
return 0;
}
int err = errno; // save errno from mmap() call above
if (!recoverable_mmap_error(err)) {
warn_fail_commit_memory(addr, size, exec, err);
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
}
return err;
}
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
return os::Linux::commit_memory_impl(addr, size, exec) == 0;
}
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
int err = os::Linux::commit_memory_impl(addr, size, exec);
if (err != 0) {
// the caller wants all commit errors to exit with the specified mesg:
warn_fail_commit_memory(addr, size, exec, err);
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
}
return false;
}
// Define MAP_HUGETLB here so we can build HotSpot on old systems.
......@@ -2639,8 +2700,9 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
#define MADV_HUGEPAGE 14
#endif
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
int os::Linux::commit_memory_impl(char* addr, size_t size,
size_t alignment_hint, bool exec) {
int err;
if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
uintptr_t res =
......@@ -2651,16 +2713,46 @@ bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
if (UseNUMAInterleaving) {
numa_make_global(addr, size);
}
return true;
return 0;
}
err = errno; // save errno from mmap() call above
if (!recoverable_mmap_error(err)) {
// However, it is not clear that this loss of our reserved mapping
// happens with large pages on Linux or that we cannot recover
// from the loss. For now, we just issue a warning and we don't
// call vm_exit_out_of_memory(). This issue is being tracked by
// JBS-8007074.
warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
// vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
// "committing reserved memory.");
}
// Fall through and try to use small pages
}
if (commit_memory(addr, size, exec)) {
err = os::Linux::commit_memory_impl(addr, size, exec);
if (err == 0) {
realign_memory(addr, size, alignment_hint);
return true;
}
return false;
return err;
}
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
}
void os::pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
if (err != 0) {
// the caller wants all commit errors to exit with the specified mesg:
warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
}
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
......@@ -2678,7 +2770,7 @@ void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
// small pages on top of the SHM segment. This method always works for small pages, so we
// allow that in any case.
if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
commit_memory(addr, bytes, alignment_hint, false);
commit_memory(addr, bytes, alignment_hint, !ExecMem);
}
}
......@@ -2931,7 +3023,7 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
}
return os::commit_memory(addr, size);
return os::commit_memory(addr, size, !ExecMem);
}
// If this is a growable mapping, remove the guard pages entirely by
......@@ -3053,7 +3145,7 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-1, 0);
if (p != (void *) -1) {
if (p != MAP_FAILED) {
// We don't know if this really is a huge page or not.
FILE *fp = fopen("/proc/self/maps", "r");
if (fp) {
......@@ -4393,7 +4485,7 @@ jint os::init_2(void)
if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
#ifndef PRODUCT
......
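The core of the Linux change is that commit_memory_impl() now returns the saved errno (0 on success), recoverable_mmap_error() decides whether the caller may handle that error, and only unrecoverable errors abort the VM on the spot. The sketch below mirrors that control flow with plain POSIX calls; abort()/exit() are simplified stand-ins for vm_exit_out_of_memory(), and none of it is HotSpot code.

// Sketch only: the "return errno, let the wrappers decide" pattern added above.
#include <sys/mman.h>
#include <cstddef>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static bool recoverable_mmap_error(int err) {
  switch (err) {
    case EBADF:
    case EINVAL:
    case ENOTSUP:
      return true;   // let the caller deal with these
    default:
      return false;  // the reserved mapping may be lost; do not continue silently
  }
}

// Returns 0 on success, otherwise the errno saved from mmap();
// aborts right here when the error is not considered recoverable.
static int commit_impl(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ | PROT_WRITE | PROT_EXEC : PROT_READ | PROT_WRITE;
  void* res = ::mmap(addr, size, prot, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
  if (res != MAP_FAILED) {
    return 0;
  }
  int err = errno;  // save errno from the mmap() call above
  if (!recoverable_mmap_error(err)) {
    fprintf(stderr, "committing reserved memory failed: %s (errno=%d)\n",
            strerror(err), err);
    abort();
  }
  return err;
}

// The two public flavors the patch introduces: boolean result vs. exit-with-message.
static bool commit(char* addr, size_t size, bool exec) {
  return commit_impl(addr, size, exec) == 0;
}

static void commit_or_exit(char* addr, size_t size, bool exec, const char* mesg) {
  int err = commit_impl(addr, size, exec);
  if (err != 0) {
    fprintf(stderr, "%s: %s (errno=%d)\n", mesg, strerror(err), err);
    exit(1);
  }
}

int main() {
  char* base = (char*)::mmap(NULL, 1 << 20, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base != (char*)MAP_FAILED) {
    if (commit(base, 4096, false)) {
      base[0] = 1;
    }
    commit_or_exit(base + 4096, 4096, false, "card table expansion");
    ::munmap(base, 1 << 20);
  }
  return 0;
}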
......@@ -76,6 +76,10 @@ class Linux {
static julong physical_memory() { return _physical_memory; }
static void initialize_system_info();
static int commit_memory_impl(char* addr, size_t bytes, bool exec);
static int commit_memory_impl(char* addr, size_t bytes,
size_t alignment_hint, bool exec);
static void set_glibc_version(const char *s) { _glibc_version = s; }
static void set_libpthread_version(const char *s) { _libpthread_version = s; }
......
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -60,7 +60,7 @@ static char* create_standard_memory(size_t size) {
}
// commit memory
if (!os::commit_memory(mapAddress, size)) {
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
......
......@@ -2784,7 +2784,42 @@ int os::vm_allocation_granularity() {
return page_size;
}
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
static bool recoverable_mmap_error(int err) {
// See if the error is one we can let the caller handle. This
// list of errno values comes from the Solaris mmap(2) man page.
switch (err) {
case EBADF:
case EINVAL:
case ENOTSUP:
// let the caller deal with these errors
return true;
default:
// Any remaining errors on this OS can cause our reserved mapping
// to be lost. That can cause confusion where different data
// structures think they have the same memory mapped. The worst
// scenario is if both the VM and a library think they have the
// same memory mapped.
return false;
}
}
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
strerror(err), err);
}
static void warn_fail_commit_memory(char* addr, size_t bytes,
size_t alignment_hint, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
alignment_hint, exec, strerror(err), err);
}
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
size_t size = bytes;
char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
......@@ -2792,14 +2827,38 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
if (UseNUMAInterleaving) {
numa_make_global(addr, bytes);
}
return true;
return 0;
}
return false;
int err = errno; // save errno from mmap() call in mmap_chunk()
if (!recoverable_mmap_error(err)) {
warn_fail_commit_memory(addr, bytes, exec, err);
vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
}
return err;
}
bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
bool exec) {
if (commit_memory(addr, bytes, exec)) {
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}
void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
if (err != 0) {
// the caller wants all commit errors to exit with the specified mesg:
warn_fail_commit_memory(addr, bytes, exec, err);
vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
}
}
int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
size_t alignment_hint, bool exec) {
int err = Solaris::commit_memory_impl(addr, bytes, exec);
if (err == 0) {
if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
// If the large page size has been set and the VM
// is using large pages, use the large page size
......@@ -2821,9 +2880,25 @@ bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
// Since this is a hint, ignore any failures.
(void)Solaris::set_mpss_range(addr, bytes, page_size);
}
return true;
}
return false;
return err;
}
bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
bool exec) {
return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
}
void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
size_t alignment_hint, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
if (err != 0) {
// the caller wants all commit errors to exit with the specified mesg:
warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
}
}
// Uncommit the pages in a specified region.
......@@ -2835,7 +2910,7 @@ void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
}
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size);
return os::commit_memory(addr, size, !ExecMem);
}
bool os::remove_stack_guard_pages(char* addr, size_t size) {
......
......@@ -168,6 +168,9 @@ class Solaris {
static int _dev_zero_fd;
static int get_dev_zero_fd() { return _dev_zero_fd; }
static void set_dev_zero_fd(int fd) { _dev_zero_fd = fd; }
static int commit_memory_impl(char* addr, size_t bytes, bool exec);
static int commit_memory_impl(char* addr, size_t bytes,
size_t alignment_hint, bool exec);
static char* mmap_chunk(char *addr, size_t size, int flags, int prot);
static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed);
static bool mpss_sanity_check(bool warn, size_t * page_size);
......
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -62,7 +62,7 @@ static char* create_standard_memory(size_t size) {
}
// commit memory
if (!os::commit_memory(mapAddress, size)) {
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
......
......@@ -2524,7 +2524,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
addr = (address)((uintptr_t)addr &
(~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
os::commit_memory((char *)addr, thread->stack_base() - addr,
false );
!ExecMem);
return EXCEPTION_CONTINUE_EXECUTION;
}
else
......@@ -3172,6 +3172,15 @@ bool os::release_memory_special(char* base, size_t bytes) {
void os::print_statistics() {
}
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
int err = os::get_last_error();
char buf[256];
size_t buf_len = os::lasterror(buf, sizeof(buf));
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
exec, buf_len != 0 ? buf : "<no_error_string>", err);
}
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
if (bytes == 0) {
// Don't bother the OS with noops.
......@@ -3186,11 +3195,17 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
// is always within a reserve covered by a single VirtualAlloc
// in that case we can just do a single commit for the requested size
if (!UseNUMAInterleaving) {
if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
return false;
}
if (exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
return false;
}
}
return true;
} else {
......@@ -3205,12 +3220,20 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
MEMORY_BASIC_INFORMATION alloc_info;
VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL)
if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
PAGE_READWRITE) == NULL) {
NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
exec);)
return false;
}
if (exec) {
DWORD oldprot;
if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot))
if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
PAGE_EXECUTE_READWRITE, &oldprot)) {
NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
exec);)
return false;
}
}
bytes_remaining -= bytes_to_rq;
next_alloc_addr += bytes_to_rq;
......@@ -3222,7 +3245,24 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return commit_memory(addr, size, exec);
// alignment_hint is ignored on this OS
return pd_commit_memory(addr, size, exec);
}
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
if (!pd_commit_memory(addr, size, exec)) {
warn_fail_commit_memory(addr, size, exec);
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
}
}
void os::pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint, bool exec,
const char* mesg) {
// alignment_hint is ignored on this OS
pd_commit_memory_or_exit(addr, size, exec, mesg);
}
bool os::pd_uncommit_memory(char* addr, size_t bytes) {
......@@ -3240,7 +3280,7 @@ bool os::pd_release_memory(char* addr, size_t bytes) {
}
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size);
return os::commit_memory(addr, size, !ExecMem);
}
bool os::remove_stack_guard_pages(char* addr, size_t size) {
......@@ -3264,8 +3304,9 @@ bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
// Strange enough, but on Win32 one can change protection only for committed
// memory, not a big deal anyway, as bytes less or equal than 64K
if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
fatal("cannot commit protection page");
if (!is_committed) {
commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
"cannot commit protection page");
}
// One cannot use os::guard_memory() here, as on Win32 guard page
// have different (one-shot) semantics, from MSDN on PAGE_GUARD:
......
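The Windows hunks add warn_fail_commit_memory() calls (in non-product builds) after every failing VirtualAlloc()/VirtualProtect(), so the DOS error is no longer dropped silently. The following standalone Win32 sketch shows that commit path with simplified logging in place of HotSpot's warning() helper; it is illustrative only, not the patched code.

// Illustrative Win32 commit helper: commit as PAGE_READWRITE first, then upgrade
// to executable with VirtualProtect(), reporting GetLastError() on any failure.
#include <windows.h>
#include <cstdio>

static bool commit_region(char* addr, size_t bytes, bool exec) {
  if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
    fprintf(stderr, "commit(%p, %zu, %d) failed; DOS error=%lu\n",
            (void*)addr, bytes, exec, GetLastError());
    return false;
  }
  if (exec) {
    DWORD oldprot;
    // Change protection after committing to get execute permission on the pages.
    if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
      fprintf(stderr, "protect(%p, %zu) failed; DOS error=%lu\n",
              (void*)addr, bytes, GetLastError());
      return false;
    }
  }
  return true;
}

int main() {
  // Reserve first, then commit a page inside the reservation.
  char* base = (char*)VirtualAlloc(NULL, 1 << 20, MEM_RESERVE, PAGE_NOACCESS);
  if (base != NULL && commit_region(base, 4096, false)) {
    base[0] = 1;
  }
  if (base != NULL) VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}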
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -58,7 +58,7 @@ static char* create_standard_memory(size_t size) {
}
// commit memory
if (!os::commit_memory(mapAddress, size)) {
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
......
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -565,11 +565,9 @@ bool CardTableExtension::resize_commit_uncommit(int changed_region,
if(new_start_aligned < new_end_for_commit) {
MemRegion new_committed =
MemRegion(new_start_aligned, new_end_for_commit);
if (!os::commit_memory((char*)new_committed.start(),
new_committed.byte_size())) {
vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
"card table expansion");
}
os::commit_memory_or_exit((char*)new_committed.start(),
new_committed.byte_size(), !ExecMem,
"card table expansion");
}
result = true;
} else if (new_start_aligned > cur_committed.start()) {
......
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -101,7 +101,8 @@ bool PSVirtualSpace::expand_by(size_t bytes) {
}
char* const base_addr = committed_high_addr();
bool result = special() || os::commit_memory(base_addr, bytes, alignment());
bool result = special() ||
os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
if (result) {
_committed_high_addr += bytes;
}
......@@ -154,7 +155,7 @@ PSVirtualSpace::expand_into(PSVirtualSpace* other_space, size_t bytes) {
if (tmp_bytes > 0) {
char* const commit_base = committed_high_addr();
if (other_space->special() ||
os::commit_memory(commit_base, tmp_bytes, alignment())) {
os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
// Reduce the reserved region in the other space.
other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes,
other_space->reserved_high_addr(),
......@@ -269,7 +270,8 @@ bool PSVirtualSpaceHighToLow::expand_by(size_t bytes) {
}
char* const base_addr = committed_low_addr() - bytes;
bool result = special() || os::commit_memory(base_addr, bytes, alignment());
bool result = special() ||
os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
if (result) {
_committed_low_addr -= bytes;
}
......@@ -322,7 +324,7 @@ size_t PSVirtualSpaceHighToLow::expand_into(PSVirtualSpace* other_space,
if (tmp_bytes > 0) {
char* const commit_base = committed_low_addr() - tmp_bytes;
if (other_space->special() ||
os::commit_memory(commit_base, tmp_bytes, alignment())) {
os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
// Reduce the reserved region in the other space.
other_space->set_reserved(other_space->reserved_low_addr(),
other_space->reserved_high_addr() - tmp_bytes,
......
......@@ -146,10 +146,7 @@ E* ArrayAllocator<E, F>::allocate(size_t length) {
vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
}
bool success = os::commit_memory(_addr, _size, false /* executable */);
if (!success) {
vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)");
}
os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
return (E*)_addr;
}
......
......@@ -110,11 +110,8 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
jbyte* guard_card = &_byte_map[_guard_index];
uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
_guard_region = MemRegion((HeapWord*)guard_page, _page_size);
if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
// Do better than this for Merlin
vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card");
}
os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
!ExecMem, "card table last card");
*guard_card = last_card;
_lowest_non_clean =
......@@ -312,12 +309,9 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
MemRegion(cur_committed.end(), new_end_for_commit);
assert(!new_committed.is_empty(), "Region should not be empty here");
if (!os::commit_memory((char*)new_committed.start(),
new_committed.byte_size(), _page_size)) {
// Do better than this for Merlin
vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
"card table expansion");
}
os::commit_memory_or_exit((char*)new_committed.start(),
new_committed.byte_size(), _page_size,
!ExecMem, "card table expansion");
// Use new_end_aligned (as opposed to new_end_for_commit) because
// the cur_committed region may include the guard region.
} else if (new_end_aligned < cur_committed.end()) {
......
......@@ -159,7 +159,7 @@ WB_END
WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
os::commit_memory((char *)(uintptr_t)addr, size);
os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
WB_END
......
......@@ -1503,6 +1503,18 @@ bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
return res;
}
void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
const char* mesg) {
pd_commit_memory_or_exit(addr, bytes, executable, mesg);
MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
}
void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
bool executable, const char* mesg) {
os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
}
bool os::uncommit_memory(char* addr, size_t bytes) {
bool res = pd_uncommit_memory(addr, bytes);
if (res) {
......
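One detail of the platform-independent wrappers above: commit_memory() records the commit with MemTracker only when pd_commit_memory() succeeds, whereas commit_memory_or_exit() records unconditionally, because pd_commit_memory_or_exit() does not return on failure. A tiny self-contained sketch of that ordering follows; Tracker and the pd_* stubs are hypothetical stand-ins, not the real MemTracker or os:: code.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins, only to show the accounting order of the wrappers.
struct Tracker {
  static void record_commit(char* addr, size_t bytes) {
    printf("tracked commit: %p, %zu bytes\n", (void*)addr, bytes);
  }
};

static bool pd_commit(char*, size_t, bool) {
  return true;  // stub: the real work happens in the platform os_*.cpp files
}

static void pd_commit_or_exit(char*, size_t, bool, const char*) {
  // stub: either succeeds or exits the process with the supplied message
}

static bool commit(char* addr, size_t bytes, bool exec) {
  bool res = pd_commit(addr, bytes, exec);
  if (res) {
    Tracker::record_commit(addr, bytes);  // account only for successful commits
  }
  return res;
}

static void commit_or_exit(char* addr, size_t bytes, bool exec, const char* mesg) {
  pd_commit_or_exit(addr, bytes, exec, mesg);
  Tracker::record_commit(addr, bytes);    // reached only if the commit succeeded
}

int main() {
  char buf[64];
  commit(buf, sizeof(buf), false);
  commit_or_exit(buf, sizeof(buf), false, "demo");
  return 0;
}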
......@@ -78,6 +78,10 @@ enum ThreadPriority { // JLS 20.20.1-3
CriticalPriority = 11 // Critical thread priority
};
// Executable parameter flag for os::commit_memory() and
// os::commit_memory_or_exit().
const bool ExecMem = true;
// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
......@@ -104,9 +108,16 @@ class os: AllStatic {
static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
static void pd_split_reserved_memory(char *base, size_t size,
size_t split, bool realloc);
static bool pd_commit_memory(char* addr, size_t bytes, bool executable = false);
static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool executable = false);
bool executable);
// Same as pd_commit_memory() that either succeeds or calls
// vm_exit_out_of_memory() with the specified mesg.
static void pd_commit_memory_or_exit(char* addr, size_t bytes,
bool executable, const char* mesg);
static void pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint,
bool executable, const char* mesg);
static bool pd_uncommit_memory(char* addr, size_t bytes);
static bool pd_release_memory(char* addr, size_t bytes);
......@@ -261,9 +272,16 @@ class os: AllStatic {
static char* attempt_reserve_memory_at(size_t bytes, char* addr);
static void split_reserved_memory(char *base, size_t size,
size_t split, bool realloc);
static bool commit_memory(char* addr, size_t bytes, bool executable = false);
static bool commit_memory(char* addr, size_t bytes, bool executable);
static bool commit_memory(char* addr, size_t size, size_t alignment_hint,
bool executable = false);
bool executable);
// Same as commit_memory() that either succeeds or calls
// vm_exit_out_of_memory() with the specified mesg.
static void commit_memory_or_exit(char* addr, size_t bytes,
bool executable, const char* mesg);
static void commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint,
bool executable, const char* mesg);
static bool uncommit_memory(char* addr, size_t bytes);
static bool release_memory(char* addr, size_t bytes);
......
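The os.hpp change above removes the defaulted executable parameter and introduces the ExecMem constant, so every call site must state whether the memory is executable (commit_memory(addr, size, !ExecMem) instead of relying on a hidden default), and it declares the new _or_exit variants. Below is a minimal illustration of the calling idiom using stand-in functions rather than the real os:: entry points, which live in the platform files shown earlier in this change.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Readability flag for the 'executable' parameter, as in os.hpp above.
const bool ExecMem = true;

// Stand-in for os::commit_memory(); always "succeeds" in this sketch.
static bool commit_memory(char* addr, size_t bytes, bool executable) {
  printf("commit %zu bytes at %p, executable=%d\n", bytes, (void*)addr, executable);
  return true;
}

// Stand-in for os::commit_memory_or_exit(): must succeed or the process ends.
static void commit_memory_or_exit(char* addr, size_t bytes, bool executable,
                                  const char* mesg) {
  if (!commit_memory(addr, bytes, executable)) {
    fprintf(stderr, "%s\n", mesg);
    exit(1);  // stand-in for vm_exit_out_of_memory(..., OOM_MMAP_ERROR, mesg)
  }
}

int main() {
  char buf[4096];
  commit_memory(buf, sizeof(buf), !ExecMem);                       // data, never executable
  commit_memory_or_exit(buf, sizeof(buf), ExecMem, "code cache");  // must succeed
  return 0;
}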
......@@ -533,11 +533,13 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
lower_high() + lower_needs <= lower_high_boundary(),
"must not expand beyond region");
if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
debug_only(warning("os::commit_memory failed"));
debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
", lower_needs=" SIZE_FORMAT ", %d) failed",
lower_high(), lower_needs, _executable);)
return false;
} else {
_lower_high += lower_needs;
}
}
}
if (middle_needs > 0) {
assert(lower_high_boundary() <= middle_high() &&
......@@ -545,7 +547,10 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
"must not expand beyond region");
if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
_executable)) {
debug_only(warning("os::commit_memory failed"));
debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
", %d) failed", middle_high(), middle_needs,
middle_alignment(), _executable);)
return false;
}
_middle_high += middle_needs;
......@@ -555,7 +560,9 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
upper_high() + upper_needs <= upper_high_boundary(),
"must not expand beyond region");
if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
debug_only(warning("os::commit_memory failed"));
debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
", upper_needs=" SIZE_FORMAT ", %d) failed",
upper_high(), upper_needs, _executable);)
return false;
} else {
_upper_high += upper_needs;
......
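The virtualspace.cpp hunks replace the generic debug_only(warning("os::commit_memory failed")) with messages that include the failing address, size, and executable flag, so a failed expansion can be attributed to a specific request without rerunning under a debugger. A small standalone sketch of that debug-only diagnostic idiom; DEBUG_ONLY here is a local stand-in for HotSpot's debug_only()/NOT_PRODUCT() macros.

// Sketch of the debug-only diagnostic idiom: the argument compiles away
// entirely when PRODUCT is defined, as in HotSpot product builds.
#include <cstddef>
#include <cstdio>

#ifdef PRODUCT
#define DEBUG_ONLY(code)
#else
#define DEBUG_ONLY(code) code
#endif

static bool expand_region(char* addr, size_t needs, bool executable) {
  bool ok = false;  // pretend the commit failed so the warning path runs
  if (!ok) {
    // Print the arguments, not just "commit failed", so the failing
    // request is identifiable in the log.
    DEBUG_ONLY(fprintf(stderr, "INFO: commit_memory(%p, needs=%zu, %d) failed\n",
                       (void*)addr, needs, executable);)
    return false;
  }
  return true;
}

int main() {
  char dummy[16];
  expand_region(dummy, sizeof(dummy), false);
  return 0;
}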