You need to sign in or sign up before continuing.
Commit 2e15df11 authored by brutisso

7173959: Jvm crashed during coherence exabus (tmb) testing

Summary: Mapping of aligned memory needs to be MT safe. Also reviewed by: vitalyd@gmail.com
Reviewed-by: dholmes, coleenp, zgu
Parent 12356b4f
...@@ -93,6 +93,47 @@ void os::wait_for_keypress_at_exit(void) { ...@@ -93,6 +93,47 @@ void os::wait_for_keypress_at_exit(void) {
return; return;
} }
// Threads may race through this path and, with MAP_FIXED, remap over each
// other's regions.  On posix we therefore keep the chunk we originally
// mapped and just give back its unaligned head and tail, rather than
// releasing the whole chunk and re-reserving at the aligned address.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
      "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  // Over-reserve so that an aligned sub-range of 'size' bytes is
  // guaranteed to exist somewhere inside the reservation.
  const size_t padded_size = size + alignment;
  assert(padded_size >= size, "overflow, size is too large to allow alignment");

  char* raw_base = os::reserve_memory(padded_size, NULL, alignment);
  if (raw_base == NULL) {
    return NULL;
  }

  // First aligned address at or above the raw reservation.
  char* aligned_base = (char*) align_size_up((uintptr_t) raw_base, alignment);

  // Layout of the padded reservation:
  //   [ raw_base .. aligned_base .. aligned_base + size .. raw_base + padded_size ]
  //     |-- head --|                |---------------- tail -----------------------|
  const size_t head = aligned_base - raw_base;
  const size_t tail = (raw_base + padded_size) - (aligned_base + size);

  // Release the unused head and tail pieces; the aligned middle stays mapped,
  // so no other thread can ever observe the target range unmapped.
  if (head > 0) {
    os::release_memory(raw_base, head);
  }
  if (tail > 0) {
    os::release_memory(aligned_base + size, tail);
  }

  return aligned_base;
}
void os::Posix::print_load_average(outputStream* st) { void os::Posix::print_load_average(outputStream* st) {
st->print("load average:"); st->print("load average:");
double loadavg[3]; double loadavg[3];
......
...@@ -2895,6 +2895,36 @@ void os::pd_split_reserved_memory(char *base, size_t size, size_t split, ...@@ -2895,6 +2895,36 @@ void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
} }
} }
// Threads can race here as well, but unlike posix-like os's it is not
// possible to unmap small sections of virtual space to fix up alignment.
// Windows also prevents one thread from remapping over another thread's
// region, so we retry the release-and-re-reserve sequence until it
// succeeds; the loop is therefore thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
      "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  const size_t padded_size = size + alignment;
  assert(padded_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;
  while (aligned_base == NULL) {
    // Over-reserve, compute the aligned address inside that reservation,
    // then drop the whole reservation and try to grab exactly
    // [aligned_base, aligned_base + size).  Another thread may take the
    // spot in between, in which case reserve_memory returns NULL and we
    // simply try again.
    char* raw_base = os::reserve_memory(padded_size, NULL, alignment);
    if (raw_base == NULL) {
      return NULL;
    }
    aligned_base = (char*) align_size_up((uintptr_t) raw_base, alignment);
    os::release_memory(raw_base, padded_size);
    aligned_base = os::reserve_memory(size, aligned_base);
  }
  return aligned_base;
}
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
assert((size_t)addr % os::vm_allocation_granularity() == 0, assert((size_t)addr % os::vm_allocation_granularity() == 0,
"reserve alignment"); "reserve alignment");
......
...@@ -255,6 +255,7 @@ class os: AllStatic { ...@@ -255,6 +255,7 @@ class os: AllStatic {
static int vm_allocation_granularity(); static int vm_allocation_granularity();
static char* reserve_memory(size_t bytes, char* addr = 0, static char* reserve_memory(size_t bytes, char* addr = 0,
size_t alignment_hint = 0); size_t alignment_hint = 0);
static char* reserve_memory_aligned(size_t size, size_t alignment);
static char* attempt_reserve_memory_at(size_t bytes, char* addr); static char* attempt_reserve_memory_at(size_t bytes, char* addr);
static void split_reserved_memory(char *base, size_t size, static void split_reserved_memory(char *base, size_t size,
size_t split, bool realloc); size_t split, bool realloc);
......
...@@ -329,20 +329,9 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large, ...@@ -329,20 +329,9 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) { if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
// Base not aligned, retry // Base not aligned, retry
if (!os::release_memory(base, size)) fatal("os::release_memory failed"); if (!os::release_memory(base, size)) fatal("os::release_memory failed");
// Reserve size large enough to do manual alignment and // Make sure that size is aligned
// increase size to a multiple of the desired alignment
size = align_size_up(size, alignment); size = align_size_up(size, alignment);
size_t extra_size = size + alignment; base = os::reserve_memory_aligned(size, alignment);
do {
char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
if (extra_base == NULL) return;
// Do manual alignement
base = (char*) align_size_up((uintptr_t) extra_base, alignment);
assert(base >= extra_base, "just checking");
// Re-reserve the region at the aligned base address.
os::release_memory(extra_base, extra_size);
base = os::reserve_memory(size, base);
} while (base == NULL);
if (requested_address != 0 && if (requested_address != 0 &&
failed_to_reserve_as_requested(base, requested_address, size, false)) { failed_to_reserve_as_requested(base, requested_address, size, false)) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to comment