Commit 0384fa17 authored by S stuefe


8077276: allocating heap with UseLargePages and HugeTLBFS may trash existing memory mappings (linux)
Summary: Remove MAP_FIXED from initial mapping allocation; add tests
Reviewed-by: stefank, coleenp
Parent: a04c00e5
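Background for the fix below: on Linux, mmap with MAP_FIXED does not fail when the requested range is already in use; it silently unmaps whatever was there. So a speculative large-page reservation attempt at req_addr could destroy mappings owned by other parts of the process. A minimal standalone sketch of that hazard (not part of the patch, assumes a Linux system):

    // map_fixed_hazard.cpp - demonstrates MAP_FIXED silently replacing a mapping
    #include <sys/mman.h>
    #include <cstdio>
    #include <cstring>

    int main() {
      const size_t len = 4096;
      // An existing mapping some other subsystem depends on.
      char* p = (char*) mmap(NULL, len, PROT_READ|PROT_WRITE,
                             MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      strcpy(p, "precious data");

      // A "reservation attempt" at the same address with MAP_FIXED does not
      // fail - it silently discards the old mapping and its contents.
      char* q = (char*) mmap(p, len, PROT_READ|PROT_WRITE,
                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
      printf("q == p: %d, contents now: '%s'\n", q == p, p); // contents are gone
      return 0;
    }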
@@ -3433,22 +3433,66 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_
   return addr;
 }
 
+// Helper for os::Linux::reserve_memory_special_huge_tlbfs_mixed().
+// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
+// (req_addr != NULL) or with a given alignment.
+// - bytes shall be a multiple of alignment.
+// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
+// - alignment sets the alignment at which memory shall be allocated.
+//   It must be a multiple of allocation granularity.
+// Returns address of memory or NULL. If req_addr was not NULL, will only return
+// req_addr or NULL.
+static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
+
+  size_t extra_size = bytes;
+  if (req_addr == NULL && alignment > 0) {
+    extra_size += alignment;
+  }
+
+  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
+    MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
+    -1, 0);
+  if (start == MAP_FAILED) {
+    start = NULL;
+  } else {
+    if (req_addr != NULL) {
+      if (start != req_addr) {
+        ::munmap(start, extra_size);
+        start = NULL;
+      }
+    } else {
+      char* const start_aligned = (char*) align_ptr_up(start, alignment);
+      char* const end_aligned = start_aligned + bytes;
+      char* const end = start + extra_size;
+      if (start_aligned > start) {
+        ::munmap(start, start_aligned - start);
+      }
+      if (end_aligned < end) {
+        ::munmap(end_aligned, end - end_aligned);
+      }
+      start = start_aligned;
+    }
+  }
+  return start;
+}
+
+// Reserve memory using mmap(MAP_HUGETLB).
+// - bytes shall be a multiple of alignment.
+// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
+// - alignment sets the alignment at which memory shall be allocated.
+//   It must be a multiple of allocation granularity.
+// Returns address of memory or NULL. If req_addr was not NULL, will only return
+// req_addr or NULL.
 char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   size_t large_page_size = os::large_page_size();
   assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
-  // Allocate small pages.
+  assert(is_ptr_aligned(req_addr, alignment), "Must be");
+  assert(is_size_aligned(bytes, alignment), "Must be");
 
-  char* start;
-  if (req_addr != NULL) {
-    assert(is_ptr_aligned(req_addr, alignment), "Must be");
-    assert(is_size_aligned(bytes, alignment), "Must be");
-    start = os::reserve_memory(bytes, req_addr);
-    assert(start == NULL || start == req_addr, "Must be");
-  } else {
-    start = os::reserve_memory_aligned(bytes, alignment);
-  }
+  // First reserve - but not commit - the address range in small pages.
+  char* const start = anon_mmap_aligned(bytes, alignment, req_addr);
 
   if (start == NULL) {
     return NULL;
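The helper added above avoids MAP_FIXED entirely: when no request address is given, it over-reserves by `alignment`, then unmaps the unaligned head and tail. A simplified standalone sketch of the same over-reserve-and-trim idiom (names and the power-of-two bit trick are illustrative, not from the patch; HotSpot uses its align_ptr_up helper instead):

    // mmap_aligned.cpp - aligned anonymous reservation without MAP_FIXED
    #include <sys/mman.h>
    #include <cstdint>
    #include <cstddef>

    // Assumes: alignment is a power of two and a multiple of the page size,
    // bytes is a multiple of the page size.
    static char* mmap_aligned(size_t bytes, size_t alignment) {
      size_t extra = bytes + alignment;
      char* raw = (char*) mmap(NULL, extra, PROT_NONE,
                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return NULL;

      // Round the start up to the requested alignment.
      uintptr_t a = ((uintptr_t) raw + alignment - 1) & ~(uintptr_t)(alignment - 1);
      char* aligned = (char*) a;
      char* end = raw + extra;

      if (aligned > raw) munmap(raw, aligned - raw);           // trim unaligned head
      if (aligned + bytes < end)                               // trim unused tail
        munmap(aligned + bytes, end - (aligned + bytes));
      return aligned;  // reserved (PROT_NONE), not yet committed
    }

Because nothing is mapped with MAP_FIXED over an address the kernel did not just hand back, no pre-existing mapping can be clobbered; a colliding req_addr simply makes the helper return NULL.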
@@ -3456,13 +3500,6 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
   assert(is_ptr_aligned(start, alignment), "Must be");
 
-  if (MemTracker::tracking_level() > NMT_minimal) {
-    // os::reserve_memory_special will record this memory area.
-    // Need to release it here to prevent overlapping reservations.
-    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-    tkr.record((address)start, bytes);
-  }
-
   char* end = start + bytes;
 
   // Find the regions of the allocated chunk that can be promoted to large pages.
@@ -3482,9 +3519,9 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
 
   void* result;
+
+  // Commit small-paged leading area.
   if (start != lp_start) {
     result = ::mmap(start, lp_start - start, prot,
                     MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
@@ -3495,11 +3532,12 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
     }
   }
 
+  // Commit large-paged area.
   result = ::mmap(lp_start, lp_bytes, prot,
                   MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
                   -1, 0);
   if (result == MAP_FAILED) {
-    warn_on_large_pages_failure(req_addr, bytes, errno);
+    warn_on_large_pages_failure(lp_start, lp_bytes, errno);
     // If the mmap above fails, the large pages region will be unmapped and we
     // have regions before and after with small pages. Release these regions.
     //
@@ -3512,6 +3550,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
     return NULL;
   }
 
+  // Commit small-paged trailing area.
   if (lp_end != end) {
     result = ::mmap(lp_end, end - lp_end, prot,
                     MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
@@ -3528,7 +3567,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
 char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
   assert(is_ptr_aligned(req_addr, alignment), "Must be");
-  assert(is_power_of_2(alignment), "Must be");
+  assert(is_size_aligned(alignment, os::vm_allocation_granularity()), "Must be");
   assert(is_power_of_2(os::large_page_size()), "Must be");
   assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
@@ -6102,47 +6141,100 @@ class TestReserveMemorySpecial : AllStatic {
     }
   }
 
-  static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
-    if (!UseHugeTLBFS) {
-      return;
-    }
-
-    test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
-             size, alignment);
-
-    assert(size >= os::large_page_size(), "Incorrect input to test");
-
-    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
-
-    if (addr != NULL) {
-      small_page_write(addr, size);
-
-      os::Linux::release_memory_special_huge_tlbfs(addr, size);
-    }
-  }
-
-  static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
-    size_t lp = os::large_page_size();
-    size_t ag = os::vm_allocation_granularity();
-
-    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
-      test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
-    }
-  }
-
-  static void test_reserve_memory_special_huge_tlbfs_mixed() {
-    size_t lp = os::large_page_size();
-    size_t ag = os::vm_allocation_granularity();
-
-    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
-    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
-    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
-    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
-    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
-    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
-    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
-    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
-    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
-  }
+  static void test_reserve_memory_special_huge_tlbfs_mixed() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    // sizes to test
+    const size_t sizes[] = {
+      lp, lp + ag, lp + lp / 2, lp * 2,
+      lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
+      lp * 10, lp * 10 + lp / 2
+    };
+    const int num_sizes = sizeof(sizes) / sizeof(size_t);
+
+    // For each size/alignment combination, we test three scenarios:
+    // 1) with req_addr == NULL
+    // 2) with a non-NULL req_addr at which we expect to successfully allocate
+    // 3) with a non-NULL req_addr which contains a pre-existing mapping, at which we
+    //    expect the allocation to either fail or to ignore req_addr
+
+    // Pre-allocate two areas; they shall be as large as the largest allocation
+    // and aligned to the largest alignment we will be testing.
+    const size_t mapping_size = sizes[num_sizes - 1] * 2;
+    char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
+      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
+      -1, 0);
+    assert(mapping1 != MAP_FAILED, "should work");
+
+    char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
+      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
+      -1, 0);
+    assert(mapping2 != MAP_FAILED, "should work");
+
+    // Unmap the first mapping, but leave the second mapping intact: the first
+    // mapping will serve as a value for a "good" req_addr (case 2). The second
+    // mapping, still intact, as "bad" req_addr (case 3).
+    ::munmap(mapping1, mapping_size);
+
+    // Case 1
+    test_log("%s, req_addr NULL:", __FUNCTION__);
+    test_log("size            align           result");
+    for (int i = 0; i < num_sizes; i++) {
+      const size_t size = sizes[i];
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " -> " PTR_FORMAT " %s",
+                 size, alignment, p, (p != NULL ? "" : "(failed)"));
+        if (p != NULL) {
+          assert(is_ptr_aligned(p, alignment), "must be");
+          small_page_write(p, size);
+          os::Linux::release_memory_special_huge_tlbfs(p, size);
+        }
+      }
+    }
+
+    // Case 2
+    test_log("%s, req_addr non-NULL:", __FUNCTION__);
+    test_log("size            align           req_addr         result");
+    for (int i = 0; i < num_sizes; i++) {
+      const size_t size = sizes[i];
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        char* const req_addr = (char*) align_ptr_up(mapping1, alignment);
+        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
+        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
+                 size, alignment, req_addr, p,
+                 ((p != NULL ? (p == req_addr ? "(exact match)" : "") : "(failed)")));
+        if (p != NULL) {
+          assert(p == req_addr, "must be");
+          small_page_write(p, size);
+          os::Linux::release_memory_special_huge_tlbfs(p, size);
+        }
+      }
+    }
+
+    // Case 3
+    test_log("%s, req_addr non-NULL with preexisting mapping:", __FUNCTION__);
+    test_log("size            align           req_addr         result");
+    for (int i = 0; i < num_sizes; i++) {
+      const size_t size = sizes[i];
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        char* const req_addr = (char*) align_ptr_up(mapping2, alignment);
+        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
+        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
+                 size, alignment, req_addr, p,
+                 ((p != NULL ? "" : "(failed)")));
+        // as the area around req_addr contains already existing mappings, the API should always
+        // return NULL (as per contract, it cannot return another address)
+        assert(p == NULL, "must be");
+      }
+    }
+
+    ::munmap(mapping2, mapping_size);
+  }
 
   static void test_reserve_memory_special_huge_tlbfs() {
......
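The new test relies on a probing pattern worth noting: reserve a scratch range and unmap it to obtain an address that is very likely free (a "good" req_addr), and keep a second range mapped as a deliberately occupied "bad" req_addr. A standalone sketch of that pattern with plain mmap (names like probe_good/probe_bad are illustrative, not from the patch):

    // probe_addrs.cpp - deriving "good" and "bad" request addresses for testing
    #include <sys/mman.h>

    int main() {
      const size_t sz = 64 * 1024 * 1024;
      char* probe_good = (char*) mmap(NULL, sz, PROT_NONE,
                                      MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
      char* probe_bad  = (char*) mmap(NULL, sz, PROT_NONE,
                                      MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);

      // Now probably free: a well-behaved allocator may claim this address.
      munmap(probe_good, sz);
      // Still mapped: a well-behaved allocator must return NULL here rather
      // than clobber the existing mapping (the bug this commit fixes).

      // ... exercise the allocator with req_addr = probe_good, then probe_bad ...

      munmap(probe_bad, sz);
      return 0;
    }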