Commit 89430373 authored by tschatzl

8058354: SPECjvm2008-Derby -2.7% performance regression on Solaris-X64 starting with 9-b29

Summary: Allow use of large pages for auxiliary data structures in G1. Clean up existing interfaces.
Reviewed-by: jmasa, pliden, stefank
Parent c57a4399
@@ -114,7 +114,7 @@ void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
}
size_t CMBitMap::compute_size(size_t heap_size) {
-  return heap_size / mark_distance();
+  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}
size_t CMBitMap::mark_distance() {
......
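The compute_size() change above only sizes the mark bitmap; the rounding matters because the reservation created for it later may be padded for large pages. A minimal sketch of the arithmetic, not HotSpot code: the align_up helper and the 8-byte object alignment are assumptions, matching the bitmapTranslationFactor = 8 * 8 used by the test added at the end of this change.

    #include <cstddef>

    // One byte of bitmap covers ObjectAlignmentInBytes * BitsPerByte = 64 bytes
    // of heap; the result is rounded up so the reservation fully covers it.
    // alignment must be a power of two.
    static size_t align_up(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    static size_t bitmap_size_for_heap(size_t heap_size, size_t reservation_alignment) {
      const size_t mark_distance = 8 * 8;  // heap bytes per bitmap byte
      return align_up(heap_size / mark_distance, reservation_alignment);
    }
    // Example: a 128 MB heap needs 128 MB / 64 = 2 MB of bitmap, i.e. exactly
    // one 2 MB large page.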
@@ -1904,6 +1904,25 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  guarantee(_task_queues != NULL, "task_queues allocation failure.");
}
G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
size_t size,
size_t translation_factor) {
// Allocate a new reserved space, preferring to use large pages.
ReservedSpace rs(size, true);
G1RegionToSpaceMapper* result =
G1RegionToSpaceMapper::create_mapper(rs,
size,
rs.alignment(),
HeapRegion::GrainBytes,
translation_factor,
mtGC);
if (TracePageSizes) {
gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
description, rs.alignment(), p2i(rs.base()), rs.size(), rs.alignment(), size);
}
return result;
}
jint G1CollectedHeap::initialize() {
  CollectedHeap::pre_initialize();
  os::enable_vtime();
@@ -1977,57 +1996,35 @@ jint G1CollectedHeap::initialize() {
  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
  G1RegionToSpaceMapper* heap_storage =
    G1RegionToSpaceMapper::create_mapper(g1_rs,
+                                         g1_rs.size(),
                                         UseLargePages ? os::large_page_size() : os::vm_page_size(),
                                         HeapRegion::GrainBytes,
                                         1,
                                         mtJavaHeap);
  heap_storage->set_mapping_changed_listener(&_listener);
-  // Reserve space for the block offset table. We do not support automatic uncommit
-  // for the card table at this time. BOT only.
-  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
  G1RegionToSpaceMapper* bot_storage =
-    G1RegionToSpaceMapper::create_mapper(bot_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         G1BlockOffsetSharedArray::N_bytes,
-                                         mtGC);
+    create_aux_memory_mapper("Block offset table",
+                             G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
+                             G1BlockOffsetSharedArray::N_bytes);
  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
  G1RegionToSpaceMapper* cardtable_storage =
-    G1RegionToSpaceMapper::create_mapper(cardtable_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         G1BlockOffsetSharedArray::N_bytes,
-                                         mtGC);
+    create_aux_memory_mapper("Card table",
+                             G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
+                             G1BlockOffsetSharedArray::N_bytes);
-  // Reserve space for the card counts table.
-  ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
  G1RegionToSpaceMapper* card_counts_storage =
-    G1RegionToSpaceMapper::create_mapper(card_counts_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         G1BlockOffsetSharedArray::N_bytes,
-                                         mtGC);
+    create_aux_memory_mapper("Card counts table",
+                             G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
+                             G1BlockOffsetSharedArray::N_bytes);
-  // Reserve space for prev and next bitmap.
  size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
-  ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
  G1RegionToSpaceMapper* prev_bitmap_storage =
-    G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         CMBitMap::mark_distance(),
-                                         mtGC);
+    create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());
-  ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
  G1RegionToSpaceMapper* next_bitmap_storage =
-    G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         CMBitMap::mark_distance(),
-                                         mtGC);
+    create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());
  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
  g1_barrier_set()->initialize(cardtable_storage);
......
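With the helper in place, every auxiliary structure gets its own reservation that prefers large pages, while only the size actually needed is handed to the mapper. Whether a structure ever fills a whole large page depends on its translation factor. A rough illustration, not HotSpot code: the 2 MB large page size is an assumption, and the factors follow the test added by this change (512 heap bytes per card table/BOT byte, 64 heap bytes per mark bitmap byte).

    #include <cstddef>
    #include <cstdio>

    // Heap size at which each auxiliary structure grows to one assumed 2 MB
    // large page; below that it falls back to small pages.
    int main() {
      const size_t large_page    = 2u * 1024 * 1024;
      const size_t card_factor   = 512;    // heap bytes per card table byte
      const size_t bitmap_factor = 8 * 8;  // heap bytes per bitmap byte

      std::printf("card table spans one large page at %zu MB of heap\n",
                  large_page * card_factor / (1024 * 1024));    // 1024 MB
      std::printf("each bitmap spans one large page at %zu MB of heap\n",
                  large_page * bitmap_factor / (1024 * 1024));  // 128 MB
      return 0;
    }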
@@ -367,6 +367,12 @@ private:
  // heap after a compaction.
  void print_hrm_post_compaction();
// Create a memory mapper for auxiliary data structures of the given size and
// translation factor.
static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
size_t size,
size_t translation_factor);
  double verify(bool guard, const char* msg);
  void verify_before_gc();
  void verify_after_gc();
......
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -44,20 +44,29 @@
#endif
#include "utilities/bitMap.inline.hpp"
-G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
-  _high_boundary(NULL), _committed(), _page_size(0), _special(false),
+G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
+  _low_boundary(NULL), _high_boundary(NULL), _committed(), _page_size(0), _special(false),
  _dirty(), _executable(false) {
+  initialize_with_page_size(rs, used_size, page_size);
}
-bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
-  if (!rs.is_reserved()) {
-    return false; // Allocation failed.
-  }
+void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
+  guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
-  assert(page_size > 0, "Granularity must be non-zero.");
+  assert(page_size > 0, "Page size must be non-zero.");
+  guarantee(is_ptr_aligned(rs.base(), page_size),
+            err_msg("Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size));
+  guarantee(is_size_aligned(used_size, os::vm_page_size()),
+            err_msg("Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size));
+  guarantee(used_size <= rs.size(),
+            err_msg("Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size()));
+  guarantee(is_size_aligned(rs.size(), page_size),
+            err_msg("Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size));
  _low_boundary = rs.base();
-  _high_boundary = _low_boundary + rs.size();
+  _high_boundary = _low_boundary + used_size;
  _special = rs.special();
  _executable = rs.executable();
@@ -65,16 +74,15 @@ bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size
  _page_size = page_size;
  assert(_committed.size() == 0, "virtual space initialized more than once");
-  uintx size_in_bits = rs.size() / page_size;
-  _committed.resize(size_in_bits, /* in_resource_area */ false);
+  BitMap::idx_t size_in_pages = rs.size() / page_size;
+  _committed.resize(size_in_pages, /* in_resource_area */ false);
  if (_special) {
-    _dirty.resize(size_in_bits, /* in_resource_area */ false);
+    _dirty.resize(size_in_pages, /* in_resource_area */ false);
  }
-  return true;
+  _tail_size = used_size % _page_size;
}
G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
  release();
}
@@ -87,12 +95,18 @@ void G1PageBasedVirtualSpace::release() {
  _special = false;
  _executable = false;
  _page_size = 0;
+  _tail_size = 0;
  _committed.resize(0, false);
  _dirty.resize(0, false);
}
size_t G1PageBasedVirtualSpace::committed_size() const {
-  return _committed.count_one_bits() * _page_size;
+  size_t result = _committed.count_one_bits() * _page_size;
+  // The last page might not be in full.
+  if (is_last_page_partial() && _committed.at(_committed.size() - 1)) {
+    result -= _page_size - _tail_size;
+  }
+  return result;
}
size_t G1PageBasedVirtualSpace::reserved_size() const {
@@ -103,65 +117,134 @@ size_t G1PageBasedVirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}
-uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
  return (addr - _low_boundary) / _page_size;
}
-bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
-  uintptr_t end = start + size_in_pages;
-  return _committed.get_next_zero_offset(start, end) >= end;
+bool G1PageBasedVirtualSpace::is_area_committed(size_t start_page, size_t size_in_pages) const {
+  size_t end_page = start_page + size_in_pages;
+  return _committed.get_next_zero_offset(start_page, end_page) >= end_page;
}
-bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
-  uintptr_t end = start + size_in_pages;
-  return _committed.get_next_one_offset(start, end) >= end;
+bool G1PageBasedVirtualSpace::is_area_uncommitted(size_t start_page, size_t size_in_pages) const {
+  size_t end_page = start_page + size_in_pages;
+  return _committed.get_next_one_offset(start_page, end_page) >= end_page;
}
-char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
+char* G1PageBasedVirtualSpace::page_start(size_t index) const {
  return _low_boundary + index * _page_size;
}
-size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
-  return num * _page_size;
+bool G1PageBasedVirtualSpace::is_after_last_page(size_t index) const {
+  guarantee(index <= _committed.size(),
+            err_msg("Given boundary page " SIZE_FORMAT " is beyond managed page count " SIZE_FORMAT, index, _committed.size()));
+  return index == _committed.size();
}
-bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
+void G1PageBasedVirtualSpace::commit_preferred_pages(size_t start, size_t num_pages) {
assert(num_pages > 0, "No full pages to commit");
assert(start + num_pages <= _committed.size(),
err_msg("Tried to commit area from page " SIZE_FORMAT " to page " SIZE_FORMAT " "
"that is outside of managed space of " SIZE_FORMAT " pages",
start, start + num_pages, _committed.size()));
char* start_addr = page_start(start);
size_t size = num_pages * _page_size;
os::commit_memory_or_exit(start_addr, size, _page_size, _executable,
err_msg("Failed to commit area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
p2i(start_addr), p2i(start_addr + size), size));
}
void G1PageBasedVirtualSpace::commit_tail() {
assert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");
char* const aligned_end_address = (char*)align_ptr_down(_high_boundary, _page_size);
os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
}
void G1PageBasedVirtualSpace::commit_internal(size_t start_page, size_t end_page) {
guarantee(start_page < end_page,
err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
guarantee(end_page <= _committed.size(),
err_msg("Given end page " SIZE_FORMAT " is beyond end of managed page amount of " SIZE_FORMAT, end_page, _committed.size()));
size_t pages = end_page - start_page;
bool need_to_commit_tail = is_after_last_page(end_page) && is_last_page_partial();
// If we have to commit some (partial) tail area, decrease the amount of pages to avoid
// committing that in the full-page commit code.
if (need_to_commit_tail) {
pages--;
}
if (pages > 0) {
commit_preferred_pages(start_page, pages);
}
if (need_to_commit_tail) {
commit_tail();
}
}
char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) const {
return MIN2(_high_boundary, page_start(end_page));
}
void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
guarantee(start_page < end_page,
err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page));
}
bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
  // We need to make sure to commit all pages covered by the given area.
-  guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
+  guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
  bool zero_filled = true;
-  uintptr_t end = start + size_in_pages;
+  size_t end_page = start_page + size_in_pages;
  if (_special) {
    // Check for dirty pages and update zero_filled if any found.
-    if (_dirty.get_next_one_offset(start,end) < end) {
+    if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
      zero_filled = false;
-      _dirty.clear_range(start, end);
+      _dirty.clear_range(start_page, end_page);
    }
  } else {
-    os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
-                              err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
+    commit_internal(start_page, end_page);
  }
-  _committed.set_range(start, end);
+  _committed.set_range(start_page, end_page);
  if (AlwaysPreTouch) {
-    os::pretouch_memory(page_start(start), page_start(start) + byte_size_for_pages(size_in_pages));
+    pretouch_internal(start_page, end_page);
  }
  return zero_filled;
}
-void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
-  guarantee(is_area_committed(start, size_in_pages), "checking");
+void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
+  guarantee(start_page < end_page,
+            err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+  char* start_addr = page_start(start_page);
+  os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
+}
+void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
+  guarantee(is_area_committed(start_page, size_in_pages), "checking");
+  size_t end_page = start_page + size_in_pages;
  if (_special) {
    // Mark that memory is dirty. If committed again the memory might
    // need to be cleared explicitly.
-    _dirty.set_range(start, start + size_in_pages);
+    _dirty.set_range(start_page, end_page);
  } else {
-    os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
+    uncommit_internal(start_page, end_page);
  }
-  _committed.clear_range(start, start + size_in_pages);
+  _committed.clear_range(start_page, end_page);
}
bool G1PageBasedVirtualSpace::contains(const void* p) const {
@@ -175,7 +258,8 @@ void G1PageBasedVirtualSpace::print_on(outputStream* out) {
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
-  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
+  out->print_cr(" - preferred page size: " SIZE_FORMAT, _page_size);
+  out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
}
void G1PageBasedVirtualSpace::print() {
......
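The new commit path splits every request into as many full preferred-size pages as possible plus, at most once, a small-page tail covering the rest of the used size. A standalone sketch of that split, not HotSpot code; the page and request sizes are assumptions.

    #include <cstddef>
    #include <cstdio>

    // The used part of the space is committed with the preferred (large) page
    // size as far as it goes; the remainder of the last, partially used page is
    // committed with small OS pages (this is the _tail_size handled above).
    int main() {
      const size_t preferred_page = 2u * 1024 * 1024;  // assumed 2 MB large page
      const size_t used_size      = 5u * 1024 * 1024;  // size handed to the mapper

      size_t full_pages = used_size / preferred_page;  // 2 full large pages
      size_t tail_size  = used_size % preferred_page;  // 1 MB committed with 4 KB pages

      std::printf("commit %zu large pages, then a %zu byte tail with small pages\n",
                  full_pages, tail_size);
      return 0;
    }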
@@ -34,6 +34,12 @@
// granularity.
// (De-)Allocation requests are always OS page aligned by passing a page index
// and multiples of pages.
// For systems that only commits of memory in a given size (always greater than
// page size) the base address is required to be aligned to that page size.
// The actual size requested need not be aligned to that page size, but the size
// of the reservation passed may be rounded up to this page size. Any fragment
// (less than the page size) of the actual size at the tail of the request will
// be committed using OS small pages.
// The implementation gives an error when trying to commit or uncommit pages that
// have already been committed or uncommitted.
class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
@@ -43,7 +49,11 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
  char* _low_boundary;
  char* _high_boundary;
-  // The commit/uncommit granularity in bytes.
+  // The size of the tail in bytes of the handled space that needs to be committed
+  // using small pages.
+  size_t _tail_size;
+  // The preferred page size used for commit/uncommit in bytes.
  size_t _page_size;
  // Bitmap used for verification of commit/uncommit operations.
@@ -62,30 +72,55 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
  // Indicates whether the committed space should be executable.
  bool _executable;
// Helper function for committing memory. Commit the given memory range by using
// _page_size pages as much as possible and the remainder with small sized pages.
void commit_internal(size_t start_page, size_t end_page);
// Commit num_pages pages of _page_size size starting from start. All argument
// checking has been performed.
void commit_preferred_pages(size_t start_page, size_t end_page);
// Commit space at the high end of the space that needs to be committed with small
// sized pages.
void commit_tail();
// Uncommit the given memory range.
void uncommit_internal(size_t start_page, size_t end_page);
// Pretouch the given memory range.
void pretouch_internal(size_t start_page, size_t end_page);
  // Returns the index of the page which contains the given address.
  uintptr_t addr_to_page_index(char* addr) const;
  // Returns the address of the given page index.
-  char* page_start(uintptr_t index);
-  // Returns the byte size of the given number of pages.
-  size_t byte_size_for_pages(size_t num);
+  char* page_start(size_t index) const;
+  // Is the given page index the last page?
+  bool is_last_page(size_t index) const { return index == (_committed.size() - 1); }
+  // Is the given page index the first after last page?
+  bool is_after_last_page(size_t index) const;
+  // Is the last page only partially covered by this space?
+  bool is_last_page_partial() const { return !is_ptr_aligned(_high_boundary, _page_size); }
+  // Returns the end address of the given page bounded by the reserved space.
+  char* bounded_end_addr(size_t end_page) const;
  // Returns true if the entire area is backed by committed memory.
-  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
+  bool is_area_committed(size_t start_page, size_t size_in_pages) const;
  // Returns true if the entire area is not backed by committed memory.
-  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
+  bool is_area_uncommitted(size_t start_page, size_t size_in_pages) const;
+  void initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size);
 public:
  // Commit the given area of pages starting at start being size_in_pages large.
  // Returns true if the given area is zero filled upon completion.
-  bool commit(uintptr_t start, size_t size_in_pages);
+  bool commit(size_t start_page, size_t size_in_pages);
  // Uncommit the given area of pages starting at start being size_in_pages large.
-  void uncommit(uintptr_t start, size_t size_in_pages);
+  void uncommit(size_t start_page, size_t size_in_pages);
-  // Initialization
-  G1PageBasedVirtualSpace();
-  bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
+  // Initialize the given reserved space with the given base address and the size
+  // actually used.
+  // Prefer to commit in page_size chunks.
+  G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size);
  // Destruction
  ~G1PageBasedVirtualSpace();
......
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -31,17 +31,16 @@
#include "utilities/bitMap.inline.hpp"
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
-                                             size_t commit_granularity,
+                                             size_t used_size,
+                                             size_t page_size,
                                             size_t region_granularity,
                                             MemoryType type) :
-  _storage(),
-  _commit_granularity(commit_granularity),
+  _storage(rs, used_size, page_size),
  _region_granularity(region_granularity),
  _listener(NULL),
  _commit_map() {
-  guarantee(is_power_of_2(commit_granularity), "must be");
+  guarantee(is_power_of_2(page_size), "must be");
  guarantee(is_power_of_2(region_granularity), "must be");
-  _storage.initialize_with_granularity(rs, commit_granularity);
  MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
@@ -55,25 +54,26 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
 public:
  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
-                                      size_t os_commit_granularity,
+                                      size_t actual_size,
+                                      size_t page_size,
                                      size_t alloc_granularity,
                                      size_t commit_factor,
                                      MemoryType type) :
-    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
-    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
-    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
+    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+    _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
+    guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
  }
-  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
-    bool zero_filled = _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+  virtual void commit_regions(uint start_idx, size_t num_regions) {
+    bool zero_filled = _storage.commit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
    _commit_map.set_range(start_idx, start_idx + num_regions);
    fire_on_commit(start_idx, num_regions, zero_filled);
  }
-  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
-    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+    _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
    _commit_map.clear_range(start_idx, start_idx + num_regions);
  }
};
@@ -98,22 +98,23 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
 public:
  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
-                                       size_t os_commit_granularity,
+                                       size_t actual_size,
+                                       size_t page_size,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
                                       MemoryType type) :
-    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
-    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
-    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
-    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
+    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+    _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
+    guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
+    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
  }
-  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
-    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
-      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
-      uintptr_t idx = region_idx_to_page_idx(i);
+  virtual void commit_regions(uint start_idx, size_t num_regions) {
+    for (uint i = start_idx; i < start_idx + num_regions; i++) {
+      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region %u that is already committed", i));
+      size_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);
      bool zero_filled = false;
      if (old_refcount == 0) {
@@ -125,10 +126,10 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
    }
  }
-  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
-    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
-      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
-      uintptr_t idx = region_idx_to_page_idx(i);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+    for (uint i = start_idx; i < start_idx + num_regions; i++) {
+      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region %u that is not committed", i));
+      size_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);
      assert(old_refcount > 0, "must be");
      if (old_refcount == 1) {
@@ -147,14 +148,15 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
}
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
-                                                            size_t os_commit_granularity,
+                                                            size_t actual_size,
+                                                            size_t page_size,
                                                            size_t region_granularity,
                                                            size_t commit_factor,
                                                            MemoryType type) {
-  if (region_granularity >= (os_commit_granularity * commit_factor)) {
-    return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  if (region_granularity >= (page_size * commit_factor)) {
+    return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  } else {
-    return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+    return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  }
}
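create_mapper() picks the implementation by comparing a region's size with how much heap one backing page of the data structure covers. A sketch of that decision with assumed sizes (4 MB regions as in the test below, a 2 MB large page); the function name choose_mapper is illustrative only, not part of the change.

    #include <cstddef>
    #include <cstdio>

    // If one heap region needs at least one full page of backing store, pages can
    // be committed per region; otherwise several regions share one page and the
    // refcounting ("smaller than commit size") mapper is used.
    static const char* choose_mapper(size_t region_granularity,
                                     size_t page_size,
                                     size_t commit_factor) {
      return (region_granularity >= page_size * commit_factor)
          ? "G1RegionsLargerThanCommitSizeMapper"
          : "G1RegionsSmallerThanCommitSizeMapper";
    }

    int main() {
      const size_t region     = 4u * 1024 * 1024;  // assumed 4 MB G1 region
      const size_t large_page = 2u * 1024 * 1024;  // assumed 2 MB large page
      std::printf("heap storage (factor 1): %s\n", choose_mapper(region, large_page, 1));
      std::printf("card table (factor 512): %s\n", choose_mapper(region, large_page, 512));
      return 0;
    }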
@@ -46,12 +46,12 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
 protected:
  // Backing storage.
  G1PageBasedVirtualSpace _storage;
-  size_t _commit_granularity;
  size_t _region_granularity;
  // Mapping management
  BitMap _commit_map;
-  G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
+  G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, MemoryType type);
  void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 public:
@@ -67,16 +67,20 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
    return _commit_map.at(idx);
  }
-  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
-  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+  virtual void commit_regions(uint start_idx, size_t num_regions = 1) = 0;
+  virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
  // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
+  // The actual space to be used within the given reservation is given by actual_size.
+  // This is because some OSes need to round up the reservation size to guarantee
+  // alignment of page_size.
  // The byte_translation_factor defines how many bytes in a region correspond to
  // a single byte in the data structure this mapper is for.
  // Eg. in the card table, this value corresponds to the size a single card
-  // table entry corresponds to.
+  // table entry corresponds to in the heap.
  static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
-                                              size_t os_commit_granularity,
+                                              size_t actual_size,
+                                              size_t page_size,
                                              size_t region_granularity,
                                              size_t byte_translation_factor,
                                              MemoryType type);
......
@@ -420,6 +420,7 @@ void FreeRegionList_test() {
  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
  G1RegionToSpaceMapper* bot_storage =
    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         bot_rs.size(),
                                         os::vm_page_size(),
                                         HeapRegion::GrainBytes,
                                         G1BlockOffsetSharedArray::N_bytes,
......
@@ -77,6 +77,8 @@ WB_ENTRY(jint, WB_GetVMPageSize(JNIEnv* env, jobject o))
  return os::vm_page_size();
WB_END
WB_ENTRY(jlong, WB_GetVMLargePageSize(JNIEnv* env, jobject o))
return os::large_page_size();
WB_END
class WBIsKlassAliveClosure : public KlassClosure {
  Symbol* _name;
  bool _found;
@@ -976,6 +980,7 @@ static JNINativeMethod methods[] = {
  {CC"isObjectInOldGen", CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen },
  {CC"getHeapOopSize", CC"()I", (void*)&WB_GetHeapOopSize },
  {CC"getVMPageSize", CC"()I", (void*)&WB_GetVMPageSize },
+ {CC"getVMLargePageSize", CC"()J", (void*)&WB_GetVMLargePageSize},
  {CC"isClassAlive0", CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive },
  {CC"classKnownToNotExist",
                      CC"(Ljava/lang/ClassLoader;Ljava/lang/String;)Z",(void*)&WB_ClassKnownToNotExist},
......
@@ -52,13 +52,21 @@ ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}
-ReservedSpace::ReservedSpace(size_t size) {
+ReservedSpace::ReservedSpace(size_t size, bool prefer_large_pages) {
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
+  size_t alignment;
+  if (large_pages && prefer_large_pages) {
+    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+    // ReservedSpace initialization requires size to be aligned to the given
+    // alignment. Align the size up.
+    size = align_size_up(size, alignment);
+  } else {
    // Don't force the alignment to be large page aligned,
    // since that will waste memory.
-  size_t alignment = os::vm_allocation_granularity();
+    alignment = os::vm_allocation_granularity();
+  }
  initialize(size, alignment, large_pages, NULL, 0, false);
}
......
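The padding trade-off documented for prefer_large_pages in numbers, assuming a 2 MB large page (a standalone sketch, not HotSpot code):

    #include <cstddef>
    #include <cstdio>

    // Preferring large pages rounds the reservation up to the large-page
    // alignment, so an unaligned request pays for the padding; without the flag
    // the alignment stays at the (small) allocation granularity.
    int main() {
      const size_t large_page = 2u * 1024 * 1024;   // assumed 2 MB large page
      const size_t request    = 9u * 1024 * 1024;   // e.g. a 9 MB auxiliary table

      size_t padding  = (large_page - request % large_page) % large_page;  // 1 MB
      size_t reserved = request + padding;                                 // 10 MB
      std::printf("reserved %zu bytes for a %zu byte request (%zu bytes padding)\n",
                  reserved, request, padding);
      return 0;
    }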
@@ -54,7 +54,12 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
 public:
  // Constructor
  ReservedSpace();
-  ReservedSpace(size_t size);
+  // Initialize the reserved space with the given size. If prefer_large_pages is
+  // set, if the given size warrants use of large pages, try to force them by
+  // passing an alignment restriction further down. This may waste some space
+  // if the given size is not aligned, as the reservation will be aligned up
+  // to large page alignment.
+  ReservedSpace(size_t size, bool prefer_large_pages = false);
  ReservedSpace(size_t size, size_t alignment, bool large,
                char* requested_address = NULL,
                const size_t noaccess_prefix = 0);
......
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestLargePageUseForAuxMemory.java
* @bug 8058354
* @key gc
* @library /testlibrary /../../test/lib
* @requires (vm.gc=="G1" | vm.gc=="null")
* @build TestLargePageUseForAuxMemory
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @summary Test that auxiliary data structures are allocated using large pages if available.
* @run main/othervm -Xbootclasspath/a:. -XX:+UseG1GC -XX:+WhiteBoxAPI -XX:+IgnoreUnrecognizedVMOptions -XX:+UseLargePages TestLargePageUseForAuxMemory
*/
import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;
public class TestLargePageUseForAuxMemory {
static final int HEAP_REGION_SIZE = 4 * 1024 * 1024;
static long largePageSize;
static long smallPageSize;
static void checkSmallTables(OutputAnalyzer output, long expectedPageSize) throws Exception {
output.shouldContain("G1 'Block offset table': pg_sz=" + expectedPageSize);
output.shouldContain("G1 'Card counts table': pg_sz=" + expectedPageSize);
}
static void checkBitmaps(OutputAnalyzer output, long expectedPageSize) throws Exception {
output.shouldContain("G1 'Prev Bitmap': pg_sz=" + expectedPageSize);
output.shouldContain("G1 'Next Bitmap': pg_sz=" + expectedPageSize);
}
static void testVM(long heapsize, boolean cardsShouldUseLargePages, boolean bitmapShouldUseLargePages) throws Exception {
ProcessBuilder pb;
// Test with large page enabled.
pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-XX:G1HeapRegionSize=" + HEAP_REGION_SIZE,
"-Xms" + 10 * HEAP_REGION_SIZE,
"-Xmx" + heapsize,
"-XX:+TracePageSizes",
"-XX:+UseLargePages",
"-XX:+IgnoreUnrecognizedVMOptions", // there is on ObjectAlignmentInBytes in 32 bit builds
"-XX:ObjectAlignmentInBytes=8",
"-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
checkSmallTables(output, (cardsShouldUseLargePages ? largePageSize : smallPageSize));
checkBitmaps(output, (bitmapShouldUseLargePages ? largePageSize : smallPageSize));
output.shouldHaveExitValue(0);
// Test with large page disabled.
pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-XX:G1HeapRegionSize=" + HEAP_REGION_SIZE,
"-Xms" + 10 * HEAP_REGION_SIZE,
"-Xmx" + heapsize,
"-XX:+TracePageSizes",
"-XX:-UseLargePages",
"-XX:+IgnoreUnrecognizedVMOptions", // there is on ObjectAlignmentInBytes in 32 bit builds
"-XX:ObjectAlignmentInBytes=8",
"-version");
output = new OutputAnalyzer(pb.start());
checkSmallTables(output, smallPageSize);
checkBitmaps(output, smallPageSize);
output.shouldHaveExitValue(0);
}
public static void main(String[] args) throws Exception {
if (!Platform.isDebugBuild()) {
System.out.println("Skip tests on non-debug builds because the required option TracePageSizes is a debug-only option.");
return;
}
WhiteBox wb = WhiteBox.getWhiteBox();
smallPageSize = wb.getVMPageSize();
largePageSize = wb.getVMLargePageSize();
if (largePageSize == 0) {
System.out.println("Skip tests because large page support does not seem to be available on this platform.");
return;
}
// To get large pages for the card table etc. we need at least a 1G heap (with 4k page size).
// 32 bit systems will have problems reserving such an amount of contiguous space, so skip the
// test there.
if (!Platform.is32bit()) {
// Size that a single card covers.
final int cardSize = 512;
final long heapSizeForCardTableUsingLargePages = largePageSize * cardSize;
testVM(heapSizeForCardTableUsingLargePages, true, true);
testVM(heapSizeForCardTableUsingLargePages + HEAP_REGION_SIZE, true, true);
testVM(heapSizeForCardTableUsingLargePages - HEAP_REGION_SIZE, false, true);
}
// Minimum heap requirement to get large pages for bitmaps is 128M heap. This seems okay to test
// everywhere.
final int bitmapTranslationFactor = 8 * 8; // ObjectAlignmentInBytes * BitsPerByte
final long heapSizeForBitmapUsingLargePages = largePageSize * bitmapTranslationFactor;
testVM(heapSizeForBitmapUsingLargePages, false, true);
testVM(heapSizeForBitmapUsingLargePages + HEAP_REGION_SIZE, false, true);
testVM(heapSizeForBitmapUsingLargePages - HEAP_REGION_SIZE, false, false);
}
}
@@ -76,6 +76,8 @@ public class WhiteBox {
  public native long getObjectAddress(Object o);
  public native int getHeapOopSize();
  public native int getVMPageSize();
+ public native long getVMLargePageSize();
  public native boolean isObjectInOldGen(Object o);
  public native long getObjectSize(Object o);
......