Commit b6c254c4 authored by coleenp

8024927: Nashorn performance regression with CompressedOops

Summary: Allocate compressed class space at end of Java heap.  For small heap sizes, without CDS, save some space so compressed classes can have the same favorable compression as oops
Reviewed-by: stefank, hseigel, goetz
Parent cf41cce1
...@@ -4096,15 +4096,19 @@ void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) { ...@@ -4096,15 +4096,19 @@ void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
void MacroAssembler::encode_klass_not_null(Register r) { void MacroAssembler::encode_klass_not_null(Register r) {
assert (UseCompressedClassPointers, "must be compressed"); assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); if (Universe::narrow_klass_base() != NULL) {
assert(r != G6_heapbase, "bad register choice"); assert(r != G6_heapbase, "bad register choice");
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
sub(r, G6_heapbase, r); sub(r, G6_heapbase, r);
if (Universe::narrow_klass_shift() != 0) { if (Universe::narrow_klass_shift() != 0) {
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
srlx(r, LogKlassAlignmentInBytes, r); srlx(r, LogKlassAlignmentInBytes, r);
}
reinit_heapbase();
} else {
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
srlx(r, Universe::narrow_klass_shift(), r);
} }
reinit_heapbase();
} }
void MacroAssembler::encode_klass_not_null(Register src, Register dst) { void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
...@@ -4112,11 +4116,16 @@ void MacroAssembler::encode_klass_not_null(Register src, Register dst) { ...@@ -4112,11 +4116,16 @@ void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
encode_klass_not_null(src); encode_klass_not_null(src);
} else { } else {
assert (UseCompressedClassPointers, "must be compressed"); assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); if (Universe::narrow_klass_base() != NULL) {
set((intptr_t)Universe::narrow_klass_base(), dst); set((intptr_t)Universe::narrow_klass_base(), dst);
sub(src, dst, dst); sub(src, dst, dst);
if (Universe::narrow_klass_shift() != 0) { if (Universe::narrow_klass_shift() != 0) {
srlx(dst, LogKlassAlignmentInBytes, dst); srlx(dst, LogKlassAlignmentInBytes, dst);
}
} else {
// shift src into dst
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
srlx(src, Universe::narrow_klass_shift(), dst);
} }
} }
} }
...@@ -4126,14 +4135,16 @@ void MacroAssembler::encode_klass_not_null(Register src, Register dst) { ...@@ -4126,14 +4135,16 @@ void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
// the instructions they generate change, then this method needs to be updated. // the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() { int MacroAssembler::instr_size_for_decode_klass_not_null() {
assert (UseCompressedClassPointers, "only for compressed klass ptrs"); assert (UseCompressedClassPointers, "only for compressed klass ptrs");
// set + add + set int num_instrs = 1; // shift src,dst or add
int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 + if (Universe::narrow_klass_base() != NULL) {
insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); // set + add + set
if (Universe::narrow_klass_shift() == 0) { num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) +
return num_instrs * BytesPerInstWord; insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
} else { // sllx if (Universe::narrow_klass_shift() != 0) {
return (num_instrs + 1) * BytesPerInstWord; num_instrs += 1; // sllx
}
} }
return num_instrs * BytesPerInstWord;
} }
// !!! If the instructions that get generated here change then function // !!! If the instructions that get generated here change then function
...@@ -4142,13 +4153,17 @@ void MacroAssembler::decode_klass_not_null(Register r) { ...@@ -4142,13 +4153,17 @@ void MacroAssembler::decode_klass_not_null(Register r) {
// Do not add assert code to this unless you change vtableStubs_sparc.cpp // Do not add assert code to this unless you change vtableStubs_sparc.cpp
// pd_code_size_limit. // pd_code_size_limit.
assert (UseCompressedClassPointers, "must be compressed"); assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); if (Universe::narrow_klass_base() != NULL) {
assert(r != G6_heapbase, "bad register choice"); assert(r != G6_heapbase, "bad register choice");
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
if (Universe::narrow_klass_shift() != 0) if (Universe::narrow_klass_shift() != 0)
sllx(r, LogKlassAlignmentInBytes, r); sllx(r, LogKlassAlignmentInBytes, r);
add(r, G6_heapbase, r); add(r, G6_heapbase, r);
reinit_heapbase(); reinit_heapbase();
} else {
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
sllx(r, Universe::narrow_klass_shift(), r);
}
} }
void MacroAssembler::decode_klass_not_null(Register src, Register dst) { void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
...@@ -4158,16 +4173,21 @@ void MacroAssembler::decode_klass_not_null(Register src, Register dst) { ...@@ -4158,16 +4173,21 @@ void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
// Do not add assert code to this unless you change vtableStubs_sparc.cpp // Do not add assert code to this unless you change vtableStubs_sparc.cpp
// pd_code_size_limit. // pd_code_size_limit.
assert (UseCompressedClassPointers, "must be compressed"); assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); if (Universe::narrow_klass_base() != NULL) {
if (Universe::narrow_klass_shift() != 0) { if (Universe::narrow_klass_shift() != 0) {
assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
sllx(src, LogKlassAlignmentInBytes, dst); sllx(src, LogKlassAlignmentInBytes, dst);
add(dst, G6_heapbase, dst); add(dst, G6_heapbase, dst);
reinit_heapbase(); reinit_heapbase();
} else {
set((intptr_t)Universe::narrow_klass_base(), dst);
add(src, dst, dst);
}
} else { } else {
set((intptr_t)Universe::narrow_klass_base(), dst); // shift/mov src into dst.
add(src, dst, dst); assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
sllx(src, Universe::narrow_klass_shift(), dst);
} }
} }
} }
......
...@@ -1660,12 +1660,16 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { ...@@ -1660,12 +1660,16 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
if (UseCompressedClassPointers) { if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized"); assert(Universe::heap() != NULL, "java heap should be initialized");
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass"); st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base"); if (Universe::narrow_klass_base() != 0) {
if (Universe::narrow_klass_shift() != 0) { st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
st->print_cr("\tSLL R_G5,3,R_G5"); if (Universe::narrow_klass_shift() != 0) {
st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
}
st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
} else {
st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
} }
st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
} else { } else {
st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check"); st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
} }
......
...@@ -5049,25 +5049,32 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { ...@@ -5049,25 +5049,32 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
} }
void MacroAssembler::encode_klass_not_null(Register r) { void MacroAssembler::encode_klass_not_null(Register r) {
assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); if (Universe::narrow_klass_base() != NULL) {
// Use r12 as a scratch register in which to temporarily load the narrow_klass_base. // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
assert(r != r12_heapbase, "Encoding a klass in r12"); assert(r != r12_heapbase, "Encoding a klass in r12");
mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
subq(r, r12_heapbase); subq(r, r12_heapbase);
}
if (Universe::narrow_klass_shift() != 0) { if (Universe::narrow_klass_shift() != 0) {
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
shrq(r, LogKlassAlignmentInBytes); shrq(r, LogKlassAlignmentInBytes);
} }
reinit_heapbase(); if (Universe::narrow_klass_base() != NULL) {
reinit_heapbase();
}
} }
void MacroAssembler::encode_klass_not_null(Register dst, Register src) { void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
if (dst == src) { if (dst == src) {
encode_klass_not_null(src); encode_klass_not_null(src);
} else { } else {
mov64(dst, (int64_t)Universe::narrow_klass_base()); if (Universe::narrow_klass_base() != NULL) {
negq(dst); mov64(dst, (int64_t)Universe::narrow_klass_base());
addq(dst, src); negq(dst);
addq(dst, src);
} else {
movptr(dst, src);
}
if (Universe::narrow_klass_shift() != 0) { if (Universe::narrow_klass_shift() != 0) {
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
shrq(dst, LogKlassAlignmentInBytes); shrq(dst, LogKlassAlignmentInBytes);
...@@ -5081,15 +5088,19 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { ...@@ -5081,15 +5088,19 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
// generate change, then this method needs to be updated. // generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() { int MacroAssembler::instr_size_for_decode_klass_not_null() {
assert (UseCompressedClassPointers, "only for compressed klass ptrs"); assert (UseCompressedClassPointers, "only for compressed klass ptrs");
// mov64 + addq + shlq? + mov64 (for reinit_heapbase()). if (Universe::narrow_klass_base() != NULL) {
return (Universe::narrow_klass_shift() == 0 ? 20 : 24); // mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
} else {
// longest load decode klass function, mov64, leaq
return 16;
}
} }
// !!! If the instructions that get generated here change then function // !!! If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated. // instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) { void MacroAssembler::decode_klass_not_null(Register r) {
// Note: it will change flags // Note: it will change flags
assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert(r != r12_heapbase, "Decoding a klass in r12"); assert(r != r12_heapbase, "Decoding a klass in r12");
// Cannot assert, unverified entry point counts instructions (see .ad file) // Cannot assert, unverified entry point counts instructions (see .ad file)
...@@ -5100,14 +5111,15 @@ void MacroAssembler::decode_klass_not_null(Register r) { ...@@ -5100,14 +5111,15 @@ void MacroAssembler::decode_klass_not_null(Register r) {
shlq(r, LogKlassAlignmentInBytes); shlq(r, LogKlassAlignmentInBytes);
} }
// Use r12 as a scratch register in which to temporarily load the narrow_klass_base. // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); if (Universe::narrow_klass_base() != NULL) {
addq(r, r12_heapbase); mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
reinit_heapbase(); addq(r, r12_heapbase);
reinit_heapbase();
}
} }
void MacroAssembler::decode_klass_not_null(Register dst, Register src) { void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
// Note: it will change flags // Note: it will change flags
assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (UseCompressedClassPointers, "should only be used for compressed headers");
if (dst == src) { if (dst == src) {
decode_klass_not_null(dst); decode_klass_not_null(dst);
...@@ -5115,7 +5127,6 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) { ...@@ -5115,7 +5127,6 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
// Cannot assert, unverified entry point counts instructions (see .ad file) // Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit. // vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop. // Also do not verify_oop as this is called by verify_oop.
mov64(dst, (int64_t)Universe::narrow_klass_base()); mov64(dst, (int64_t)Universe::narrow_klass_base());
if (Universe::narrow_klass_shift() != 0) { if (Universe::narrow_klass_shift() != 0) {
assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
......
...@@ -56,7 +56,7 @@ size_t const allocation_from_dictionary_limit = 4 * K; ...@@ -56,7 +56,7 @@ size_t const allocation_from_dictionary_limit = 4 * K;
MetaWord* last_allocated = 0; MetaWord* last_allocated = 0;
size_t Metaspace::_class_metaspace_size; size_t Metaspace::_compressed_class_space_size;
// Used in declarations in SpaceManager and ChunkManager // Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex { enum ChunkIndex {
...@@ -2799,6 +2799,8 @@ ChunkManager* Metaspace::_chunk_manager_class = NULL; ...@@ -2799,6 +2799,8 @@ ChunkManager* Metaspace::_chunk_manager_class = NULL;
#define VIRTUALSPACEMULTIPLIER 2 #define VIRTUALSPACEMULTIPLIER 2
#ifdef _LP64 #ifdef _LP64
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
// Figure out the narrow_klass_base and the narrow_klass_shift. The // Figure out the narrow_klass_base and the narrow_klass_shift. The
// narrow_klass_base is the lower of the metaspace base and the cds base // narrow_klass_base is the lower of the metaspace base and the cds base
...@@ -2808,14 +2810,22 @@ void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address ...@@ -2808,14 +2810,22 @@ void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address
address higher_address; address higher_address;
if (UseSharedSpaces) { if (UseSharedSpaces) {
higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
(address)(metaspace_base + class_metaspace_size())); (address)(metaspace_base + compressed_class_space_size()));
lower_base = MIN2(metaspace_base, cds_base); lower_base = MIN2(metaspace_base, cds_base);
} else { } else {
higher_address = metaspace_base + class_metaspace_size(); higher_address = metaspace_base + compressed_class_space_size();
lower_base = metaspace_base; lower_base = metaspace_base;
uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
// If compressed class space fits in lower 32G, we don't need a base.
if (higher_address <= (address)klass_encoding_max) {
lower_base = 0; // effectively lower base is zero.
}
} }
Universe::set_narrow_klass_base(lower_base); Universe::set_narrow_klass_base(lower_base);
if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
if ((uint64_t)(higher_address - lower_base) < UnscaledClassSpaceMax) {
Universe::set_narrow_klass_shift(0); Universe::set_narrow_klass_shift(0);
} else { } else {
assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
...@@ -2830,24 +2840,24 @@ bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cd ...@@ -2830,24 +2840,24 @@ bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cd
assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
address lower_base = MIN2((address)metaspace_base, cds_base); address lower_base = MIN2((address)metaspace_base, cds_base);
address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
(address)(metaspace_base + class_metaspace_size())); (address)(metaspace_base + compressed_class_space_size()));
return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint); return ((uint64_t)(higher_address - lower_base) < UnscaledClassSpaceMax);
} }
// Try to allocate the metaspace at the requested addr. // Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
assert(using_class_space(), "called improperly"); assert(using_class_space(), "called improperly");
assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
assert(class_metaspace_size() < KlassEncodingMetaspaceMax, assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
"Metaspace size is too big"); "Metaspace size is too big");
assert_is_ptr_aligned(requested_addr, _reserve_alignment); assert_is_ptr_aligned(requested_addr, _reserve_alignment);
assert_is_ptr_aligned(cds_base, _reserve_alignment); assert_is_ptr_aligned(cds_base, _reserve_alignment);
assert_is_size_aligned(class_metaspace_size(), _reserve_alignment); assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
// Don't use large pages for the class space. // Don't use large pages for the class space.
bool large_pages = false; bool large_pages = false;
ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(), ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment, _reserve_alignment,
large_pages, large_pages,
requested_addr, 0); requested_addr, 0);
...@@ -2862,7 +2872,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a ...@@ -2862,7 +2872,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
while (!metaspace_rs.is_reserved() && (addr + increment > addr) && while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
addr = addr + increment; addr = addr + increment;
metaspace_rs = ReservedSpace(class_metaspace_size(), metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment, large_pages, addr, 0); _reserve_alignment, large_pages, addr, 0);
} }
} }
...@@ -2873,11 +2883,11 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a ...@@ -2873,11 +2883,11 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
// initialization has happened that depends on UseCompressedClassPointers. // initialization has happened that depends on UseCompressedClassPointers.
// So, UseCompressedClassPointers cannot be turned off at this point. // So, UseCompressedClassPointers cannot be turned off at this point.
if (!metaspace_rs.is_reserved()) { if (!metaspace_rs.is_reserved()) {
metaspace_rs = ReservedSpace(class_metaspace_size(), metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment, large_pages); _reserve_alignment, large_pages);
if (!metaspace_rs.is_reserved()) { if (!metaspace_rs.is_reserved()) {
vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
class_metaspace_size())); compressed_class_space_size()));
} }
} }
} }
...@@ -2899,8 +2909,8 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a ...@@ -2899,8 +2909,8 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT, gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
Universe::narrow_klass_base(), Universe::narrow_klass_shift()); Universe::narrow_klass_base(), Universe::narrow_klass_shift());
gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
class_metaspace_size(), metaspace_rs.base(), requested_addr); compressed_class_space_size(), metaspace_rs.base(), requested_addr);
} }
} }
...@@ -2966,7 +2976,7 @@ void Metaspace::ergo_initialize() { ...@@ -2966,7 +2976,7 @@ void Metaspace::ergo_initialize() {
MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment); MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment); CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
set_class_metaspace_size(CompressedClassSpaceSize); set_compressed_class_space_size(CompressedClassSpaceSize);
} }
void Metaspace::global_initialize() { void Metaspace::global_initialize() {
...@@ -2995,12 +3005,12 @@ void Metaspace::global_initialize() { ...@@ -2995,12 +3005,12 @@ void Metaspace::global_initialize() {
} }
#ifdef _LP64 #ifdef _LP64
if (cds_total + class_metaspace_size() > (uint64_t)max_juint) { if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
vm_exit_during_initialization("Unable to dump shared archive.", vm_exit_during_initialization("Unable to dump shared archive.",
err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
"klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(), "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
cds_total + class_metaspace_size(), (size_t)max_juint)); cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
} }
// Set the compressed klass pointer base so that decoding of these pointers works // Set the compressed klass pointer base so that decoding of these pointers works
...@@ -3048,7 +3058,8 @@ void Metaspace::global_initialize() { ...@@ -3048,7 +3058,8 @@ void Metaspace::global_initialize() {
cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
} else { } else {
allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0); char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
allocate_metaspace_compressed_klass_ptrs(base, 0);
} }
} }
#endif #endif
......
...@@ -115,13 +115,13 @@ class Metaspace : public CHeapObj<mtClass> { ...@@ -115,13 +115,13 @@ class Metaspace : public CHeapObj<mtClass> {
static size_t align_word_size_up(size_t); static size_t align_word_size_up(size_t);
// Aligned size of the metaspace. // Aligned size of the metaspace.
static size_t _class_metaspace_size; static size_t _compressed_class_space_size;
static size_t class_metaspace_size() { static size_t compressed_class_space_size() {
return _class_metaspace_size; return _compressed_class_space_size;
} }
static void set_class_metaspace_size(size_t metaspace_size) { static void set_compressed_class_space_size(size_t size) {
_class_metaspace_size = metaspace_size; _compressed_class_space_size = size;
} }
static size_t _first_chunk_word_size; static size_t _first_chunk_word_size;
......
...@@ -677,13 +677,13 @@ jint universe_init() { ...@@ -677,13 +677,13 @@ jint universe_init() {
// HeapBased - Use compressed oops with heap base + encoding. // HeapBased - Use compressed oops with heap base + encoding.
// 4Gb // 4Gb
static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1); static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb // 32Gb
// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes; // OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;
char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) { char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be"); assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be"); assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
assert(is_size_aligned(heap_size, alignment), "Must be"); assert(is_size_aligned(heap_size, alignment), "Must be");
uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment); uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
...@@ -702,20 +702,40 @@ char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_O ...@@ -702,20 +702,40 @@ char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_O
// If the total size is small enough to allow UnscaledNarrowOop then // If the total size is small enough to allow UnscaledNarrowOop then
// just use UnscaledNarrowOop. // just use UnscaledNarrowOop.
} else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) { } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) && if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
(Universe::narrow_oop_shift() == 0)) { (Universe::narrow_oop_shift() == 0)) {
// Use 32-bits oops without encoding and // Use 32-bits oops without encoding and
// place heap's top on the 4Gb boundary // place heap's top on the 4Gb boundary
base = (NarrowOopHeapMax - heap_size); base = (UnscaledOopHeapMax - heap_size);
} else { } else {
// Can't reserve with NarrowOopShift == 0 // Can't reserve with NarrowOopShift == 0
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
if (mode == UnscaledNarrowOop || if (mode == UnscaledNarrowOop ||
mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) { mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) {
// Use zero based compressed oops with encoding and // Use zero based compressed oops with encoding and
// place heap's top on the 32Gb boundary in case // place heap's top on the 32Gb boundary in case
// total_size > 4Gb or failed to reserve below 4Gb. // total_size > 4Gb or failed to reserve below 4Gb.
base = (OopEncodingHeapMax - heap_size); uint64_t heap_top = OopEncodingHeapMax;
// For small heaps, save some space for compressed class pointer
// space so it can be decoded with no base.
if (UseCompressedClassPointers && !UseSharedSpaces &&
OopEncodingHeapMax <= 32*G) {
uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
alignment), "difference must be aligned too");
uint64_t new_top = OopEncodingHeapMax-class_space;
if (total_size <= new_top) {
heap_top = new_top;
}
}
// Align base to the adjusted top of the heap
base = heap_top - heap_size;
} }
} }
} else { } else {
...@@ -737,7 +757,7 @@ char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_O ...@@ -737,7 +757,7 @@ char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_O
// Set to a non-NULL value so the ReservedSpace ctor computes // Set to a non-NULL value so the ReservedSpace ctor computes
// the correct no-access prefix. // the correct no-access prefix.
// The final value will be set in initialize_heap() below. // The final value will be set in initialize_heap() below.
Universe::set_narrow_oop_base((address)NarrowOopHeapMax); Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
#ifdef _WIN64 #ifdef _WIN64
if (UseLargePages) { if (UseLargePages) {
// Cannot allocate guard pages for implicit checks in indexed // Cannot allocate guard pages for implicit checks in indexed
...@@ -833,7 +853,7 @@ jint Universe::initialize_heap() { ...@@ -833,7 +853,7 @@ jint Universe::initialize_heap() {
Universe::set_narrow_oop_use_implicit_null_checks(true); Universe::set_narrow_oop_use_implicit_null_checks(true);
} }
#endif // _WIN64 #endif // _WIN64
if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) { if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
// Can't reserve heap below 4Gb. // Can't reserve heap below 4Gb.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
} else { } else {
......
...@@ -368,8 +368,6 @@ const int KlassAlignment = KlassAlignmentInBytes / HeapWordSize; ...@@ -368,8 +368,6 @@ const int KlassAlignment = KlassAlignmentInBytes / HeapWordSize;
// Klass encoding metaspace max size // Klass encoding metaspace max size
const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes; const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;
const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000000)); // 32*G
// Machine dependent stuff // Machine dependent stuff
#ifdef TARGET_ARCH_x86 #ifdef TARGET_ARCH_x86
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8024927
* @summary Testing address of compressed class pointer space as best as possible.
* @library /testlibrary
*/
import com.oracle.java.testlibrary.*;
// Regression test for JDK-8024927: verifies where the VM places the
// compressed class pointer space under various heap / CDS configurations,
// by launching child VMs and inspecting -XX:+PrintCompressedOopsMode output.
public class CompressedClassPointers {

    // Launches a child JVM with the given flags and wraps its output.
    private static OutputAnalyzer launch(String... flags) throws Exception {
        ProcessBuilder builder = ProcessTools.createJavaProcessBuilder(flags);
        return new OutputAnalyzer(builder.start());
    }

    // Small heap, no CDS archive in use: the class space should be
    // reachable with a zero narrow-klass base.
    public static void smallHeapTest() throws Exception {
        OutputAnalyzer out = launch(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedBaseAddress=8g",
            "-Xmx128m",
            "-XX:+PrintCompressedOopsMode",
            "-XX:+VerifyBeforeGC", "-version");
        out.shouldContain("Narrow klass base: 0x0000000000000000");
        out.shouldHaveExitValue(0);
    }

    // Small heap with an oversized 3g class space: zero base is still
    // expected, but a shift of 3 becomes necessary.
    public static void smallHeapTestWith3G() throws Exception {
        OutputAnalyzer out = launch(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:CompressedClassSpaceSize=3g",
            "-Xmx128m",
            "-XX:+PrintCompressedOopsMode",
            "-XX:+VerifyBeforeGC", "-version");
        out.shouldContain("Narrow klass base: 0x0000000000000000, Narrow klass shift: 3");
        out.shouldHaveExitValue(0);
    }

    // Large heap: a zero base is no longer possible, but no shift
    // should be needed.
    public static void largeHeapTest() throws Exception {
        OutputAnalyzer out = launch(
            "-XX:+UnlockDiagnosticVMOptions",
            "-Xmx30g",
            "-XX:+PrintCompressedOopsMode",
            "-XX:+VerifyBeforeGC", "-version");
        out.shouldNotContain("Narrow klass base: 0x0000000000000000");
        out.shouldContain("Narrow klass shift: 0");
        out.shouldHaveExitValue(0);
    }

    // Large pages: only check that a narrow klass base is reported;
    // its exact placement is platform dependent.
    public static void largePagesTest() throws Exception {
        OutputAnalyzer out = launch(
            "-XX:+UnlockDiagnosticVMOptions",
            "-Xmx128m",
            "-XX:+UseLargePages",
            "-XX:+PrintCompressedOopsMode",
            "-XX:+VerifyBeforeGC", "-version");
        out.shouldContain("Narrow klass base:");
        out.shouldHaveExitValue(0);
    }

    // CDS round trip with a small heap: dump an archive, then run with
    // it. Mapping the archive at 8g may legitimately fail, in which case
    // the VM must report that the archive is unusable.
    public static void sharingTest() throws Exception {
        // Test small heaps
        OutputAnalyzer out = launch(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./sample.jsa",
            "-Xmx128m",
            "-XX:SharedBaseAddress=8g",
            "-XX:+PrintCompressedOopsMode",
            "-XX:+VerifyBeforeGC",
            "-Xshare:dump");
        try {
            out.shouldContain("Loading classes to share");
            out.shouldHaveExitValue(0);

            out = launch(
                "-XX:+UnlockDiagnosticVMOptions",
                "-XX:SharedArchiveFile=./sample.jsa",
                "-Xmx128m",
                "-XX:SharedBaseAddress=8g",
                "-XX:+PrintCompressedOopsMode",
                "-Xshare:on",
                "-version");
            out.shouldContain("sharing");
            out.shouldHaveExitValue(0);
        } catch (RuntimeException e) {
            out.shouldContain("Unable to use shared archive");
            out.shouldHaveExitValue(1);
        }
    }

    public static void main(String[] args) throws Exception {
        if (!Platform.is64bit()) {
            // Can't test this on 32 bit, just pass
            System.out.println("Skipping test on 32bit");
            return;
        }
        // Solaris 10 can't mmap compressed oops space without a base
        if (Platform.isSolaris()) {
            String name = System.getProperty("os.version");
            if (name.equals("5.10")) {
                System.out.println("Skipping test on Solaris 10");
                return;
            }
        }
        smallHeapTest();
        smallHeapTestWith3G();
        largeHeapTest();
        largePagesTest();
        sharingTest();
    }
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册