Commit dc0c6049 authored by amurillo

Merge

@@ -272,9 +272,10 @@ public class Bytecodes {
  public static final int _fast_aldc             = 229;
  public static final int _fast_aldc_w           = 230;
  public static final int _return_register_finalizer = 231;
- public static final int _shouldnotreachhere    = 232; // For debugging
+ public static final int _invokehandle          = 232;
+ public static final int _shouldnotreachhere    = 233; // For debugging

- public static final int number_of_codes        = 233;
+ public static final int number_of_codes        = 234;

  // Flag bits derived from format strings, can_trap, can_rewrite, etc.:
  // semantic flags:
@@ -787,20 +788,22 @@ public class Bytecodes {
  def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_JJ", null, BasicType.getTObject() ,  1, true , _aload_0);
  def(_fast_faccess_0      , "fast_faccess_0"      , "b_JJ", null, BasicType.getTObject() ,  1, true , _aload_0);

  def(_fast_iload          , "fast_iload"          , "bi"  , null, BasicType.getTInt()    ,  1, false, _iload );
  def(_fast_iload2         , "fast_iload2"         , "bi_i", null, BasicType.getTInt()    ,  2, false, _iload );
  def(_fast_icaload        , "fast_icaload"        , "bi_" , null, BasicType.getTInt()    ,  0, false, _iload );

  // Faster method invocation.
  def(_fast_invokevfinal   , "fast_invokevfinal"   , "bJJ" , null, BasicType.getTIllegal(), -1, true , _invokevirtual );

  def(_fast_linearswitch   , "fast_linearswitch"   , ""    , null, BasicType.getTVoid()   , -1, false, _lookupswitch );
  def(_fast_binaryswitch   , "fast_binaryswitch"   , ""    , null, BasicType.getTVoid()   , -1, false, _lookupswitch );

+ def(_fast_aldc           , "fast_aldc"           , "bj"  , null, BasicType.getTObject() ,  1, true , _ldc   );
+ def(_fast_aldc_w         , "fast_aldc_w"         , "bJJ" , null, BasicType.getTObject() ,  1, true , _ldc_w );
  def(_return_register_finalizer, "return_register_finalizer", "b", null, BasicType.getTVoid(), 0, true , _return );
- def(_fast_aldc           , "fast_aldc"           , "bj"  , null, BasicType.getTObject() ,  1, true , _ldc   );
- def(_fast_aldc_w         , "fast_aldc_w"         , "bJJ" , null, BasicType.getTObject() ,  1, true , _ldc_w );
+ // special handling of signature-polymorphic methods
+ def(_invokehandle        , "invokehandle"        , "bJJ" , null, BasicType.getTIllegal(), -1, true , _invokevirtual );

  def(_shouldnotreachhere  , "_shouldnotreachhere" , "b"   , null, BasicType.getTVoid()   ,  0, false);
......
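For orientation, the new _invokehandle pseudo-bytecode backs JSR 292 signature-polymorphic calls. A minimal sketch (not part of the commit; class name illustrative) of the kind of call site the interpreter rewrites this way:

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class InvokeHandleDemo {
    public static void main(String[] args) throws Throwable {
        // MethodHandle.invokeExact is signature-polymorphic: the JVM links
        // the call site through the method-handle machinery, which is what
        // the new _invokehandle rewrite supports in the interpreter.
        MethodHandle concat = MethodHandles.lookup().findVirtual(
                String.class, "concat",
                MethodType.methodType(String.class, String.class));
        String s = (String) concat.invokeExact("foo", "bar");
        System.out.println(s); // foobar
    }
}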
@@ -30,24 +30,10 @@ import sun.jvm.hotspot.utilities.PlatformInfo;

/** Encapsulates some byte-swapping operations defined in the VM */

public class Bytes {
-  // swap if client platform is different from server's.
   private boolean swap;

   public Bytes(MachineDescription machDesc) {
-    String cpu = PlatformInfo.getCPU();
-    if (cpu.equals("sparc")) {
-      if (machDesc.isBigEndian()) {
-        swap = false;
-      } else {
-        swap = true;
-      }
-    } else { // intel
-      if (machDesc.isBigEndian()) {
-        swap = true;
-      } else {
-        swap = false;
-      }
-    }
+    swap = !machDesc.isBigEndian();
   }

  /** Should only swap if the hardware's underlying byte order is
......
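The rewritten constructor captures the real invariant: the SA itself runs in Java, whose values are effectively big-endian, so data read from the target VM needs swapping exactly when the target is little-endian, regardless of the host CPU. A standalone sketch of the swap (assumed helper names; not the SA class itself):

public class ByteSwapSketch {
    // Reverse the two bytes of a 16-bit value, e.g. 0x1234 -> 0x3412.
    static short swapShort(short x) {
        return (short) (((x & 0x00ff) << 8) | ((x >> 8) & 0x00ff));
    }
    // The JDK already provides the 32-bit equivalent.
    static int swapInt(int x) {
        return Integer.reverseBytes(x);
    }
    public static void main(String[] args) {
        System.out.printf("0x%04x%n", swapShort((short) 0x1234)); // 0x3412
    }
}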
@@ -29,6 +29,11 @@ import sun.jvm.hotspot.interpreter.*;
import sun.jvm.hotspot.utilities.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.AccessControlContext;
import java.security.PrivilegedExceptionAction;
import java.security.PrivilegedActionException;

public class ByteCodeRewriter
{
@@ -38,8 +43,20 @@ public class ByteCodeRewriter
   private byte[] code;
   private Bytes  bytes;

-  public static final boolean DEBUG = false;
   private static final int jintSize = 4;
   public static final boolean DEBUG;

   static {
      String debug = (String) AccessController.doPrivileged(
         new PrivilegedAction() {
            public Object run() {
               return System.getProperty("sun.jvm.hotspot.tools.jcore.ByteCodeRewriter.DEBUG");
            }
         }
      );
      DEBUG = (debug != null ? debug.equalsIgnoreCase("true") : false);
   }

   protected void debugMessage(String message) {
     System.out.println(message);
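Because DEBUG is now final and read from a system property inside a static initializer, it must be set before the class is loaded, typically via -Dsun.jvm.hotspot.tools.jcore.ByteCodeRewriter.DEBUG=true on the java command line. An in-process sketch (hypothetical wrapper class) of the same idea:

public class EnableRewriterDebug {
    public static void main(String[] args) {
        // Must run before ByteCodeRewriter's static initializer, since the
        // property is consulted exactly once, at class-load time.
        System.setProperty(
            "sun.jvm.hotspot.tools.jcore.ByteCodeRewriter.DEBUG", "true");
        // ... then start the SA tool that drives the rewriter from here.
    }
}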
@@ -54,6 +71,18 @@ public class ByteCodeRewriter
   }

   protected short getConstantPoolIndexFromRefMap(int rawcode, int bci) {
      int refIndex;
      String fmt = Bytecodes.format(rawcode);
      switch (fmt.length()) {
         case 2: refIndex = 0xFF & method.getBytecodeByteArg(bci); break;
         case 3: refIndex = 0xFFFF & bytes.swapShort(method.getBytecodeShortArg(bci)); break;
         default: throw new IllegalArgumentException();
      }

      return (short) cpool.objectToCPIndex(refIndex);
   }

   protected short getConstantPoolIndex(int rawcode, int bci) {
      // get ConstantPool index from ConstantPoolCacheIndex at given bci
      String fmt = Bytecodes.format(rawcode);
@@ -95,6 +124,12 @@ public class ByteCodeRewriter
      int hotspotcode = Bytecodes._illegal;
      int len = 0;

      if (DEBUG) {
         String msg = method.getMethodHolder().getName().asString() + "." +
                      method.getName().asString() +
                      method.getSignature().asString();
         debugMessage(msg);
      }
      for (int bci = 0; bci < code.length;) {
         hotspotcode = Bytecodes.codeAt(method, bci);
         bytecode = Bytecodes.javaCode(hotspotcode);
@@ -133,15 +168,15 @@ public class ByteCodeRewriter
            case Bytecodes._ldc_w:
               if (hotspotcode != bytecode) {
-                 // fast_aldc_w puts constant in CP cache
-                 cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                 // fast_aldc_w puts constant in reference map
+                 cpoolIndex = getConstantPoolIndexFromRefMap(hotspotcode, bci + 1);
                  writeShort(code, bci + 1, cpoolIndex);
               }
               break;

            case Bytecodes._ldc:
               if (hotspotcode != bytecode) {
-                 // fast_aldc puts constant in CP cache
-                 cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                 // fast_aldc puts constant in reference map
+                 cpoolIndex = getConstantPoolIndexFromRefMap(hotspotcode, bci + 1);
                  code[bci + 1] = (byte)(cpoolIndex);
               }
               break;
......
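The fmt.length() switch in getConstantPoolIndexFromRefMap works because each Bytecodes format string spells out the operand layout: "bj" is an opcode plus a one-byte index (fast_aldc), "bJJ" an opcode plus a two-byte index (fast_aldc_w). A standalone restatement of that mapping (hypothetical helper):

static int operandWidth(String fmt) {
    // Format length = opcode byte + operand bytes, so it determines how
    // wide the reference-map index operand is.
    switch (fmt.length()) {
        case 2:  return 1; // "bj"  -> fast_aldc,   one-byte index
        case 3:  return 2; // "bJJ" -> fast_aldc_w, two-byte index
        default: throw new IllegalArgumentException("unexpected format: " + fmt);
    }
}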
@@ -79,10 +79,10 @@ ifeq ($(INCLUDE_ALTERNATE_GCS), false)
      CXXFLAGS += -DSERIALGC
      CFLAGS += -DSERIALGC
      Src_Files_EXCLUDE += \
-	binaryTreeDictionary.cpp cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
+	cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
	cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp cmsPermGen.cpp compactibleFreeListSpace.cpp \
-	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp freeBlockDictionary.cpp \
-	freeChunk.cpp freeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
+	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
+	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
	concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
	dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
	g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
......
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012

HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=06
+HS_BUILD_NUMBER=07

JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
@@ -1007,6 +1007,67 @@ void Assembler::addss(XMMRegister dst, Address src) {
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}
void Assembler::aesdec(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xde);
emit_operand(dst, src);
}
void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xde);
emit_byte(0xC0 | encode);
}
void Assembler::aesdeclast(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdf);
emit_operand(dst, src);
}
void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdf);
emit_byte(0xC0 | encode);
}
void Assembler::aesenc(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdc);
emit_operand(dst, src);
}
void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdc);
emit_byte(0xC0 | encode);
}
void Assembler::aesenclast(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdd);
emit_operand(dst, src);
}
void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdd);
emit_byte(0xC0 | encode);
}
void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
@@ -2307,6 +2368,22 @@ void Assembler::prefix(Prefix p) {
  a_byte(p);
}
void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_ssse3(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0x00);
emit_byte(0xC0 | encode);
}
void Assembler::pshufb(XMMRegister dst, Address src) {
assert(VM_Version::supports_ssse3(), "");
assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0x00);
emit_operand(dst, src);
}
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -8067,6 +8144,15 @@ void MacroAssembler::movptr(Address dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}
void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
Assembler::movdqu(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::movdqu(dst, Address(rscratch1, 0));
}
}
void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movsd(dst, as_Address(src));
@@ -8357,6 +8443,17 @@ void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
  }
}
void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
// Used in sign-bit flipping with aligned address.
assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
if (reachable(src)) {
Assembler::pshufb(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::pshufb(dst, Address(rscratch1, 0));
}
}
// AVX 3-operands instructions

void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
......
@@ -875,6 +875,17 @@ private:
  void addss(XMMRegister dst, Address src);
  void addss(XMMRegister dst, XMMRegister src);
// AES instructions
void aesdec(XMMRegister dst, Address src);
void aesdec(XMMRegister dst, XMMRegister src);
void aesdeclast(XMMRegister dst, Address src);
void aesdeclast(XMMRegister dst, XMMRegister src);
void aesenc(XMMRegister dst, Address src);
void aesenc(XMMRegister dst, XMMRegister src);
void aesenclast(XMMRegister dst, Address src);
void aesenclast(XMMRegister dst, XMMRegister src);
  void andl(Address dst, int32_t imm32);
  void andl(Register dst, int32_t imm32);
  void andl(Register dst, Address src);
@@ -1424,6 +1435,10 @@ private:
  void prefetcht2(Address src);
  void prefetchw(Address src);
// Shuffle Bytes
void pshufb(XMMRegister dst, XMMRegister src);
void pshufb(XMMRegister dst, Address src);
  // Shuffle Packed Doublewords
  void pshufd(XMMRegister dst, XMMRegister src, int mode);
  void pshufd(XMMRegister dst, Address src, int mode);
@@ -2611,6 +2626,12 @@ public:
  void divss(XMMRegister dst, Address src)    { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);
// Move Unaligned Double Quadword
void movdqu(Address dst, XMMRegister src) { Assembler::movdqu(dst, src); }
void movdqu(XMMRegister dst, Address src) { Assembler::movdqu(dst, src); }
void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
void movdqu(XMMRegister dst, AddressLiteral src);
  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
@@ -2658,6 +2679,10 @@ public:
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src);
// Shuffle Bytes
void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operands instructions
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
......
@@ -44,3 +44,4 @@
address StubRoutines::x86::_verify_mxcsr_entry         = NULL;
address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
@@ -41,10 +41,14 @@ class x86 {
 private:
  static address _verify_mxcsr_entry;
  static address _verify_fpu_cntrl_wrd_entry;
  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
  static address _key_shuffle_mask_addr;

 public:
  static address verify_mxcsr_entry()          { return _verify_mxcsr_entry; }
  static address verify_fpu_cntrl_wrd_entry()  { return _verify_fpu_cntrl_wrd_entry; }
  static address key_shuffle_mask_addr()       { return _key_shuffle_mask_addr; }
};

  static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
......
@@ -56,3 +56,4 @@ address StubRoutines::x86::_float_sign_flip = NULL;
address StubRoutines::x86::_double_sign_mask = NULL;
address StubRoutines::x86::_double_sign_flip = NULL;
address StubRoutines::x86::_mxcsr_std = NULL;
address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
@@ -54,6 +54,8 @@ class x86 {
  static address _double_sign_mask;
  static address _double_sign_flip;
  static address _mxcsr_std;
// shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
static address _key_shuffle_mask_addr;
 public:
@@ -116,6 +118,9 @@ class x86 {
  {
    return _mxcsr_std;
  }
static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
};

#endif // CPU_X86_VM_STUBROUTINES_X86_64_HPP
@@ -419,13 +419,16 @@ void VM_Version::get_processor_features() {
  if (UseAVX < 1)
    _cpuFeatures &= ~CPU_AVX;
if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
_cpuFeatures &= ~CPU_AES;
  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _cpuFeatures &= ~CPU_HT;
  }

  char buf[256];
- jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
@@ -441,6 +444,7 @@ void VM_Version::get_processor_features() {
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx()    ? ", avx" : ""),
               (supports_avx2()   ? ", avx2" : ""),
               (supports_aes()    ? ", aes" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt()   ? ", lzcnt": ""),
@@ -472,6 +476,29 @@ void VM_Version::get_processor_features() {
  if (!supports_avx ()) // Drop to 0 if no AVX support
    UseAVX = 0;
// Use AES instructions if available.
if (supports_aes()) {
if (FLAG_IS_DEFAULT(UseAES)) {
UseAES = true;
}
} else if (UseAES) {
if (!FLAG_IS_DEFAULT(UseAES))
warning("AES instructions not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
// The AES intrinsic stubs require AES instruction support (of course)
// but also require AVX mode for misaligned SSE access
if (UseAES && (UseAVX > 0)) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
UseAESIntrinsics = true;
}
} else if (UseAESIntrinsics) {
if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
warning("AES intrinsics not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
@@ -714,6 +741,9 @@ void VM_Version::get_processor_features() {
    if (UseAVX > 0) {
      tty->print("  UseAVX=%d", UseAVX);
    }
if (UseAES) {
tty->print(" UseAES=1");
}
    tty->cr();
    tty->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
......
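With this logic the VM enables UseAES automatically when the CPU advertises AES-NI, and turns on the intrinsic stubs only when AVX is also present (the stubs rely on AVX mode for misaligned SSE access). One way to see what the running VM decided, a hedged sketch using the standard HotSpot diagnostic MXBean:

import com.sun.management.HotSpotDiagnosticMXBean;
import java.lang.management.ManagementFactory;

public class CheckAesFlags {
    public static void main(String[] args) {
        HotSpotDiagnosticMXBean bean =
            ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
        // Prints the effective flag values, e.g. after auto-enabling on AES-NI CPUs.
        System.out.println(bean.getVMOption("UseAES"));
        System.out.println(bean.getVMOption("UseAESIntrinsics"));
    }
}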
@@ -78,7 +78,9 @@ public:
               sse4_2   : 1,
                        : 2,
               popcnt   : 1,
-                       : 3,
+                       : 1,
aes : 1,
: 1,
               osxsave  : 1,
               avx      : 1,
                        : 3;
@@ -244,7 +246,8 @@ protected:
    CPU_TSC    = (1 << 15),
    CPU_TSCINV = (1 << 16),
    CPU_AVX    = (1 << 17),
-   CPU_AVX2   = (1 << 18)
+   CPU_AVX2   = (1 << 18),
CPU_AES = (1 << 19)
  } cpuFeatureFlags;

  enum {
@@ -420,6 +423,8 @@ protected:
      result |= CPU_TSC;
    if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
      result |= CPU_TSCINV;
if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
result |= CPU_AES;
    // AMD features.
    if (is_amd()) {
@@ -544,6 +549,7 @@ public:
  static bool supports_avx()  { return (_cpuFeatures & CPU_AVX) != 0; }
  static bool supports_avx2() { return (_cpuFeatures & CPU_AVX2) != 0; }
  static bool supports_tsc()  { return (_cpuFeatures & CPU_TSC) != 0; }
static bool supports_aes() { return (_cpuFeatures & CPU_AES) != 0; }
  // Intel features
  static bool is_intel_family_core() { return is_intel() &&
......
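The new CPU_AES entry follows the file's existing pattern: each capability is one bit in _cpuFeatures, filled in from the CPUID bit field above, so every supports_* query is a single mask test. A minimal Java restatement of the pattern (bit values copied from the enum; class name illustrative):

public class CpuFeatureMask {
    static final int CPU_AVX  = 1 << 17;
    static final int CPU_AVX2 = 1 << 18;
    static final int CPU_AES  = 1 << 19; // the bit added by this change

    static boolean supportsAes(int features) {
        return (features & CPU_AES) != 0;
    }
}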
@@ -4102,9 +4102,158 @@ instruct vsll4L_reg_imm(vecY dst, vecY src, immI8 shift) %{
// ----------------------- LogicalRightShift -----------------------------------

-// Shorts/Chars vector logical right shift produces incorrect Java result
+// Shorts vector logical right shift produces incorrect Java result
 // for negative data because java code convert short value into int with
-// sign extension before a shift.
+// sign extension before a shift. But char vectors are fine since chars are
+// unsigned values.
instruct vsrl2S(vecS dst, vecS shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed2S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl2S_imm(vecS dst, immI8 shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed2S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, (int)$shift$$constant);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl2S_reg(vecS dst, vecS src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed2S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl2S_reg_imm(vecS dst, vecS src, immI8 shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed2S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl4S(vecD dst, vecS shift) %{
predicate(n->as_Vector()->length() == 4);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed4S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl4S_imm(vecD dst, immI8 shift) %{
predicate(n->as_Vector()->length() == 4);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed4S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, (int)$shift$$constant);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl4S_reg(vecD dst, vecD src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed4S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl4S_reg_imm(vecD dst, vecD src, immI8 shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed4S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl8S(vecX dst, vecS shift) %{
predicate(n->as_Vector()->length() == 8);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed8S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl8S_imm(vecX dst, immI8 shift) %{
predicate(n->as_Vector()->length() == 8);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed8S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, (int)$shift$$constant);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl8S_reg(vecX dst, vecX src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed8S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl8S_reg_imm(vecX dst, vecX src, immI8 shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed8S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl16S_reg(vecY dst, vecY src, vecS shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed16S" %}
ins_encode %{
bool vector256 = true;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl16S_reg_imm(vecY dst, vecY src, immI8 shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed16S" %}
ins_encode %{
bool vector256 = true;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
%}
ins_pipe( pipe_slow );
%}
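The corrected comment rests on Java's widening rules: a short operand of >>> is sign-extended to int before the shift, while a char is zero-extended, so only char data matches what a packed 16-bit logical right shift (psrlw) computes per lane. A small demonstration of the difference:

public class ShiftSemantics {
    public static void main(String[] args) {
        short s = (short) 0xFFFF;      // -1: same bit pattern as the char below
        char  c = '\uffff';            // 65535: unsigned 16-bit type
        short sr = (short) (s >>> 8);  // s widens to 0xFFFFFFFF before the shift
        char  cr = (char)  (c >>> 8);  // c widens to 0x0000FFFF before the shift
        System.out.println(sr);        // -1   (a per-lane psrlw would give 255)
        System.out.println((int) cr);  // 255  (matches psrlw, so chars are safe)
    }
}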
// Integers vector logical right shift
instruct vsrl2I(vecD dst, vecS shift) %{
......
@@ -30,6 +30,7 @@
#include "os_bsd.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp" #include "utilities/exceptions.hpp"
// put OS-includes here // put OS-includes here
@@ -753,6 +754,10 @@ static char* mmap_create_shared(size_t size) {
  // clear the shared memory region
  (void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  return mapAddress;
}
@@ -912,6 +917,10 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
                "Could not map PerfMemory");
  }
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  *addr = mapAddress;
  *sizep = size;
......
@@ -30,6 +30,7 @@
#include "os_linux.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp" #include "utilities/exceptions.hpp"
// put OS-includes here // put OS-includes here
@@ -753,6 +754,10 @@ static char* mmap_create_shared(size_t size) {
  // clear the shared memory region
  (void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  return mapAddress;
}
@@ -912,6 +917,10 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
                "Could not map PerfMemory");
  }
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  *addr = mapAddress;
  *sizep = size;
......
@@ -55,6 +55,7 @@
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp" #include "services/runtimeService.hpp"
#include "thread_solaris.inline.hpp" #include "thread_solaris.inline.hpp"
#include "utilities/decoder.hpp" #include "utilities/decoder.hpp"
@@ -3072,11 +3073,12 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);

  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
-   unmap_memory(addr, bytes);
+   pd_unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
......
@@ -30,6 +30,7 @@
#include "os_solaris.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp" #include "utilities/exceptions.hpp"
// put OS-includes here // put OS-includes here
@@ -768,6 +769,10 @@ static char* mmap_create_shared(size_t size) {
  // clear the shared memory region
  (void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  return mapAddress;
}
@@ -927,6 +932,10 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
                "Could not map PerfMemory");
  }
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  *addr = mapAddress;
  *sizep = size;
......
@@ -30,6 +30,7 @@
#include "os_windows.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp" #include "utilities/exceptions.hpp"
#include <windows.h> #include <windows.h>
@@ -1496,6 +1497,10 @@ static char* mapping_create_shared(size_t size) {
  // clear the shared memory region
  (void)memset(mapAddress, '\0', size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  return (char*) mapAddress;
}
@@ -1672,6 +1677,11 @@ static void open_file_mapping(const char* user, int vmid,
                "Could not map PerfMemory");
  }
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  *addrp = (char*)mapAddress;
  *sizep = size;
@@ -1824,6 +1834,8 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
  }

  remove_file_mapping(addr);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_release((address)addr, bytes);
}

char* PerfMemory::backing_store_filename() {
......
@@ -1844,17 +1844,12 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
      code == Bytecodes::_invokevirtual && target->is_final_method() ||
      code == Bytecodes::_invokedynamic) {
    ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
-   bool success = false;
-   if (target->is_method_handle_intrinsic()) {
-     // method handle invokes
-     success = try_method_handle_inline(target);
-   } else {
-     // static binding => check if callee is ok
-     success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
-   }
+   // static binding => check if callee is ok
+   bool success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
    CHECK_BAILOUT();

    clear_inline_bailout();
    if (success) {
      // Register dependence if JVMTI has either breakpoint
      // setting or hotswapping of methods capabilities since they may
@@ -3201,6 +3196,11 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
    return false;
  }
// method handle invokes
if (callee->is_method_handle_intrinsic()) {
return try_method_handle_inline(callee);
}
  // handle intrinsics
  if (callee->intrinsic_id() != vmIntrinsics::_none) {
    if (try_inline_intrinsics(callee)) {
@@ -3885,10 +3885,14 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
      ValueType* type = state()->stack_at(args_base)->type();
      if (type->is_constant()) {
        ciMethod* target = type->as_ObjectType()->constant_value()->as_method_handle()->get_vmtarget();
-       guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
-       Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
-       if (try_inline(target, /*holder_known*/ true, bc)) {
-         return true;
+       // We don't do CHA here so only inline static and statically bindable methods.
+       if (target->is_static() || target->can_be_statically_bound()) {
+         Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
+         if (try_inline(target, /*holder_known*/ true, bc)) {
+           return true;
+         }
+       } else {
+         print_inlining(target, "not static or statically bindable", /*success*/ false);
        }
      } else {
        print_inlining(callee, "receiver not constant", /*success*/ false);
@@ -3941,9 +3945,14 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
          }
          j += t->size();  // long and double take two slots
        }
-       Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
-       if (try_inline(target, /*holder_known*/ true, bc)) {
-         return true;
+       // We don't do CHA here so only inline static and statically bindable methods.
+       if (target->is_static() || target->can_be_statically_bound()) {
+         Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
+         if (try_inline(target, /*holder_known*/ true, bc)) {
+           return true;
+         }
+       } else {
+         print_inlining(target, "not static or statically bindable", /*success*/ false);
        }
      }
    } else {
......
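The new guard inlines a method-handle target only when it is static or can_be_statically_bound(): C1 performs no class-hierarchy analysis at this point, and an overridable virtual target has more than one possible implementation, so devirtualizing it blindly would be unsound. An illustrative Java-level view of the distinction (class and members hypothetical):

public class Bindability {
    static int twice(int x) { return 2 * x; }  // static: exactly one target
    public final int id()   { return 42; }     // final: cannot be overridden,
                                               // so statically bindable
    public int virt()       { return 1; }      // virtual: a subclass may
                                               // override, so a handle to it
                                               // is not statically bindable
}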
@@ -110,6 +110,7 @@
  template(sun_jkernel_DownloadManager,        "sun/jkernel/DownloadManager")   \
  template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
  template(sun_misc_PostVMInitHook,            "sun/misc/PostVMInitHook")       \
template(sun_misc_Launcher_ExtClassLoader, "sun/misc/Launcher$ExtClassLoader") \
  \
  /* Java runtime version access */ \
  template(sun_misc_Version, "sun/misc/Version") \
@@ -723,6 +724,21 @@
  /* java/lang/ref/Reference */ \
  do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
  \
  /* support for com.sun.crypto.provider.AESCrypt and some of its callers */ \
do_class(com_sun_crypto_provider_aescrypt, "com/sun/crypto/provider/AESCrypt") \
do_intrinsic(_aescrypt_encryptBlock, com_sun_crypto_provider_aescrypt, encryptBlock_name, byteArray_int_byteArray_int_signature, F_R) \
do_intrinsic(_aescrypt_decryptBlock, com_sun_crypto_provider_aescrypt, decryptBlock_name, byteArray_int_byteArray_int_signature, F_R) \
do_name( encryptBlock_name, "encryptBlock") \
do_name( decryptBlock_name, "decryptBlock") \
do_signature(byteArray_int_byteArray_int_signature, "([BI[BI)V") \
\
do_class(com_sun_crypto_provider_cipherBlockChaining, "com/sun/crypto/provider/CipherBlockChaining") \
do_intrinsic(_cipherBlockChaining_encryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, encrypt_name, byteArray_int_int_byteArray_int_signature, F_R) \
do_intrinsic(_cipherBlockChaining_decryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, decrypt_name, byteArray_int_int_byteArray_int_signature, F_R) \
do_name( encrypt_name, "encrypt") \
do_name( decrypt_name, "decrypt") \
do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)V") \
\
  /* support for sun.misc.Unsafe */ \
  do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
  \
......
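The intrinsic entries name methods of the default SunJCE provider, so once UseAESIntrinsics is enabled the stubs are reached through ordinary javax.crypto usage. A minimal sketch (all-zero key and IV purely for illustration):

import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

public class AesCbcDemo {
    public static void main(String[] args) throws Exception {
        byte[] key = new byte[16];  // demo-only key; never use a fixed key
        byte[] iv  = new byte[16];
        Cipher c = Cipher.getInstance("AES/CBC/NoPadding");
        c.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"),
               new IvParameterSpec(iv));
        // On the default provider, CipherBlockChaining.encrypt over
        // AESCrypt.encryptBlock runs underneath this call.
        byte[] ct = c.doFinal(new byte[32]); // two 16-byte blocks
        System.out.println(ct.length);       // 32
    }
}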
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/vmThread.hpp"
template <>
void AdaptiveFreeList<FreeChunk>::print_on(outputStream* st, const char* c) const {
if (c != NULL) {
st->print("%16s", c);
} else {
st->print(SIZE_FORMAT_W(16), size());
}
st->print("\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
count(), coal_births(), coal_deaths(), split_births(), split_deaths());
}
template <class Chunk>
AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
init_statistics();
}
template <class Chunk>
AdaptiveFreeList<Chunk>::AdaptiveFreeList(Chunk* fc) : FreeList<Chunk>(fc), _hint(0) {
init_statistics();
#ifndef PRODUCT
_allocation_stats.set_returned_bytes(size() * HeapWordSize);
#endif
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::initialize() {
FreeList<Chunk>::initialize();
set_hint(0);
init_statistics(true /* split_birth */);
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::reset(size_t hint) {
FreeList<Chunk>::reset();
set_hint(hint);
}
#ifndef PRODUCT
template <class Chunk>
void AdaptiveFreeList<Chunk>::assert_proper_lock_protection_work() const {
assert(protecting_lock() != NULL, "Don't call this directly");
assert(ParallelGCThreads > 0, "Don't call this directly");
Thread* thr = Thread::current();
if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
// assert that we are holding the freelist lock
} else if (thr->is_GC_task_thread()) {
assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
} else if (thr->is_Java_thread()) {
assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
} else {
ShouldNotReachHere(); // unaccounted thread type?
}
}
#endif
template <class Chunk>
void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
_allocation_stats.initialize(split_birth);
}
template <class Chunk>
size_t AdaptiveFreeList<Chunk>::get_better_size() {
  // A candidate chunk has been found.  If it is already under
  // populated and there is a hint, return the hint().  Else
  // return the size of this chunk.
if (surplus() <= 0) {
if (hint() != 0) {
return hint();
} else {
return size();
}
} else {
// This list has a surplus so use it.
return size();
}
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
assert_proper_lock_protection();
return_chunk_at_head(chunk, true);
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
FreeList<Chunk>::return_chunk_at_head(chunk, record_return);
#ifdef ASSERT
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
#endif
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
return_chunk_at_tail(chunk, true);
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
FreeList<Chunk>::return_chunk_at_tail(chunk, record_return);
#ifdef ASSERT
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
#endif
}
#ifndef PRODUCT
template <class Chunk>
void AdaptiveFreeList<Chunk>::verify_stats() const {
// The +1 of the LH comparand is to allow some "looseness" in
// checking: we usually call this interface when adding a block
// and we'll subsequently update the stats; we cannot update the
// stats beforehand because in the case of the large-block BT
// dictionary for example, this might be the first block and
// in that case there would be no place that we could record
// the stats (which are kept in the block itself).
assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+ _allocation_stats.coal_births() + 1) // Total Production Stock + 1
>= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
+ (ssize_t)count()), // Total Current Stock + depletion
err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prev_sweep(" SIZE_FORMAT ")"
" + split_births(" SIZE_FORMAT ")"
" + coal_births(" SIZE_FORMAT ") + 1 >= "
" split_deaths(" SIZE_FORMAT ")"
" coal_deaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
this, size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
              _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
_allocation_stats.coal_deaths(), count()));
}
#endif
// Needs to be after the definitions have been seen.
template class AdaptiveFreeList<FreeChunk>;
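verify_stats() above checks what the code calls the Conservation Principle. Stated as an inequality over the per-list counters (the +1 is slack for the block currently being added, as the comment explains):

\[
\underbrace{\text{prev\_sweep} + \text{split\_births} + \text{coal\_births} + 1}_{\text{total production stock} + 1}
\;\ge\;
\underbrace{\text{split\_deaths} + \text{coal\_deaths} + \text{count}}_{\text{depletion} + \text{current stock}}
\]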
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
#define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
#include "memory/freeList.hpp"
#include "gc_implementation/shared/allocationStats.hpp"
class CompactibleFreeListSpace;
// A class for maintaining a free list of Chunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of FreeList.  The statistics are
// used to make decisions about coalescing Chunk's when they
// are swept during collection.
//
// See the corresponding .cpp file for a description of the specifics
// for that implementation.
class Mutex;
template <class Chunk>
class AdaptiveFreeList : public FreeList<Chunk> {
friend class CompactibleFreeListSpace;
friend class VMStructs;
// friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
size_t _hint; // next larger size list with a positive surplus
AllocationStats _allocation_stats; // allocation-related statistics
public:
AdaptiveFreeList();
AdaptiveFreeList(Chunk* fc);
using FreeList<Chunk>::assert_proper_lock_protection;
#ifdef ASSERT
using FreeList<Chunk>::protecting_lock;
#endif
using FreeList<Chunk>::count;
using FreeList<Chunk>::size;
using FreeList<Chunk>::verify_chunk_in_free_list;
using FreeList<Chunk>::getFirstNChunksFromList;
using FreeList<Chunk>::print_on;
void return_chunk_at_head(Chunk* fc, bool record_return);
void return_chunk_at_head(Chunk* fc);
void return_chunk_at_tail(Chunk* fc, bool record_return);
void return_chunk_at_tail(Chunk* fc);
using FreeList<Chunk>::return_chunk_at_tail;
using FreeList<Chunk>::remove_chunk;
using FreeList<Chunk>::prepend;
using FreeList<Chunk>::print_labels_on;
using FreeList<Chunk>::get_chunk_at_head;
// Initialize.
void initialize();
// Reset the head, tail, hint, and count of a free list.
void reset(size_t hint);
void assert_proper_lock_protection_work() const PRODUCT_RETURN;
void print_on(outputStream* st, const char* c = NULL) const;
size_t hint() const {
return _hint;
}
void set_hint(size_t v) {
assert_proper_lock_protection();
assert(v == 0 || size() < v, "Bad hint");
_hint = v;
}
size_t get_better_size();
// Accessors for statistics
void init_statistics(bool split_birth = false);
AllocationStats* allocation_stats() {
assert_proper_lock_protection();
return &_allocation_stats;
}
ssize_t desired() const {
return _allocation_stats.desired();
}
void set_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_desired(v);
}
void compute_desired(float inter_sweep_current,
float inter_sweep_estimate,
float intra_sweep_estimate) {
assert_proper_lock_protection();
_allocation_stats.compute_desired(count(),
inter_sweep_current,
inter_sweep_estimate,
intra_sweep_estimate);
}
ssize_t coal_desired() const {
return _allocation_stats.coal_desired();
}
void set_coal_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_desired(v);
}
ssize_t surplus() const {
return _allocation_stats.surplus();
}
void set_surplus(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_surplus(v);
}
void increment_surplus() {
assert_proper_lock_protection();
_allocation_stats.increment_surplus();
}
void decrement_surplus() {
assert_proper_lock_protection();
_allocation_stats.decrement_surplus();
}
ssize_t bfr_surp() const {
return _allocation_stats.bfr_surp();
}
void set_bfr_surp(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_bfr_surp(v);
}
ssize_t prev_sweep() const {
return _allocation_stats.prev_sweep();
}
void set_prev_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_prev_sweep(v);
}
ssize_t before_sweep() const {
return _allocation_stats.before_sweep();
}
void set_before_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_before_sweep(v);
}
ssize_t coal_births() const {
return _allocation_stats.coal_births();
}
void set_coal_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_births(v);
}
void increment_coal_births() {
assert_proper_lock_protection();
_allocation_stats.increment_coal_births();
}
ssize_t coal_deaths() const {
return _allocation_stats.coal_deaths();
}
void set_coal_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_deaths(v);
}
void increment_coal_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_coal_deaths();
}
ssize_t split_births() const {
return _allocation_stats.split_births();
}
void set_split_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_split_births(v);
}
void increment_split_births() {
assert_proper_lock_protection();
_allocation_stats.increment_split_births();
}
ssize_t split_deaths() const {
return _allocation_stats.split_deaths();
}
void set_split_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_split_deaths(v);
}
void increment_split_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_split_deaths();
}
#ifndef PRODUCT
// For debugging. The "_returned_bytes" in all the lists are summed
// and compared with the total number of bytes swept during a
// collection.
size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
void increment_returned_bytes_by(size_t v) {
_allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
}
// Stats verification
void verify_stats() const;
#endif // NOT PRODUCT
};
#endif // SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
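The class above layers demand statistics on top of a plain free list: each size-indexed list tracks how many chunks it expects to need (desired), how many it can spare (surplus), and a hint pointing at the next larger list with spare chunks to split. A minimal, self-contained sketch of that census flow (toy types, not HotSpot code) follows; the surplus/hint recomputation mirrors what setFLSurplus() and setFLHints() do later in this change.

// A toy model of the per-size census the class above implements.
#include <cstddef>
#include <vector>

struct MiniFreeList {
  std::size_t count   = 0;  // chunks currently on this list
  long        desired = 0;  // demand estimate from previous sweeps
  long        surplus = 0;  // roughly count - desired
  std::size_t hint    = 0;  // next larger size list with a positive surplus
};

// After a sweep, recompute surplus per list, then walk from largest to
// smallest so each list learns the nearest larger list it could split from.
void set_surplus_and_hints(std::vector<MiniFreeList>& lists) {
  for (auto& l : lists) {
    l.surplus = static_cast<long>(l.count) - l.desired;
  }
  std::size_t h = lists.size();          // "no hint" sentinel
  for (std::size_t i = lists.size(); i-- > 1; ) {
    lists[i].hint = h;
    if (lists[i].surplus > 0) h = i;     // this size can donate via splitting
  }
}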
@@ -91,7 +91,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   _collector(NULL)
 {
   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
          "FreeChunk is larger than expected");
   _bt.set_space(this);
   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   // We have all of "mr", all of which we place in the dictionary
@@ -101,14 +101,14 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   // implementation, namely, the simple binary tree (splaying
   // temporarily disabled).
   switch (dictionaryChoice) {
+    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
+      _dictionary = new BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>(mr);
+      break;
     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
     default:
       warning("dictionaryChoice: selected option not understood; using"
               " default BinaryTreeDictionary implementation instead.");
-    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
-      _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
-      break;
   }
   assert(_dictionary != NULL, "CMS dictionary initialization");
   // The indexed free lists are initially all empty and are lazily
@@ -453,7 +453,7 @@ const {
   reportIndexedFreeListStatistics();
   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   gclog_or_tty->print_cr("---------------------------");
-  FreeList<FreeChunk>::print_labels_on(st, "size");
+  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     _indexedFreeList[i].print_on(gclog_or_tty);
     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
@@ -1319,7 +1319,7 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
   size_t currSize = numWords + MinChunkSize;
   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
     if (fl->head()) {
       ret = getFromListGreater(fl, numWords);
       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
@@ -1702,7 +1702,9 @@ CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
   _dictionary->return_chunk(chunk);
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-    TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
+    TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
+    TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
+    tl->verify_stats();
   }
 #endif // PRODUCT
 }
@@ -1745,7 +1747,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
   {
     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
     ec = dictionary()->find_largest_dict();  // get largest block
-    if (ec != NULL && ec->end() == chunk) {
+    if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
       // It's a coterminal block - we can coalesce.
       size_t old_size = ec->size();
       coalDeath(old_size);
@@ -1850,11 +1852,11 @@ FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
      the excess is >= MIN_CHUNK. */
   size_t start = align_object_size(numWords + MinChunkSize);
   if (start < IndexSetSize) {
-    FreeList<FreeChunk>* it = _indexedFreeList;
+    AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
     size_t hint = _indexedFreeList[start].hint();
     while (hint < IndexSetSize) {
       assert(hint % MinObjAlignment == 0, "hint should be aligned");
-      FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
+      AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
       if (fl->surplus() > 0 && fl->head() != NULL) {
         // Found a list with surplus, reset original hint
         // and split out a free chunk which is returned.
@@ -1873,7 +1875,7 @@ FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
 }
 /* Requires fl->size >= numWords + MinChunkSize */
-FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
+FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
                                                         size_t numWords) {
   FreeChunk *curr = fl->head();
   size_t oldNumWords = curr->size();
@@ -2155,7 +2157,7 @@ void CompactibleFreeListSpace::beginSweepFLCensus(
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
     if (PrintFLSStatistics > 1) {
       gclog_or_tty->print("size[%d] : ", i);
     }
@@ -2174,7 +2176,7 @@ void CompactibleFreeListSpace::setFLSurplus() {
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_surplus(fl->count() -
                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
   }
@@ -2185,7 +2187,7 @@ void CompactibleFreeListSpace::setFLHints() {
   size_t i;
   size_t h = IndexSetSize;
   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_hint(h);
     if (fl->surplus() > 0) {
       h = i;
@@ -2197,7 +2199,7 @@ void CompactibleFreeListSpace::clearFLCensus() {
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_prev_sweep(fl->count());
     fl->set_coal_births(0);
     fl->set_coal_deaths(0);
@@ -2224,7 +2226,7 @@ void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
   if (size < SmallForDictionary) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
     return (fl->coal_desired() < 0) ||
            ((int)fl->count() > fl->coal_desired());
   } else {
@@ -2234,14 +2236,14 @@ bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_coal_births();
   fl->increment_surplus();
 }
 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_coal_deaths();
   fl->decrement_surplus();
 }
@@ -2250,7 +2252,7 @@ void CompactibleFreeListSpace::coalBirth(size_t size) {
   if (size < SmallForDictionary) {
     smallCoalBirth(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                      false /* split */,
                                      true /* birth */);
   }
@@ -2260,7 +2262,7 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
   if(size < SmallForDictionary) {
     smallCoalDeath(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                      false /* split */,
                                      false /* birth */);
   }
@@ -2268,14 +2270,14 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_split_births();
   fl->increment_surplus();
 }
 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_split_deaths();
   fl->decrement_surplus();
 }
@@ -2284,7 +2286,7 @@ void CompactibleFreeListSpace::split_birth(size_t size) {
   if (size < SmallForDictionary) {
     smallSplitBirth(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                      true /* split */,
                                      true /* birth */);
   }
@@ -2294,7 +2296,7 @@ void CompactibleFreeListSpace::splitDeath(size_t size) {
   if (size < SmallForDictionary) {
     smallSplitDeath(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                      true /* split */,
                                      false /* birth */);
   }
@@ -2517,10 +2519,10 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
 #ifndef PRODUCT
 void CompactibleFreeListSpace::check_free_list_consistency() const {
-  assert(_dictionary->min_size() <= IndexSetSize,
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
          "Some sizes can't be allocated without recourse to"
          " linear allocation buffers");
-  assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
          "else MIN_TREE_CHUNK_SIZE is wrong");
   assert(IndexSetStart != 0, "IndexSetStart not initialized");
   assert(IndexSetStride != 0, "IndexSetStride not initialized");
@@ -2529,15 +2531,15 @@ void CompactibleFreeListSpace::check_free_list_consistency() const {
 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
   assert_lock_strong(&_freelistLock);
-  FreeList<FreeChunk> total;
+  AdaptiveFreeList<FreeChunk> total;
   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
-  FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
   size_t total_free = 0;
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     total_free += fl->count() * fl->size();
     if (i % (40*IndexSetStride) == 0) {
-      FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+      AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
     }
     fl->print_on(gclog_or_tty);
     total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
@@ -2620,7 +2622,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
     res = _cfls->getChunkFromDictionaryExact(word_sz);
     if (res == NULL) return NULL;
   } else {
-    FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
+    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
     if (fl->count() == 0) {
       // Attempt to refill this local free list.
       get_from_global_pool(word_sz, fl);
@@ -2640,7 +2642,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
 // Get a chunk of blocks of the right size and update related
 // book-keeping stats
-void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
+void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
@@ -2722,7 +2724,7 @@ void CFLS_LAB::retire(int tid) {
       if (num_retire > 0) {
         _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
         // Reset this list.
-        _indexedFreeList[i] = FreeList<FreeChunk>();
+        _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
         _indexedFreeList[i].set_size(i);
       }
     }
@@ -2736,7 +2738,7 @@ void CFLS_LAB::retire(int tid) {
   }
 }
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
   assert(fl->count() == 0, "Precondition.");
   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
          "Precondition");
@@ -2752,12 +2754,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
        (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
        (CMSSplitIndexedFreeListBlocks || k <= 1);
        k++, cur_sz = k * word_sz) {
-    FreeList<FreeChunk> fl_for_cur_sz;  // Empty.
+    AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
     fl_for_cur_sz.set_size(cur_sz);
     {
       MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                       Mutex::_no_safepoint_check_flag);
-      FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
+      AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
       if (gfl->count() != 0) {
         // nn is the number of chunks of size cur_sz that
         // we'd need to split k-ways each, in order to create
@@ -2832,12 +2834,11 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     MutexLockerEx x(parDictionaryAllocLock(),
                     Mutex::_no_safepoint_check_flag);
     while (n > 0) {
-      fc = dictionary()->get_chunk(MAX2(n * word_sz,
-                                        _dictionary->min_size()),
+      fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                    FreeBlockDictionary<FreeChunk>::atLeast);
       if (fc != NULL) {
         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-        dictionary()->dict_census_udpate(fc->size(),
+        dictionary()->dict_census_update(fc->size(),
                                          true /*split*/,
                                          false /*birth*/);
         break;
@@ -2890,7 +2891,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
       fc->set_size(prefix_size);
       if (rem >= IndexSetSize) {
         returnChunkToDictionary(rem_fc);
-        dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
+        dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
         rem_fc = NULL;
       }
       // Otherwise, return it to the small list below.
...
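The coal/split birth/death calls in the hunks above implement a small census: coalescing consumes two chunks and produces one (a death at each old size, a birth at the new size), and splitting does the reverse. A toy tally of those four events (assumed structure, not HotSpot code):

#include <cstddef>
#include <map>

struct Census { long births = 0; long deaths = 0; };
static std::map<std::size_t, Census> census;   // keyed by chunk size in words

void birth(std::size_t size) { census[size].births++; }
void death(std::size_t size) { census[size].deaths++; }

// Coalescing two neighbors consumes both old sizes, yields one new size.
void on_coalesce(std::size_t a, std::size_t b) {
  death(a);
  death(b);
  birth(a + b);
}

// Splitting a chunk consumes the old size, yields two smaller sizes.
void on_split(std::size_t whole, std::size_t part) {
  death(whole);
  birth(part);
  birth(whole - part);
}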
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
+#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
@@ -38,6 +39,7 @@
 class CompactibleFreeListSpace;
 class BlkClosure;
 class BlkClosureCareful;
+class FreeChunk;
 class UpwardsObjectClosure;
 class ObjectClosureCareful;
 class Klass;
@@ -131,7 +133,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
   FreeBlockDictionary<FreeChunk>* _dictionary;    // ptr to dictionary for large size blocks
-  FreeList<FreeChunk> _indexedFreeList[IndexSetSize];
+  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
                                        // indexed array for small size blocks
   // allocation stategy
   bool _fitStrategy;      // Use best fit strategy.
@@ -168,7 +170,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // If the count of "fl" is negative, it's absolute value indicates a
   // number of free chunks that had been previously "borrowed" from global
   // list of size "word_sz", and must now be decremented.
-  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
+  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
@@ -214,7 +216,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // and return it.  The split off remainder is returned to
   // the free lists.  The old name for getFromListGreater
   // was lookInListGreater.
-  FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords);
+  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
   // Get a chunk in the indexed free list or dictionary,
   // by considering a larger chunk and splitting it.
   FreeChunk* getChunkFromGreater(size_t numWords);
@@ -621,7 +623,7 @@ class CFLS_LAB : public CHeapObj<mtGC> {
   CompactibleFreeListSpace* _cfls;
   // Our local free lists.
-  FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
+  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
   // Initialized from a command-line arg.
@@ -634,7 +636,7 @@ class CFLS_LAB : public CHeapObj<mtGC> {
   size_t _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
   // Internal work method
-  void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl);
+  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
 public:
   CFLS_LAB(CompactibleFreeListSpace* cfls);
...
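The comment on par_get_chunk_of_blocks() in the hunk above encodes an IOU scheme: a per-thread list whose count has gone negative owes that many blocks back to the global list of the same size, and a later bulk refill first repays that debt. A toy sketch of the accounting, with assumed names (not the HotSpot types):

#include <cassert>

struct LocalList {
  long count = 0;                 // may be negative: |count| blocks owed
};

// Refill 'fl' with 'n' blocks claimed from a global pool (here a counter).
void par_get_chunk_of_blocks(long n, long& global_pool, LocalList& fl) {
  assert(n > 0);
  global_pool -= n;               // take from the shared pool
  fl.count += n;                  // repays any outstanding debt first
}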
@@ -9143,7 +9143,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
   size_t shrinkable_size_in_bytes = chunk_at_end->size();
   size_t aligned_shrinkable_size_in_bytes =
     align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
-  assert(unallocated_start <= chunk_at_end->end(),
+  assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
          "Inconsistent chunk at end of space");
   size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
   size_t word_size_before = heap_word_size(_virtual_space.committed_size());
@@ -9210,7 +9210,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
   assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
          "Inconsistency at end of space");
-  assert(chunk_at_end->end() == _cmsSpace->end(),
+  assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
          "Shrinking is inconsistent");
   return;
 }
...
@@ -133,7 +133,7 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
   }
   // Return the address past the end of this chunk
-  HeapWord* end() const { return ((HeapWord*) this) + size(); }
+  uintptr_t* end() const { return ((uintptr_t*) this) + size(); }
   // debugging
   void verify() const PRODUCT_RETURN;
...
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
+typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
 #define VM_STRUCTS_CMS(nonstatic_field, \
                        volatile_nonstatic_field, \
                        static_field) \
@@ -38,14 +40,8 @@
   nonstatic_field(CMSCollector, _markBitMap, CMSBitMap) \
   nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace, CompactibleFreeListSpace*) \
   static_field(ConcurrentMarkSweepThread, _collector, CMSCollector*) \
-  volatile_nonstatic_field(FreeChunk, _size, size_t) \
-  nonstatic_field(FreeChunk, _next, FreeChunk*) \
-  nonstatic_field(FreeChunk, _prev, FreeChunk*) \
   nonstatic_field(LinearAllocBlock, _word_size, size_t) \
-  nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
-  nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
-  nonstatic_field(BinaryTreeDictionary<FreeChunk>,_total_size, size_t) \
-  nonstatic_field(CompactibleFreeListSpace, _dictionary, FreeBlockDictionary<FreeChunk>*) \
+  nonstatic_field(AFLBinaryTreeDictionary, _total_size, size_t) \
   nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList<FreeChunk>) \
   nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)
@@ -60,19 +56,17 @@
   declare_toplevel_type(CMSCollector) \
   declare_toplevel_type(CMSBitMap) \
   declare_toplevel_type(FreeChunk) \
-  declare_toplevel_type(Metablock) \
   declare_toplevel_type(ConcurrentMarkSweepThread*) \
   declare_toplevel_type(ConcurrentMarkSweepGeneration*) \
   declare_toplevel_type(SurrogateLockerThread*) \
   declare_toplevel_type(CompactibleFreeListSpace*) \
   declare_toplevel_type(CMSCollector*) \
-  declare_toplevel_type(FreeChunk*) \
-  declare_toplevel_type(BinaryTreeDictionary<FreeChunk>*) \
-  declare_toplevel_type(FreeBlockDictionary<FreeChunk>*) \
-  declare_toplevel_type(FreeList<FreeChunk>*) \
-  declare_toplevel_type(FreeList<FreeChunk>) \
+  declare_toplevel_type(AFLBinaryTreeDictionary*) \
   declare_toplevel_type(LinearAllocBlock) \
   declare_toplevel_type(FreeBlockDictionary<FreeChunk>) \
-  declare_type(BinaryTreeDictionary<FreeChunk>, FreeBlockDictionary<FreeChunk>)
+  declare_type(AFLBinaryTreeDictionary, FreeBlockDictionary<FreeChunk>) \
 #define VM_INT_CONSTANTS_CMS(declare_constant) \
   declare_constant(Generation::ConcurrentMarkSweep) \
...
@@ -191,7 +191,7 @@ class VM_GenCollectFull: public VM_GC_Operation {
 class VM_CollectForMetadataAllocation: public VM_GC_Operation {
  private:
   MetaWord*                _result;
-  size_t                   _size; // size of object to be allocated
+  size_t                   _size;     // size of object to be allocated
   Metaspace::MetadataType  _mdtype;
   ClassLoaderData*         _loader_data;
  public:
...
@@ -433,19 +433,18 @@ Arena::Arena() {
   NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
-Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
-  set_size_in_bytes(a->size_in_bytes());
-  NOT_PRODUCT(Atomic::inc(&_instance_count);)
-}
 Arena *Arena::move_contents(Arena *copy) {
   copy->destruct_contents();
   copy->_chunk = _chunk;
   copy->_hwm = _hwm;
   copy->_max = _max;
   copy->_first = _first;
-  copy->set_size_in_bytes(size_in_bytes());
+  // workaround rare racing condition, which could double count
+  // the arena size by native memory tracking
+  size_t size = size_in_bytes();
+  set_size_in_bytes(0);
+  copy->set_size_in_bytes(size);
   // Destroy original arena
   reset();
   return copy;            // Return Arena with contents
@@ -497,6 +496,9 @@ void Arena::destruct_contents() {
     char* end = _first->next() ? _first->top() : _hwm;
     free_malloced_objects(_first, _first->bottom(), end, _hwm);
   }
+  // reset size before chop to avoid a rare racing condition
+  // that can have total arena memory exceed total chunk memory
+  set_size_in_bytes(0);
   _first->chop();
   reset();
 }
...
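Both Arena hunks above publish a size of zero on the donor before the bytes are attributed anywhere else, so a concurrent memory-tracking sampler can never see the same bytes on two arenas at once. A minimal sketch of the pattern (simplified, not HotSpot code):

#include <atomic>
#include <cstddef>

struct MiniArena {
  std::atomic<std::size_t> size_in_bytes{0};
};

void move_contents(MiniArena& from, MiniArena& to) {
  std::size_t sz = from.size_in_bytes.load();
  from.size_in_bytes.store(0);      // donor stops reporting these bytes...
  to.size_in_bytes.store(sz);       // ...before the recipient starts to
}

// A sampler summing both arenas may transiently see the bytes on neither
// arena (a brief undercount), but it can never double count them.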
@@ -144,8 +144,10 @@ enum MemoryType {
   mtNMT               = 0x0A00,  // memory used by native memory tracking
   mtChunk             = 0x0B00,  // chunk that holds content of arenas
   mtJavaHeap          = 0x0C00,  // Java heap
-  mtDontTrack         = 0x0D00,  // memory we donot or cannot track
-  mt_number_of_types  = 0x000C,  // number of memory types
+  mtClassShared       = 0x0D00,  // class data sharing
+  mt_number_of_types  = 0x000D,  // number of memory types (mtDontTrack
+                                 // is not included as validate type)
+  mtDontTrack         = 0x0E00,  // memory we do not or cannot track
   mt_masks            = 0x7F00,
   // object type mask
@@ -342,7 +344,6 @@ protected:
  public:
   Arena();
   Arena(size_t init_size);
-  Arena(Arena *old);
   ~Arena();
   void  destruct_contents();
   char* hwm() const             { return _hwm; }
...
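The MemoryType hunk above keeps the type tag in the high byte of a flags word and counts only trackable types in mt_number_of_types, parking mtDontTrack above the last counted value. A small sketch of that encoding; the enum values come from the hunk, while the index mapping below is an assumption for illustration:

#include <cassert>

enum MemoryType {
  mtJavaHeap         = 0x0C00,   // Java heap
  mtClassShared      = 0x0D00,   // class data sharing
  mt_number_of_types = 0x000D,   // mtDontTrack is not a counted (valid) type
  mtDontTrack        = 0x0E00,   // memory we do not or cannot track
  mt_masks           = 0x7F00    // object type mask
};

inline int type_index(int flags) {
  return ((flags & mt_masks) >> 8) - 1;   // assumed 0-based index of the tag
}

int main() {
  assert(type_index(mtClassShared) == mt_number_of_types - 1);  // last valid
  assert(type_index(mtDontTrack)  >= mt_number_of_types);       // excluded
}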
@@ -37,77 +37,78 @@
 // A TreeList is a FreeList which can be used to maintain a
 // binary tree of free lists.
-template <class Chunk> class TreeChunk;
-template <class Chunk> class BinaryTreeDictionary;
-template <class Chunk> class AscendTreeCensusClosure;
-template <class Chunk> class DescendTreeCensusClosure;
-template <class Chunk> class DescendTreeSearchClosure;
+template <class Chunk_t, template <class> class FreeList_t> class TreeChunk;
+template <class Chunk_t, template <class> class FreeList_t> class BinaryTreeDictionary;
+template <class Chunk_t, template <class> class FreeList_t> class AscendTreeCensusClosure;
+template <class Chunk_t, template <class> class FreeList_t> class DescendTreeCensusClosure;
+template <class Chunk_t, template <class> class FreeList_t> class DescendTreeSearchClosure;
-template <class Chunk>
-class TreeList: public FreeList<Chunk> {
-  friend class TreeChunk<Chunk>;
-  friend class BinaryTreeDictionary<Chunk>;
-  friend class AscendTreeCensusClosure<Chunk>;
-  friend class DescendTreeCensusClosure<Chunk>;
-  friend class DescendTreeSearchClosure<Chunk>;
-  TreeList<Chunk>* _parent;
-  TreeList<Chunk>* _left;
-  TreeList<Chunk>* _right;
+template <class Chunk_t, template <class> class FreeList_t>
+class TreeList : public FreeList_t<Chunk_t> {
+  friend class TreeChunk<Chunk_t, FreeList_t>;
+  friend class BinaryTreeDictionary<Chunk_t, FreeList_t>;
+  friend class AscendTreeCensusClosure<Chunk_t, FreeList_t>;
+  friend class DescendTreeCensusClosure<Chunk_t, FreeList_t>;
+  friend class DescendTreeSearchClosure<Chunk_t, FreeList_t>;
+  TreeList<Chunk_t, FreeList_t>* _parent;
+  TreeList<Chunk_t, FreeList_t>* _left;
+  TreeList<Chunk_t, FreeList_t>* _right;
 protected:
-  TreeList<Chunk>* parent() const { return _parent; }
-  TreeList<Chunk>* left()   const { return _left;   }
-  TreeList<Chunk>* right()  const { return _right;  }
-  // Explicitly import these names into our namespace to fix name lookup with templates
-  using FreeList<Chunk>::head;
-  using FreeList<Chunk>::set_head;
-  using FreeList<Chunk>::tail;
-  using FreeList<Chunk>::set_tail;
-  using FreeList<Chunk>::link_tail;
-  using FreeList<Chunk>::increment_count;
-  NOT_PRODUCT(using FreeList<Chunk>::increment_returned_bytes_by;)
-  using FreeList<Chunk>::verify_chunk_in_free_list;
-  using FreeList<Chunk>::size;
+  TreeList<Chunk_t, FreeList_t>* parent() const { return _parent; }
+  TreeList<Chunk_t, FreeList_t>* left()   const { return _left;   }
+  TreeList<Chunk_t, FreeList_t>* right()  const { return _right;  }
+  // Wrapper on call to base class, to get the template to compile.
+  Chunk_t* head() const { return FreeList_t<Chunk_t>::head(); }
+  Chunk_t* tail() const { return FreeList_t<Chunk_t>::tail(); }
+  void set_head(Chunk_t* head) { FreeList_t<Chunk_t>::set_head(head); }
+  void set_tail(Chunk_t* tail) { FreeList_t<Chunk_t>::set_tail(tail); }
+  size_t size() const { return FreeList_t<Chunk_t>::size(); }
   // Accessors for links in tree.
-  void set_left(TreeList<Chunk>* tl) {
+  void set_left(TreeList<Chunk_t, FreeList_t>* tl) {
     _left   = tl;
     if (tl != NULL)
       tl->set_parent(this);
   }
-  void set_right(TreeList<Chunk>* tl) {
+  void set_right(TreeList<Chunk_t, FreeList_t>* tl) {
     _right  = tl;
     if (tl != NULL)
       tl->set_parent(this);
   }
-  void set_parent(TreeList<Chunk>* tl)  { _parent = tl;   }
-  void clearLeft()               { _left = NULL;   }
+  void set_parent(TreeList<Chunk_t, FreeList_t>* tl) { _parent = tl; }
+  void clear_left()              { _left = NULL;   }
   void clear_right()             { _right = NULL;  }
   void clear_parent()            { _parent = NULL; }
-  void initialize()              { clearLeft(); clear_right(), clear_parent(); }
+  void initialize()              { clear_left(); clear_right(), clear_parent(); FreeList_t<Chunk_t>::initialize(); }
   // For constructing a TreeList from a Tree chunk or
   // address and size.
-  static TreeList<Chunk>* as_TreeList(TreeChunk<Chunk>* tc);
-  static TreeList<Chunk>* as_TreeList(HeapWord* addr, size_t size);
+  TreeList();
+  static TreeList<Chunk_t, FreeList_t>*
+          as_TreeList(TreeChunk<Chunk_t, FreeList_t>* tc);
+  static TreeList<Chunk_t, FreeList_t>* as_TreeList(HeapWord* addr, size_t size);
   // Returns the head of the free list as a pointer to a TreeChunk.
-  TreeChunk<Chunk>* head_as_TreeChunk();
+  TreeChunk<Chunk_t, FreeList_t>* head_as_TreeChunk();
   // Returns the first available chunk in the free list as a pointer
   // to a TreeChunk.
-  TreeChunk<Chunk>* first_available();
+  TreeChunk<Chunk_t, FreeList_t>* first_available();
   // Returns the block with the largest heap address amongst
   // those in the list for this size; potentially slow and expensive,
   // use with caution!
-  TreeChunk<Chunk>* largest_address();
+  TreeChunk<Chunk_t, FreeList_t>* largest_address();
+  TreeList<Chunk_t, FreeList_t>* get_better_list(
+    BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary);
   // remove_chunk_replace_if_needed() removes the given "tc" from the TreeList.
   // If "tc" is the first chunk in the list, it is also the
@@ -115,10 +116,10 @@ class TreeList: public FreeList<Chunk> {
   // returns the possibly replaced TreeList* for the node in
   // the tree.  It also updates the parent of the original
   // node to point to the new node.
-  TreeList<Chunk>* remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc);
+  TreeList<Chunk_t, FreeList_t>* remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc);
   // See FreeList.
-  void return_chunk_at_head(TreeChunk<Chunk>* tc);
-  void return_chunk_at_tail(TreeChunk<Chunk>* tc);
+  void return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* tc);
+  void return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* tc);
 };
 // A TreeChunk is a subclass of a Chunk that additionally
@@ -134,52 +135,54 @@ class TreeList: public FreeList<Chunk> {
 // on the free list for a node in the tree and is only removed if
 // it is the last chunk on the free list.
-template <class Chunk>
-class TreeChunk : public Chunk {
-  friend class TreeList<Chunk>;
-  TreeList<Chunk>* _list;
-  TreeList<Chunk> _embedded_list;  // if non-null, this chunk is on _list
+template <class Chunk_t, template <class> class FreeList_t>
+class TreeChunk : public Chunk_t {
+  friend class TreeList<Chunk_t, FreeList_t>;
+  TreeList<Chunk_t, FreeList_t>* _list;
+  TreeList<Chunk_t, FreeList_t> _embedded_list;  // if non-null, this chunk is on _list
+  static size_t _min_tree_chunk_size;
 protected:
-  TreeList<Chunk>* embedded_list() const { return (TreeList<Chunk>*) &_embedded_list; }
-  void set_embedded_list(TreeList<Chunk>* v) { _embedded_list = *v; }
+  TreeList<Chunk_t, FreeList_t>* embedded_list() const { return (TreeList<Chunk_t, FreeList_t>*) &_embedded_list; }
+  void set_embedded_list(TreeList<Chunk_t, FreeList_t>* v) { _embedded_list = *v; }
 public:
-  TreeList<Chunk>* list() { return _list; }
-  void set_list(TreeList<Chunk>* v) { _list = v; }
-  static TreeChunk<Chunk>* as_TreeChunk(Chunk* fc);
+  TreeList<Chunk_t, FreeList_t>* list() { return _list; }
+  void set_list(TreeList<Chunk_t, FreeList_t>* v) { _list = v; }
+  static TreeChunk<Chunk_t, FreeList_t>* as_TreeChunk(Chunk_t* fc);
   // Initialize fields in a TreeChunk that should be
   // initialized when the TreeChunk is being added to
   // a free list in the tree.
   void initialize() { embedded_list()->initialize(); }
-  Chunk* next() const { return Chunk::next(); }
-  Chunk* prev() const { return Chunk::prev(); }
-  size_t size() const volatile { return Chunk::size(); }
+  Chunk_t* next() const { return Chunk_t::next(); }
+  Chunk_t* prev() const { return Chunk_t::prev(); }
+  size_t size() const volatile { return Chunk_t::size(); }
+  static size_t min_size() {
+    return _min_tree_chunk_size;
+  }
   // debugging
   void verify_tree_chunk_list() const;
+  void assert_is_mangled() const;
 };
-template <class Chunk>
-class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
+template <class Chunk_t, template <class> class FreeList_t>
+class BinaryTreeDictionary: public FreeBlockDictionary<Chunk_t> {
   friend class VMStructs;
-  bool       _splay;
-  bool       _adaptive_freelists;
   size_t     _total_size;
   size_t     _total_free_blocks;
-  TreeList<Chunk>* _root;
+  TreeList<Chunk_t, FreeList_t>* _root;
   // private accessors
-  bool splay() const { return _splay; }
-  void set_splay(bool v) { _splay = v; }
   void set_total_size(size_t v) { _total_size = v; }
   virtual void inc_total_size(size_t v);
   virtual void dec_total_size(size_t v);
-  size_t total_free_blocks() const { return _total_free_blocks; }
   void set_total_free_blocks(size_t v) { _total_free_blocks = v; }
-  TreeList<Chunk>* root() const { return _root; }
-  void set_root(TreeList<Chunk>* v) { _root = v; }
-  bool adaptive_freelists() { return _adaptive_freelists; }
+  TreeList<Chunk_t, FreeList_t>* root() const { return _root; }
+  void set_root(TreeList<Chunk_t, FreeList_t>* v) { _root = v; }
   // This field is added and can be set to point to the
   // the Mutex used to synchronize access to the
@@ -191,54 +194,55 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
   // return it.  If the chunk
   // is the last chunk of that size, remove the node for that size
   // from the tree.
-  TreeChunk<Chunk>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay);
-  // Return a list of the specified size or NULL from the tree.
-  // The list is not removed from the tree.
-  TreeList<Chunk>* find_list (size_t size) const;
+  TreeChunk<Chunk_t, FreeList_t>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither);
   // Remove this chunk from the tree.  If the removal results
   // in an empty list in the tree, remove the empty list.
-  TreeChunk<Chunk>* remove_chunk_from_tree(TreeChunk<Chunk>* tc);
+  TreeChunk<Chunk_t, FreeList_t>* remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc);
   // Remove the node in the trees starting at tl that has the
   // minimum value and return it.  Repair the tree as needed.
-  TreeList<Chunk>* remove_tree_minimum(TreeList<Chunk>* tl);
-  void       semi_splay_step(TreeList<Chunk>* tl);
+  TreeList<Chunk_t, FreeList_t>* remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl);
   // Add this free chunk to the tree.
-  void       insert_chunk_in_tree(Chunk* freeChunk);
+  void       insert_chunk_in_tree(Chunk_t* freeChunk);
 public:
-  static const size_t min_tree_chunk_size  = sizeof(TreeChunk<Chunk>)/HeapWordSize;
+  // Return a list of the specified size or NULL from the tree.
+  // The list is not removed from the tree.
+  TreeList<Chunk_t, FreeList_t>* find_list (size_t size) const;
   void       verify_tree() const;
   // verify that the given chunk is in the tree.
-  bool       verify_chunk_in_free_list(Chunk* tc) const;
+  bool       verify_chunk_in_free_list(Chunk_t* tc) const;
 private:
-  void          verify_tree_helper(TreeList<Chunk>* tl) const;
-  static size_t verify_prev_free_ptrs(TreeList<Chunk>* tl);
+  void          verify_tree_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
+  static size_t verify_prev_free_ptrs(TreeList<Chunk_t, FreeList_t>* tl);
   // Returns the total number of chunks in the list.
-  size_t     total_list_length(TreeList<Chunk>* tl) const;
+  size_t     total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const;
   // Returns the total number of words in the chunks in the tree
   // starting at "tl".
-  size_t     total_size_in_tree(TreeList<Chunk>* tl) const;
+  size_t     total_size_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
   // Returns the sum of the square of the size of each block
   // in the tree starting at "tl".
-  double     sum_of_squared_block_sizes(TreeList<Chunk>* const tl) const;
+  double     sum_of_squared_block_sizes(TreeList<Chunk_t, FreeList_t>* const tl) const;
   // Returns the total number of free blocks in the tree starting
   // at "tl".
-  size_t     total_free_blocks_in_tree(TreeList<Chunk>* tl) const;
+  size_t     total_free_blocks_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
   size_t     num_free_blocks() const;
-  size_t     treeHeight() const;
-  size_t     tree_height_helper(TreeList<Chunk>* tl) const;
-  size_t     total_nodes_in_tree(TreeList<Chunk>* tl) const;
-  size_t     total_nodes_helper(TreeList<Chunk>* tl) const;
+  size_t     tree_height() const;
+  size_t     tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
+  size_t     total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
+  size_t     total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
 public:
   // Constructor
-  BinaryTreeDictionary(bool adaptive_freelists, bool splay = false);
-  BinaryTreeDictionary(MemRegion mr, bool adaptive_freelists, bool splay = false);
+  BinaryTreeDictionary() :
+    _total_size(0), _total_free_blocks(0), _root(0) {}
+  BinaryTreeDictionary(MemRegion mr);
   // Public accessors
   size_t total_size() const { return _total_size; }
+  size_t total_free_blocks() const { return _total_free_blocks; }
   // Reset the dictionary to the initial conditions with
   // a single free chunk.
@@ -249,23 +253,24 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
   // Return a chunk of size "size" or greater from
   // the tree.
-  // want a better dynamic splay strategy for the future.
-  Chunk* get_chunk(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither) {
-    FreeBlockDictionary<Chunk>::verify_par_locked();
-    Chunk* res = get_chunk_from_tree(size, dither, splay());
+  Chunk_t* get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
+    FreeBlockDictionary<Chunk_t>::verify_par_locked();
+    Chunk_t* res = get_chunk_from_tree(size, dither);
     assert(res == NULL || res->is_free(),
            "Should be returning a free chunk");
+    assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
+           res == NULL || res->size() == size, "Not correct size");
     return res;
   }
-  void return_chunk(Chunk* chunk) {
-    FreeBlockDictionary<Chunk>::verify_par_locked();
+  void return_chunk(Chunk_t* chunk) {
+    FreeBlockDictionary<Chunk_t>::verify_par_locked();
     insert_chunk_in_tree(chunk);
   }
-  void remove_chunk(Chunk* chunk) {
-    FreeBlockDictionary<Chunk>::verify_par_locked();
-    remove_chunk_from_tree((TreeChunk<Chunk>*)chunk);
+  void remove_chunk(Chunk_t* chunk) {
+    FreeBlockDictionary<Chunk_t>::verify_par_locked();
+    remove_chunk_from_tree((TreeChunk<Chunk_t, FreeList_t>*)chunk);
     assert(chunk->is_free(), "Should still be a free chunk");
   }
@@ -281,19 +286,19 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
   }
   size_t min_size() const {
-    return min_tree_chunk_size;
+    return TreeChunk<Chunk_t, FreeList_t>::min_size();
   }
   double sum_of_squared_block_sizes() const {
     return sum_of_squared_block_sizes(root());
   }
-  Chunk* find_chunk_ends_at(HeapWord* target) const;
+  Chunk_t* find_chunk_ends_at(HeapWord* target) const;
   // Find the list with size "size" in the binary tree and update
   // the statistics in the list according to "split" (chunk was
   // split or coalesce) and "birth" (chunk was added or removed).
-  void       dict_census_udpate(size_t size, bool split, bool birth);
+  void       dict_census_update(size_t size, bool split, bool birth);
   // Return true if the dictionary is overpopulated (more chunks of
   // this size than desired) for size "size".
   bool       coal_dict_over_populated(size_t size);
@@ -307,7 +312,7 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
   // statistics for the sweep.
   void       end_sweep_dict_census(double splitSurplusPercent);
   // Return the largest free chunk in the tree.
-  Chunk* find_largest_dict() const;
+  Chunk_t* find_largest_dict() const;
   // Accessors for statistics
   void       set_tree_surplus(double splitSurplusPercent);
   void       set_tree_hints(void);
...
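Two ideas in the header above are easy to miss: TreeList is now parameterized over the free-list implementation via a template template parameter (so the same tree works with FreeList or AdaptiveFreeList), and each tree node is stored inside its first chunk (_embedded_list), so creating a node for a new size needs no separate allocation. A compact sketch with toy types (not the real classes):

#include <cstddef>

template <class Chunk_t> struct MiniFreeList {
  Chunk_t* head = nullptr;
};

template <class Chunk_t, template <class> class FreeList_t>
struct MiniTreeList : public FreeList_t<Chunk_t> {
  MiniTreeList* left  = nullptr;   // binary-tree links, keyed by chunk size
  MiniTreeList* right = nullptr;
};

template <class Chunk_t, template <class> class FreeList_t>
struct MiniTreeChunk : public Chunk_t {
  MiniTreeList<Chunk_t, FreeList_t>* list = nullptr;  // node I belong to
  MiniTreeList<Chunk_t, FreeList_t> embedded_list;    // storage for that node
};

struct MiniChunk { std::size_t size = 0; };

// Turning a free chunk into a tree node: the chunk's embedded list becomes
// the node, and the chunk becomes that node's first (and only) element.
MiniTreeList<MiniChunk, MiniFreeList>*
as_tree_node(MiniTreeChunk<MiniChunk, MiniFreeList>* tc) {
  tc->list = &tc->embedded_list;
  tc->embedded_list.head = tc;
  return tc->list;
}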
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include "runtime/arguments.hpp" #include "runtime/arguments.hpp"
#include "runtime/java.hpp" #include "runtime/java.hpp"
#include "runtime/os.hpp" #include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/defaultStream.hpp" #include "utilities/defaultStream.hpp"
# include <sys/stat.h> # include <sys/stat.h>
...@@ -344,24 +345,13 @@ ReservedSpace FileMapInfo::reserve_shared_memory() { ...@@ -344,24 +345,13 @@ ReservedSpace FileMapInfo::reserve_shared_memory() {
fail_continue(err_msg("Unable to reserved shared space at required address " INTPTR_FORMAT, requested_addr)); fail_continue(err_msg("Unable to reserved shared space at required address " INTPTR_FORMAT, requested_addr));
return rs; return rs;
} }
// the reserved virtual memory is for mapping class data sharing archive
if (MemTracker::is_on()) {
MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
}
return rs; return rs;
} }
// Memory map a region in the address space.
char* FileMapInfo::map_region(int i, ReservedSpace rs) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
size_t used = si->_used;
size_t size = align_size_up(used, os::vm_allocation_granularity());
ReservedSpace mapped_rs = rs.first_part(size, true, true);
ReservedSpace unmapped_rs = rs.last_part(size);
mapped_rs.release();
return map_region(i);
}
// Memory map a region in the address space. // Memory map a region in the address space.
static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"}; static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"};
......
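The MemTracker lines added in the hunk above tag the CDS reservation for Native Memory Tracking as soon as it exists. A minimal sketch of that reserve-then-tag pattern; only the MemTracker calls are taken verbatim from the hunk, and the ReservedSpace constructor shape is an assumption about the HotSpot of this era:

```cpp
// Hedged sketch of the reserve-then-tag pattern; ReservedSpace and MemTracker
// are HotSpot-internal, and the constructor shape here is an assumption.
ReservedSpace reserve_cds_space(size_t bytes, char* requested_addr) {
  ReservedSpace rs(bytes, os::vm_allocation_granularity(),
                   false /* large pages */, requested_addr);
  if (rs.is_reserved() && MemTracker::is_on()) {
    // Without this, NMT would report the mapping under an unknown category
    // instead of attributing it to the shared class-data archive.
    MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
  }
  return rs;
}
```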
...@@ -125,7 +125,6 @@ public: ...@@ -125,7 +125,6 @@ public:
size_t capacity, bool read_only, bool allow_exec); size_t capacity, bool read_only, bool allow_exec);
void write_bytes(const void* buffer, int count); void write_bytes(const void* buffer, int count);
void write_bytes_aligned(const void* buffer, int count); void write_bytes_aligned(const void* buffer, int count);
char* map_region(int i, ReservedSpace rs);
char* map_region(int i); char* map_region(int i);
void unmap_region(int i); void unmap_region(int i);
void close(); void close();
......
...@@ -27,6 +27,8 @@ ...@@ -27,6 +27,8 @@
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp" #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#endif // SERIALGC #endif // SERIALGC
#include "memory/freeBlockDictionary.hpp" #include "memory/freeBlockDictionary.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#ifdef TARGET_OS_FAMILY_linux #ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp" # include "thread_linux.inline.hpp"
#endif #endif
...@@ -62,6 +64,9 @@ template <class Chunk> void FreeBlockDictionary<Chunk>::verify_par_locked() cons ...@@ -62,6 +64,9 @@ template <class Chunk> void FreeBlockDictionary<Chunk>::verify_par_locked() cons
} }
#endif #endif
template class FreeBlockDictionary<Metablock>;
template class FreeBlockDictionary<Metachunk>;
#ifndef SERIALGC #ifndef SERIALGC
// Explicitly instantiate for FreeChunk // Explicitly instantiate for FreeChunk
template class FreeBlockDictionary<FreeChunk>; template class FreeBlockDictionary<FreeChunk>;
......
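The two `template class` lines added above are explicit instantiations: freeBlockDictionary.cpp keeps the template method bodies out of the header, so every chunk type used by other translation units must be stamped out here. A minimal standalone illustration of the idiom, with hypothetical names:

```cpp
// dict.hpp -- declaration only (hypothetical example, not HotSpot source)
template <class Chunk> class Dict {
 public:
  void insert(Chunk* c);
};

// dict.cpp -- the definition lives out of line, so without the explicit
// instantiation below, a caller of Dict<int>::insert in another .cpp file
// would compile but fail to link.
template <class Chunk> void Dict<Chunk>::insert(Chunk* c) { /* ... */ }

template class Dict<int>;  // emits Dict<int>'s members into this object file
```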
...@@ -66,7 +66,7 @@ class FreeBlockDictionary: public CHeapObj<mtGC> { ...@@ -66,7 +66,7 @@ class FreeBlockDictionary: public CHeapObj<mtGC> {
virtual void reset(HeapWord* addr, size_t size) = 0; virtual void reset(HeapWord* addr, size_t size) = 0;
virtual void reset() = 0; virtual void reset() = 0;
virtual void dict_census_udpate(size_t size, bool split, bool birth) = 0; virtual void dict_census_update(size_t size, bool split, bool birth) = 0;
virtual bool coal_dict_over_populated(size_t size) = 0; virtual bool coal_dict_over_populated(size_t size) = 0;
virtual void begin_sweep_dict_census(double coalSurplusPercent, virtual void begin_sweep_dict_census(double coalSurplusPercent,
float inter_sweep_current, float inter_sweep_estimate, float inter_sweep_current, float inter_sweep_estimate,
......
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
#include "precompiled.hpp" #include "precompiled.hpp"
#include "memory/freeBlockDictionary.hpp" #include "memory/freeBlockDictionary.hpp"
#include "memory/freeList.hpp" #include "memory/freeList.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/sharedHeap.hpp" #include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp" #include "runtime/globals.hpp"
#include "runtime/mutex.hpp" #include "runtime/mutex.hpp"
...@@ -49,8 +51,6 @@ FreeList<Chunk>::FreeList() : ...@@ -49,8 +51,6 @@ FreeList<Chunk>::FreeList() :
{ {
_size = 0; _size = 0;
_count = 0; _count = 0;
_hint = 0;
init_statistics();
} }
template <class Chunk> template <class Chunk>
...@@ -62,34 +62,50 @@ FreeList<Chunk>::FreeList(Chunk* fc) : ...@@ -62,34 +62,50 @@ FreeList<Chunk>::FreeList(Chunk* fc) :
{ {
_size = fc->size(); _size = fc->size();
_count = 1; _count = 1;
_hint = 0;
init_statistics();
#ifndef PRODUCT
_allocation_stats.set_returned_bytes(size() * HeapWordSize);
#endif
} }
template <class Chunk> template <class Chunk>
void FreeList<Chunk>::reset(size_t hint) { void FreeList<Chunk>::link_head(Chunk* v) {
assert_proper_lock_protection();
set_head(v);
// If this method is not used (just set the head instead),
// this check can be avoided.
if (v != NULL) {
v->link_prev(NULL);
}
}
template <class Chunk>
void FreeList<Chunk>::reset() {
// Don't set the _size to 0 because this method is
// used with a existing list that has a size but which has
// been emptied.
// Don't clear the _protecting_lock of an existing list.
set_count(0); set_count(0);
set_head(NULL); set_head(NULL);
set_tail(NULL); set_tail(NULL);
set_hint(hint);
} }
template <class Chunk> template <class Chunk>
void FreeList<Chunk>::init_statistics(bool split_birth) { void FreeList<Chunk>::initialize() {
_allocation_stats.initialize(split_birth); #ifdef ASSERT
// Needed early because it might be checked in other initializing code.
set_protecting_lock(NULL);
#endif
reset();
set_size(0);
} }
template <class Chunk> template <class Chunk_t>
Chunk* FreeList<Chunk>::get_chunk_at_head() { Chunk_t* FreeList<Chunk_t>::get_chunk_at_head() {
assert_proper_lock_protection(); assert_proper_lock_protection();
assert(head() == NULL || head()->prev() == NULL, "list invariant"); assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant"); assert(tail() == NULL || tail()->next() == NULL, "list invariant");
Chunk* fc = head(); Chunk_t* fc = head();
if (fc != NULL) { if (fc != NULL) {
Chunk* nextFC = fc->next(); Chunk_t* nextFC = fc->next();
if (nextFC != NULL) { if (nextFC != NULL) {
// The chunk fc being removed has a "next". Set the "next" to the // The chunk fc being removed has a "next". Set the "next" to the
// "prev" of fc. // "prev" of fc.
...@@ -197,11 +213,6 @@ void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) { ...@@ -197,11 +213,6 @@ void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
link_tail(chunk); link_tail(chunk);
} }
increment_count(); // of # of chunks in list increment_count(); // of # of chunks in list
DEBUG_ONLY(
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
)
assert(head() == NULL || head()->prev() == NULL, "list invariant"); assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant"); assert(tail() == NULL || tail()->next() == NULL, "list invariant");
assert(head() == NULL || head()->size() == size(), "wrong item on list"); assert(head() == NULL || head()->size() == size(), "wrong item on list");
...@@ -233,11 +244,6 @@ void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) { ...@@ -233,11 +244,6 @@ void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
} }
link_tail(chunk); link_tail(chunk);
increment_count(); // of # of chunks in list increment_count(); // of # of chunks in list
DEBUG_ONLY(
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
)
assert(head() == NULL || head()->prev() == NULL, "list invariant"); assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant"); assert(tail() == NULL || tail()->next() == NULL, "list invariant");
assert(head() == NULL || head()->size() == size(), "wrong item on list"); assert(head() == NULL || head()->size() == size(), "wrong item on list");
...@@ -273,7 +279,7 @@ void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) { ...@@ -273,7 +279,7 @@ void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) {
} }
} }
// verify_chunk_in_free_list() is used to verify that an item is in this free list. // verify_chunk_in_free_lists() is used to verify that an item is in this free list.
// It is used as a debugging aid. // It is used as a debugging aid.
template <class Chunk> template <class Chunk>
bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const { bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
...@@ -293,41 +299,15 @@ bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const { ...@@ -293,41 +299,15 @@ bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
} }
#ifndef PRODUCT #ifndef PRODUCT
template <class Chunk>
void FreeList<Chunk>::verify_stats() const {
// The +1 of the LH comparand is to allow some "looseness" in
// checking: we usually call this interface when adding a block
// and we'll subsequently update the stats; we cannot update the
// stats beforehand because in the case of the large-block BT
// dictionary for example, this might be the first block and
// in that case there would be no place that we could record
// the stats (which are kept in the block itself).
assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+ _allocation_stats.coal_births() + 1) // Total Production Stock + 1
>= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
+ (ssize_t)count()), // Total Current Stock + depletion
err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prev_sweep(" SIZE_FORMAT ")"
" + split_births(" SIZE_FORMAT ")"
" + coal_births(" SIZE_FORMAT ") + 1 >= "
" split_deaths(" SIZE_FORMAT ")"
" coal_deaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
this, _size, _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
_allocation_stats.split_births(), _allocation_stats.split_deaths(),
_allocation_stats.coal_deaths(), count()));
}
template <class Chunk> template <class Chunk>
void FreeList<Chunk>::assert_proper_lock_protection_work() const { void FreeList<Chunk>::assert_proper_lock_protection_work() const {
assert(_protecting_lock != NULL, "Don't call this directly"); assert(protecting_lock() != NULL, "Don't call this directly");
assert(ParallelGCThreads > 0, "Don't call this directly"); assert(ParallelGCThreads > 0, "Don't call this directly");
Thread* thr = Thread::current(); Thread* thr = Thread::current();
if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) { if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
// assert that we are holding the freelist lock // assert that we are holding the freelist lock
} else if (thr->is_GC_task_thread()) { } else if (thr->is_GC_task_thread()) {
assert(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED"); assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
} else if (thr->is_Java_thread()) { } else if (thr->is_Java_thread()) {
assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing"); assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
} else { } else {
...@@ -350,21 +330,17 @@ void FreeList<Chunk>::print_labels_on(outputStream* st, const char* c) { ...@@ -350,21 +330,17 @@ void FreeList<Chunk>::print_labels_on(outputStream* st, const char* c) {
// to the call is a non-null string, it is printed in the first column; // to the call is a non-null string, it is printed in the first column;
// otherwise, if the argument is null (the default), then the size of the // otherwise, if the argument is null (the default), then the size of the
// (free list) block is printed in the first column. // (free list) block is printed in the first column.
template <class Chunk> template <class Chunk_t>
void FreeList<Chunk>::print_on(outputStream* st, const char* c) const { void FreeList<Chunk_t>::print_on(outputStream* st, const char* c) const {
if (c != NULL) { if (c != NULL) {
st->print("%16s", c); st->print("%16s", c);
} else { } else {
st->print(SIZE_FORMAT_W(16), size()); st->print(SIZE_FORMAT_W(16), size());
} }
st->print("\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
count(), coal_births(), coal_deaths(), split_births(), split_deaths());
} }
template class FreeList<Metablock>;
template class FreeList<Metachunk>;
#ifndef SERIALGC #ifndef SERIALGC
// Needs to be after the definitions have been seen.
template class FreeList<FreeChunk>; template class FreeList<FreeChunk>;
#endif // SERIALGC #endif // SERIALGC
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METABLOCK_HPP
#define SHARE_VM_MEMORY_METABLOCK_HPP
// Metablock are the unit of allocation from a Chunk. It is initialized
// with the size of the requested allocation. That size is overwritten
// once the allocation returns.
//
// A Metablock may be reused by its SpaceManager but are never moved between
// SpaceManagers. There is no explicit link to the Metachunk
// from which it was allocated. Metablock may be deallocated and
// put on a freelist but the space is never freed, rather
// the Metachunk it is a part of will be deallocated when it's
// associated class loader is collected.
class Metablock VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
// Used to align the allocation (see below).
union block_t {
void* _data[3];
struct header_t {
size_t _word_size;
Metablock* _next;
Metablock* _prev;
} _header;
} _block;
static size_t _min_block_byte_size;
static size_t _overhead;
typedef union block_t Block;
typedef struct header_t Header;
const Block* block() const { return &_block; }
const Block::header_t* header() const { return &(block()->_header); }
public:
static Metablock* initialize(MetaWord* p, size_t word_size);
// This places the body of the block at a 2 word boundary
// because every block starts on a 2 word boundary. Work out
// how to make the body on a 2 word boundary if the block
// starts on a arbitrary boundary. JJJ
size_t word_size() const { return header()->_word_size; }
void set_word_size(size_t v) { _block._header._word_size = v; }
size_t size() const volatile { return _block._header._word_size; }
void set_size(size_t v) { _block._header._word_size = v; }
Metablock* next() const { return header()->_next; }
void set_next(Metablock* v) { _block._header._next = v; }
Metablock* prev() const { return header()->_prev; }
void set_prev(Metablock* v) { _block._header._prev = v; }
static size_t min_block_byte_size() { return _min_block_byte_size; }
static size_t overhead() { return _overhead; }
bool is_free() { return header()->_word_size != 0; }
void clear_next() { set_next(NULL); }
void link_prev(Metablock* ptr) { set_prev(ptr); }
uintptr_t* end() { return ((uintptr_t*) this) + size(); }
bool cantCoalesce() const { return false; }
void link_next(Metablock* ptr) { set_next(ptr); }
void link_after(Metablock* ptr){
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}
// Should not be needed in a free list of Metablocks
void markNotFree() { ShouldNotReachHere(); }
// Debug support
#ifdef ASSERT
void* prev_addr() const { return (void*)&_block._header._prev; }
void* next_addr() const { return (void*)&_block._header._next; }
void* size_addr() const { return (void*)&_block._header._word_size; }
#endif
bool verify_chunk_in_free_list(Metablock* tc) const { return true; }
bool verify_par_locked() { return true; }
void assert_is_mangled() const {/* Don't check "\*/}
};
#endif // SHARE_VM_MEMORY_METABLOCK_HPP
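The union in Metablock is the sizing trick: `_data[3]` forces the header to occupy exactly three pointer-sized words regardless of how the compiler would pad the struct. A hedged sketch showing how a per-block overhead constant could fall out of sizeof; the mirror struct is hypothetical:

```cpp
#include <cstdio>

// Hypothetical mirror of Metablock's header union; sketch only.
struct MiniBlock {
  union {
    void* _data[3];  // pads the header to exactly 3 pointer-sized words
    struct {
      size_t     _word_size;
      MiniBlock* _next;
      MiniBlock* _prev;
    } _header;
  } _block;
};

int main() {
  // On an LP64 build both union members span 3 words, so the per-block
  // overhead (the idea behind Metablock::_overhead) is 3 words here.
  std::printf("header bytes: %zu, words: %zu\n",
              sizeof(MiniBlock), sizeof(MiniBlock) / sizeof(void*));
  return 0;
}
```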
...@@ -57,12 +57,10 @@ ...@@ -57,12 +57,10 @@
// //
class ClassLoaderData; class ClassLoaderData;
class Metablock;
class MetaWord; class MetaWord;
class Mutex; class Mutex;
class outputStream; class outputStream;
class FreeChunk;
template <class Chunk_t> class FreeList;
template <class Chunk_t> class BinaryTreeDictionary;
class SpaceManager; class SpaceManager;
// Metaspaces each have a SpaceManager and allocations // Metaspaces each have a SpaceManager and allocations
...@@ -128,7 +126,7 @@ class Metaspace : public CHeapObj<mtClass> { ...@@ -128,7 +126,7 @@ class Metaspace : public CHeapObj<mtClass> {
size_t capacity_words(MetadataType mdtype) const; size_t capacity_words(MetadataType mdtype) const;
size_t waste_words(MetadataType mdtype) const; size_t waste_words(MetadataType mdtype) const;
static MetaWord* allocate(ClassLoaderData* loader_data, size_t size, static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
bool read_only, MetadataType mdtype, TRAPS); bool read_only, MetadataType mdtype, TRAPS);
void deallocate(MetaWord* ptr, size_t byte_size, bool is_class); void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
......
...@@ -663,8 +663,8 @@ bool MetaspaceShared::is_in_shared_space(const void* p) { ...@@ -663,8 +663,8 @@ bool MetaspaceShared::is_in_shared_space(const void* p) {
if (_ro_base == NULL || _rw_base == NULL) { if (_ro_base == NULL || _rw_base == NULL) {
return false; return false;
} else { } else {
return ((p > _ro_base && p < (_ro_base + SharedReadOnlySize)) || return ((p >= _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
(p > _rw_base && p < (_rw_base + SharedReadWriteSize))); (p >= _rw_base && p < (_rw_base + SharedReadWriteSize)));
} }
} }
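The relational fix above (`>` to `>=`) turns each shared-region check into a proper half-open interval, so a pointer at the very base of the read-only or read-write region now counts as shared:

```cpp
#include <cstddef>

// Hedged sketch of the corrected membership test: the region [base, base + size).
static inline bool in_region(const void* p, const char* base, size_t size) {
  return p >= base && p < base + size;  // base itself is inside; the end is not
}
```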
...@@ -693,14 +693,6 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) { ...@@ -693,14 +693,6 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
ReservedSpace shared_rs = mapinfo->reserve_shared_memory(); ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
if (!shared_rs.is_reserved()) return false; if (!shared_rs.is_reserved()) return false;
// Split reserved memory into pieces (windows needs this)
ReservedSpace ro_rs = shared_rs.first_part(SharedReadOnlySize);
ReservedSpace tmp_rs1 = shared_rs.last_part(SharedReadOnlySize);
ReservedSpace rw_rs = tmp_rs1.first_part(SharedReadWriteSize);
ReservedSpace tmp_rs2 = tmp_rs1.last_part(SharedReadWriteSize);
ReservedSpace md_rs = tmp_rs2.first_part(SharedMiscDataSize);
ReservedSpace mc_rs = tmp_rs2.last_part(SharedMiscDataSize);
// Map each shared region // Map each shared region
if ((_ro_base = mapinfo->map_region(ro)) != NULL && if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
(_rw_base = mapinfo->map_region(rw)) != NULL && (_rw_base = mapinfo->map_region(rw)) != NULL &&
......
...@@ -1155,8 +1155,12 @@ methodHandle Method::clone_with_new_data(methodHandle m, u_char* new_code, int n ...@@ -1155,8 +1155,12 @@ methodHandle Method::clone_with_new_data(methodHandle m, u_char* new_code, int n
vmSymbols::SID Method::klass_id_for_intrinsics(Klass* holder) { vmSymbols::SID Method::klass_id_for_intrinsics(Klass* holder) {
// if loader is not the default loader (i.e., != NULL), we can't know the intrinsics // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
// because we are not loading from core libraries // because we are not loading from core libraries
if (InstanceKlass::cast(holder)->class_loader() != NULL) // exception: the AES intrinsics come from lib/ext/sunjce_provider.jar
// which does not use the class default class loader so we check for its loader here
if ((InstanceKlass::cast(holder)->class_loader() != NULL) &&
InstanceKlass::cast(holder)->class_loader()->klass()->name() != vmSymbols::sun_misc_Launcher_ExtClassLoader()) {
return vmSymbols::NO_SID; // regardless of name, no intrinsics here return vmSymbols::NO_SID; // regardless of name, no intrinsics here
}
// see if the klass name is well-known: // see if the klass name is well-known:
Symbol* klass_name = InstanceKlass::cast(holder)->name(); Symbol* klass_name = InstanceKlass::cast(holder)->name();
......