Commit 4469db58 authored by zgu

Merge

......@@ -287,3 +287,7 @@ f2e12eb74117c917c0bb264694c02de4a6a15a10 hs25-b03
b261523fe66c40a02968f0aa7e73602491bb3386 hs25-b05
4547dc71db765276e027b0c2780b724bae0a07d3 jdk8-b61
d0337c31c8be7716369b4e7c3bd5f352983c6a06 hs25-b06
dccd40de8db1fa96f186e6179907818d75320440 jdk8-b62
dc16fe422c535ecd4e9f80fb814a1bb9704da6f5 hs25-b07
acabb5c282f59be7e3238920b2ea06b684ab68f7 jdk8-b63
8cb93eadfb6dcab88d91b8e2cd3e0e07d0ac4048 hs25-b08
......@@ -453,14 +453,30 @@ ifneq ($(OSNAME),windows)
ifeq ($(JVM_VARIANT_ZEROSHARK), true)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo
$(install-file)
$(EXPORT_SERVER_DIR)/%.diz: $(SHARK_DIR)/%.diz
$(install-file)
endif
ifeq ($(JVM_VARIANT_ZERO), true)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo
$(install-file)
$(EXPORT_SERVER_DIR)/%.diz: $(ZERO_DIR)/%.diz
$(install-file)
endif
ifeq ($(JVM_VARIANT_MINIMAL1), true)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
......
......@@ -79,10 +79,10 @@ ifeq ($(INCLUDE_ALTERNATE_GCS), false)
CXXFLAGS += -DSERIALGC
CFLAGS += -DSERIALGC
Src_Files_EXCLUDE += \
binaryTreeDictionary.cpp cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp cmsPermGen.cpp compactibleFreeListSpace.cpp \
concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp freeBlockDictionary.cpp \
freeChunk.cpp freeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
......
......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=07
HS_BUILD_NUMBER=09
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
......@@ -1007,6 +1007,67 @@ void Assembler::addss(XMMRegister dst, Address src) {
emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}
void Assembler::aesdec(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xde);
emit_operand(dst, src);
}
void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xde);
emit_byte(0xC0 | encode);
}
void Assembler::aesdeclast(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdf);
emit_operand(dst, src);
}
void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdf);
emit_byte(0xC0 | encode);
}
void Assembler::aesenc(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdc);
emit_operand(dst, src);
}
void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdc);
emit_byte(0xC0 | encode);
}
void Assembler::aesenclast(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdd);
emit_operand(dst, src);
}
void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0xdd);
emit_byte(0xC0 | encode);
}
void Assembler::andl(Address dst, int32_t imm32) {
InstructionMark im(this);
prefix(dst);
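Editor's note: the eight aesenc/aesdec emitters above only encode the instructions. For context, here is a minimal sketch of how a stub generator could chain them to encrypt one 128-bit block with AES-128. The function name, the register parameters, the assumption that the round keys are already expanded and byte-order-fixed, and the `__` shorthand for `masm->` are all illustrative; the commit's real stubs live in stubGenerator_x86 code not shown in this excerpt.

#define __ masm->
// AES-128 block encryption: whitening XOR, nine full rounds, one final round.
static void sketch_aes128_encrypt_block(MacroAssembler* masm,
                                        XMMRegister block,           // in: plaintext, out: ciphertext
                                        XMMRegister round_key[11]) { // pre-expanded key schedule
  __ pxor(block, round_key[0]);          // initial AddRoundKey
  for (int r = 1; r <= 9; r++) {
    __ aesenc(block, round_key[r]);      // SubBytes+ShiftRows+MixColumns+AddRoundKey
  }
  __ aesenclast(block, round_key[10]);   // final round omits MixColumns
}
#undef __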
......@@ -2307,6 +2368,22 @@ void Assembler::prefix(Prefix p) {
a_byte(p);
}
void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_ssse3(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0x00);
emit_byte(0xC0 | encode);
}
void Assembler::pshufb(XMMRegister dst, Address src) {
assert(VM_Version::supports_ssse3(), "");
assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
InstructionMark im(this);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
emit_byte(0x00);
emit_operand(dst, src);
}
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
assert(isByte(mode), "invalid value");
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
......@@ -8067,6 +8144,15 @@ void MacroAssembler::movptr(Address dst, Register src) {
LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}
void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
Assembler::movdqu(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::movdqu(dst, Address(rscratch1, 0));
}
}
void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
Assembler::movsd(dst, as_Address(src));
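Editor's note: the reachable() test in the new movdqu overload follows the standard x86_64 pattern for AddressLiteral operands. A literal within ±2GB of the code can be addressed RIP-relative (movdqu xmm, [rip+disp32]); anything farther is first materialized with lea rscratch1, literal (rscratch1 is r10 on x86_64) and then accessed indirectly as movdqu xmm, [r10].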
......@@ -8357,6 +8443,17 @@ void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
}
}
void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
// Used in sign-bit flipping with aligned address.
assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
if (reachable(src)) {
Assembler::pshufb(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::pshufb(dst, Address(rscratch1, 0));
}
}
// AVX 3-operands instructions
void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
......
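Editor's note: pshufb's second operand is a 16-byte control mask, and the key_shuffle_mask_addr stub routine added elsewhere in this commit supplies one. As an illustration only (the constant below is an assumption, not quoted from the commit), a mask that byte-swaps each 32-bit word of an XMM register — which is what "fixing up 128-bit words consisting of big-endian 32-bit integers" requires — looks like:

// Stored little-endian, lane 0 holds mask bytes 03 02 01 00, so pshufb
// picks dst[0]=old byte 3, dst[1]=old byte 2, ... reversing the byte
// order within each 32-bit word of the destination register.
static const juint key_shuffle_mask[4] = {
  0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f,
};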
......@@ -875,6 +875,17 @@ private:
void addss(XMMRegister dst, Address src);
void addss(XMMRegister dst, XMMRegister src);
// AES instructions
void aesdec(XMMRegister dst, Address src);
void aesdec(XMMRegister dst, XMMRegister src);
void aesdeclast(XMMRegister dst, Address src);
void aesdeclast(XMMRegister dst, XMMRegister src);
void aesenc(XMMRegister dst, Address src);
void aesenc(XMMRegister dst, XMMRegister src);
void aesenclast(XMMRegister dst, Address src);
void aesenclast(XMMRegister dst, XMMRegister src);
void andl(Address dst, int32_t imm32);
void andl(Register dst, int32_t imm32);
void andl(Register dst, Address src);
......@@ -1424,6 +1435,10 @@ private:
void prefetcht2(Address src);
void prefetchw(Address src);
// Shuffle Bytes
void pshufb(XMMRegister dst, XMMRegister src);
void pshufb(XMMRegister dst, Address src);
// Shuffle Packed Doublewords
void pshufd(XMMRegister dst, XMMRegister src, int mode);
void pshufd(XMMRegister dst, Address src, int mode);
......@@ -2611,6 +2626,12 @@ public:
void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
void divss(XMMRegister dst, AddressLiteral src);
// Move Unaligned Double Quadword
void movdqu(Address dst, XMMRegister src) { Assembler::movdqu(dst, src); }
void movdqu(XMMRegister dst, Address src) { Assembler::movdqu(dst, src); }
void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
void movdqu(XMMRegister dst, AddressLiteral src);
void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
......@@ -2658,6 +2679,10 @@ public:
void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
void xorps(XMMRegister dst, AddressLiteral src);
// Shuffle Bytes
void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
void pshufb(XMMRegister dst, AddressLiteral src);
// AVX 3-operands instructions
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
......
......@@ -44,3 +44,4 @@
address StubRoutines::x86::_verify_mxcsr_entry = NULL;
address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
......@@ -41,10 +41,14 @@ class x86 {
private:
static address _verify_mxcsr_entry;
static address _verify_fpu_cntrl_wrd_entry;
// shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
static address _key_shuffle_mask_addr;
public:
static address verify_mxcsr_entry() { return _verify_mxcsr_entry; }
static address verify_fpu_cntrl_wrd_entry() { return _verify_fpu_cntrl_wrd_entry; }
static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
};
static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
......
......@@ -56,3 +56,4 @@ address StubRoutines::x86::_float_sign_flip = NULL;
address StubRoutines::x86::_double_sign_mask = NULL;
address StubRoutines::x86::_double_sign_flip = NULL;
address StubRoutines::x86::_mxcsr_std = NULL;
address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
......@@ -54,6 +54,8 @@ class x86 {
static address _double_sign_mask;
static address _double_sign_flip;
static address _mxcsr_std;
// shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
static address _key_shuffle_mask_addr;
public:
......@@ -116,6 +118,9 @@ class x86 {
{
return _mxcsr_std;
}
static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
};
#endif // CPU_X86_VM_STUBROUTINES_X86_64_HPP
......@@ -419,13 +419,16 @@ void VM_Version::get_processor_features() {
if (UseAVX < 1)
_cpuFeatures &= ~CPU_AVX;
if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
_cpuFeatures &= ~CPU_AES;
if (logical_processors_per_package() == 1) {
// HT processor could be installed on a system which doesn't support HT.
_cpuFeatures &= ~CPU_HT;
}
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
......@@ -441,6 +444,7 @@ void VM_Version::get_processor_features() {
(supports_popcnt() ? ", popcnt" : ""),
(supports_avx() ? ", avx" : ""),
(supports_avx2() ? ", avx2" : ""),
(supports_aes() ? ", aes" : ""),
(supports_mmx_ext() ? ", mmxext" : ""),
(supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
(supports_lzcnt() ? ", lzcnt": ""),
......@@ -472,6 +476,29 @@ void VM_Version::get_processor_features() {
if (!supports_avx ()) // Drop to 0 if no AVX support
UseAVX = 0;
// Use AES instructions if available.
if (supports_aes()) {
if (FLAG_IS_DEFAULT(UseAES)) {
UseAES = true;
}
} else if (UseAES) {
if (!FLAG_IS_DEFAULT(UseAES))
warning("AES instructions not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
// The AES intrinsic stubs require AES instruction support (of course)
// but also require AVX mode for misaligned SSE access
if (UseAES && (UseAVX > 0)) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
UseAESIntrinsics = true;
}
} else if (UseAESIntrinsics) {
if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
warning("AES intrinsics not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
#ifdef COMPILER2
if (UseFPUForSpilling) {
if (UseSSE < 2) {
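Editor's summary of the flag logic above (this table restates the code; it is not from the commit):

  CPU AES support   UseAVX   UseAES default   UseAESIntrinsics default
  absent            any      false            false
  present           == 0     true             false (stubs need AVX for misaligned SSE access)
  present           >  0     true             true

In the "absent" row, an explicit -XX:+UseAES or -XX:+UseAESIntrinsics on the command line draws the corresponding warning before the flag is forced back to false.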
......@@ -714,6 +741,9 @@ void VM_Version::get_processor_features() {
if (UseAVX > 0) {
tty->print(" UseAVX=%d",UseAVX);
}
if (UseAES) {
tty->print(" UseAES=1");
}
tty->cr();
tty->print("Allocation");
if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
......
......@@ -78,7 +78,9 @@ public:
sse4_2 : 1,
: 2,
popcnt : 1,
: 3,
: 1,
aes : 1,
: 1,
osxsave : 1,
avx : 1,
: 3;
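Editor's note: counting the bit-field slots above, the new aes field lands at CPUID.(EAX=1):ECX bit 25, which is the architectural AES-NI feature flag. A standalone check (a sketch using GCC's <cpuid.h>, not code from this commit) would be:

#include <cpuid.h>

static bool cpu_supports_aes() {
  unsigned int eax, ebx, ecx, edx;
  if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) == 0)
    return false;                 // CPUID leaf 1 not supported
  return ((ecx >> 25) & 1) != 0;  // CPUID.01H:ECX.AES [bit 25]
}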
......@@ -244,7 +246,8 @@ protected:
CPU_TSC = (1 << 15),
CPU_TSCINV = (1 << 16),
CPU_AVX = (1 << 17),
CPU_AVX2 = (1 << 18)
CPU_AVX2 = (1 << 18),
CPU_AES = (1 << 19)
} cpuFeatureFlags;
enum {
......@@ -420,6 +423,8 @@ protected:
result |= CPU_TSC;
if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
result |= CPU_TSCINV;
if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
result |= CPU_AES;
// AMD features.
if (is_amd()) {
......@@ -544,6 +549,7 @@ public:
static bool supports_avx() { return (_cpuFeatures & CPU_AVX) != 0; }
static bool supports_avx2() { return (_cpuFeatures & CPU_AVX2) != 0; }
static bool supports_tsc() { return (_cpuFeatures & CPU_TSC) != 0; }
static bool supports_aes() { return (_cpuFeatures & CPU_AES) != 0; }
// Intel features
static bool is_intel_family_core() { return is_intel() &&
......
......@@ -4102,9 +4102,158 @@ instruct vsll4L_reg_imm(vecY dst, vecY src, immI8 shift) %{
// ----------------------- LogicalRightShift -----------------------------------
// Shorts/Chars vector logical right shift produces incorrect Java result
// Shorts vector logical right shift produces incorrect Java result
// for negative data because java code convert short value into int with
// sign extension before a shift.
// sign extension before a shift. But char vectors are fine since chars are
// unsigned values.
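A standalone C++ illustration of the comment above (editor's sketch mirroring Java's promotion rules; it is not part of the .ad file):

#include <cstdint>
#include <cstdio>

int main() {
  int16_t  s = -4;      // Java short
  uint16_t c = 0xFFFC;  // Java char (unsigned 16-bit)

  // Java scalar semantics: sign-extend the short to int, then >>> 1.
  uint16_t java_short = (uint16_t)(((uint32_t)(int32_t)s) >> 1);  // 0xFFFE
  // What one 16-bit lane of psrlw computes: a shift within 16 bits.
  uint16_t simd_short = (uint16_t)((uint16_t)s >> 1);             // 0x7FFE -- differs!

  // For char the promotion is a zero-extension, so the two agree.
  uint16_t java_char = (uint16_t)(((uint32_t)c) >> 1);            // 0x7FFE
  uint16_t simd_char = (uint16_t)(c >> 1);                        // 0x7FFE

  printf("short: java=%04x simd=%04x\n", (unsigned)java_short, (unsigned)simd_short);
  printf("char:  java=%04x simd=%04x\n", (unsigned)java_char, (unsigned)simd_char);
  return 0;
}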
instruct vsrl2S(vecS dst, vecS shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed2S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl2S_imm(vecS dst, immI8 shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed2S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, (int)$shift$$constant);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl2S_reg(vecS dst, vecS src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed2S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl2S_reg_imm(vecS dst, vecS src, immI8 shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed2S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl4S(vecD dst, vecS shift) %{
predicate(n->as_Vector()->length() == 4);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed4S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl4S_imm(vecD dst, immI8 shift) %{
predicate(n->as_Vector()->length() == 4);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed4S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, (int)$shift$$constant);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl4S_reg(vecD dst, vecD src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed4S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl4S_reg_imm(vecD dst, vecD src, immI8 shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed4S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl8S(vecX dst, vecS shift) %{
predicate(n->as_Vector()->length() == 8);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed8S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl8S_imm(vecX dst, immI8 shift) %{
predicate(n->as_Vector()->length() == 8);
match(Set dst (URShiftVS dst shift));
format %{ "psrlw $dst,$shift\t! logical right shift packed8S" %}
ins_encode %{
__ psrlw($dst$$XMMRegister, (int)$shift$$constant);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl8S_reg(vecX dst, vecX src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed8S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl8S_reg_imm(vecX dst, vecX src, immI8 shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed8S" %}
ins_encode %{
bool vector256 = false;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl16S_reg(vecY dst, vecY src, vecS shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed16S" %}
ins_encode %{
bool vector256 = true;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
%}
ins_pipe( pipe_slow );
%}
instruct vsrl16S_reg_imm(vecY dst, vecY src, immI8 shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
match(Set dst (URShiftVS src shift));
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed16S" %}
ins_encode %{
bool vector256 = true;
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
%}
ins_pipe( pipe_slow );
%}
// Integers vector logical right shift
instruct vsrl2I(vecD dst, vecS shift) %{
......
......@@ -31,12 +31,17 @@
return _masm;
}
protected:
address generate_entry(address entry_point) {
ZeroEntry *entry = (ZeroEntry *) assembler()->pc();
assembler()->advance(sizeof(ZeroEntry));
public:
static address generate_entry_impl(MacroAssembler* masm, address entry_point) {
ZeroEntry *entry = (ZeroEntry *) masm->pc();
masm->advance(sizeof(ZeroEntry));
entry->set_entry_point(entry_point);
return (address) entry;
}
protected:
address generate_entry(address entry_point) {
return generate_entry_impl(assembler(), entry_point);
}
#endif // CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP
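Editor's note: lifting the body of generate_entry into the public, static generate_entry_impl lets code outside the interpreter generator mint ZeroEntry stubs from its own MacroAssembler. The Zero method-handle code later in this commit relies on exactly that, e.g.:

  // pattern taken from methodHandles_zero.cpp further down
  return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic);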
......@@ -36,7 +36,6 @@
static int native_entry(Method* method, intptr_t UNUSED, TRAPS);
static int accessor_entry(Method* method, intptr_t UNUSED, TRAPS);
static int empty_entry(Method* method, intptr_t UNUSED, TRAPS);
static int method_handle_entry(Method* method, intptr_t UNUSED, TRAPS);
public:
// Main loop of normal_entry
......@@ -44,7 +43,6 @@
private:
// Helpers for method_handle_entry
static void process_method_handle(oop method_handle, TRAPS);
static void insert_vmslots(int insert_before, int num_slots, TRAPS);
static void remove_vmslots(int first_slot, int num_slots, TRAPS);
static BasicType result_type_of_handle(oop method_handle);
......
......@@ -351,7 +351,7 @@ void SharkFrame::identify_word(int frame_index,
switch (offset) {
case pc_off:
strncpy(fieldbuf, "pc", buflen);
if (method()->is_oop()) {
if (method()->is_method()) {
nmethod *code = method()->code();
if (code && code->pc_desc_at(pc())) {
SimpleScopeDesc ssd(code, pc());
......@@ -367,7 +367,7 @@ void SharkFrame::identify_word(int frame_index,
case method_off:
strncpy(fieldbuf, "method", buflen);
if (method()->is_oop()) {
if (method()->is_method()) {
method()->name_and_sig_as_C_string(valuebuf, buflen);
}
return;
......@@ -378,7 +378,7 @@ void SharkFrame::identify_word(int frame_index,
}
// Variable part
if (method()->is_oop()) {
if (method()->is_method()) {
identify_vp_word(frame_index, addr_of_word(offset),
addr_of_word(header_words + 1),
unextended_sp() + method()->max_stack(),
......@@ -430,4 +430,3 @@ intptr_t *frame::initial_deoptimization_info() {
// unused... but returns fp() to minimize changes introduced by 7087445
return fp();
}
......@@ -36,6 +36,8 @@ inline frame::frame() {
_deopt_state = unknown;
}
inline address frame::sender_pc() const { ShouldNotCallThis(); }
inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
_zeroframe = zf;
_sp = sp;
......
......@@ -40,7 +40,7 @@ int InlineCacheBuffer::ic_stub_code_size() {
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin,
Metadata* cached_oop,
void* cached_oop,
address entry_point) {
// NB ic_stub_code_size() must return the size of the code we generate
ShouldNotCallThis();
......@@ -51,7 +51,6 @@ address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
ShouldNotCallThis();
}
Metadata* InlineCacheBuffer::ic_buffer_cached_oop(address code_begin) {
// NB ic_stub_code_size() must return the size of the code we generate
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
ShouldNotCallThis();
}
......@@ -24,26 +24,159 @@
*/
#include "precompiled.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"
int MethodHandles::adapter_conversion_ops_supported_mask() {
return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
//|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
);
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}
void MethodHandles::generate_method_handle_stub(MacroAssembler* masm,
MethodHandles::EntryKind ek) {
init_entry(ek, (MethodHandleEntry *) ek);
void MethodHandles::invoke_target(Method* method, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
interpreterState istate = frame->interpreter_state();
// Trim back the stack to put the parameters at the top
stack->set_sp(istate->stack() + 1);
Interpreter::invoke_method(method, method->from_interpreted_entry(), THREAD);
// Convert the result
istate->set_stack(stack->sp() - 1);
}
oop MethodHandles::popFromStack(TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
interpreterState istate = frame->interpreter_state();
intptr_t* topOfStack = istate->stack();
oop top = STACK_OBJECT(-1);
MORE_STACK(-1);
istate->set_stack(topOfStack);
return top;
}
int MethodHandles::method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
interpreterState istate = frame->interpreter_state();
intptr_t* topOfStack = istate->stack();
// 'this' is a MethodHandle. We resolve the target method by accessing this.form.vmentry.vmtarget.
int numArgs = method->size_of_parameters();
oop lform1 = java_lang_invoke_MethodHandle::form(STACK_OBJECT(-numArgs)); // this.form
oop vmEntry1 = java_lang_invoke_LambdaForm::vmentry(lform1);
Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmEntry1);
invoke_target(vmtarget, THREAD);
// No deoptimized frames on the stack
return 0;
}
int MethodHandles::method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS) {
// Pop appendix argument from stack. This is a MemberName which we resolve to the
// target method.
oop vmentry = popFromStack(THREAD);
Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
invoke_target(vmtarget, THREAD);
return 0;
}
int MethodHandles::method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
interpreterState istate = frame->interpreter_state();
// Pop appendix argument from stack. This is a MemberName which we resolve to the
// target method.
oop vmentry = popFromStack(THREAD);
intptr_t* topOfStack = istate->stack();
// Resolve target method by looking up in the receiver object's itable.
Klass* clazz = java_lang_Class::as_Klass(java_lang_invoke_MemberName::clazz(vmentry));
intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry);
Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
int numArgs = target->size_of_parameters();
oop recv = STACK_OBJECT(-numArgs);
InstanceKlass* klass_part = InstanceKlass::cast(recv->klass());
itableOffsetEntry* ki = (itableOffsetEntry*) klass_part->start_of_itable();
int i;
for ( i = 0 ; i < klass_part->itable_length() ; i++, ki++ ) {
if (ki->interface_klass() == clazz) break;
}
itableMethodEntry* im = ki->first_method_entry(recv->klass());
Method* vmtarget = im[vmindex].method();
invoke_target(vmtarget, THREAD);
return 0;
}
int MethodHandles::method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
interpreterState istate = frame->interpreter_state();
// Pop appendix argument from stack. This is a MemberName which we resolve to the
// target method.
oop vmentry = popFromStack(THREAD);
intptr_t* topOfStack = istate->stack();
// Resolve target method by looking up in the receiver object's vtable.
intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry);
Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
int numArgs = target->size_of_parameters();
oop recv = STACK_OBJECT(-numArgs);
Klass* clazz = recv->klass();
Klass* klass_part = InstanceKlass::cast(clazz);
klassVtable* vtable = klass_part->vtable();
Method* vmtarget = vtable->method_at(vmindex);
invoke_target(vmtarget, THREAD);
return 0;
}
int MethodHandles::method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS) {
ShouldNotReachHere();
return 0;
}
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* masm,
vmIntrinsics::ID iid) {
switch (iid) {
case vmIntrinsics::_invokeGeneric:
case vmIntrinsics::_compiledLambdaForm:
// Perhaps surprisingly, the symbolic references visible to Java are not directly used.
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
// They all allow an appendix argument.
return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invalid);
case vmIntrinsics::_invokeBasic:
return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic);
case vmIntrinsics::_linkToStatic:
case vmIntrinsics::_linkToSpecial:
return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToStaticOrSpecial);
case vmIntrinsics::_linkToInterface:
return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToInterface);
case vmIntrinsics::_linkToVirtual:
return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToVirtual);
default:
ShouldNotReachHere();
return NULL;
}
}
......@@ -26,6 +26,14 @@
// Adapters
enum /* platform_dependent_constants */ {
adapter_code_size = 0
adapter_code_size = sizeof(ZeroEntry) * (Interpreter::method_handle_invoke_LAST - Interpreter::method_handle_invoke_FIRST + 1)
};
private:
static oop popFromStack(TRAPS);
static void invoke_target(Method* method, TRAPS);
static int method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS);
static int method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS);
static int method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS);
static int method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS);
static int method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS);
......@@ -114,5 +114,8 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
};
CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
#ifndef DONT_USE_REGISTER_DEFINES
#define noreg ((Register)(noreg_RegisterEnumValue))
#endif
#endif // CPU_ZERO_VM_REGISTER_ZERO_HPP
......@@ -77,3 +77,7 @@ void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src,
CodeBuffer* dst) {
ShouldNotCallThis();
}
void metadata_Relocation::pd_fix_value(address x) {
ShouldNotCallThis();
}
......@@ -35,6 +35,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_zero.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
......@@ -47,6 +48,12 @@
#endif
static address zero_null_code_stub() {
address start = ShouldNotCallThisStub();
return start;
}
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
int total_args_passed,
......@@ -63,16 +70,14 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(
AdapterFingerPrint *fingerprint) {
return AdapterHandlerLibrary::new_entry(
fingerprint,
ShouldNotCallThisStub(),
ShouldNotCallThisStub(),
ShouldNotCallThisStub());
CAST_FROM_FN_PTR(address,zero_null_code_stub),
CAST_FROM_FN_PTR(address,zero_null_code_stub),
CAST_FROM_FN_PTR(address,zero_null_code_stub));
}
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
methodHandle method,
int compile_id,
int total_args_passed,
int max_arg,
BasicType *sig_bt,
VMRegPair *regs,
BasicType ret_type) {
......@@ -96,19 +101,20 @@ uint SharedRuntime::out_preserve_stack_slots() {
ShouldNotCallThis();
}
JRT_LEAF(void, zero_stub())
ShouldNotCallThis();
JRT_END
static RuntimeStub* generate_empty_runtime_stub(const char* name) {
CodeBuffer buffer(name, 0, 0);
return RuntimeStub::new_runtime_stub(name, &buffer, 0, 0, NULL, false);
return CAST_FROM_FN_PTR(RuntimeStub*,zero_stub);
}
static SafepointBlob* generate_empty_safepoint_blob() {
CodeBuffer buffer("handler_blob", 0, 0);
return SafepointBlob::create(&buffer, NULL, 0);
return CAST_FROM_FN_PTR(SafepointBlob*,zero_stub);
}
static DeoptimizationBlob* generate_empty_deopt_blob() {
CodeBuffer buffer("handler_blob", 0, 0);
return DeoptimizationBlob::create(&buffer, NULL, 0, 0, 0, 0);
return CAST_FROM_FN_PTR(DeoptimizationBlob*,zero_stub);
}
......@@ -116,7 +122,7 @@ void SharedRuntime::generate_deopt_blob() {
_deopt_blob = generate_empty_deopt_blob();
}
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
return generate_empty_safepoint_blob();
}
......@@ -124,6 +130,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return generate_empty_runtime_stub("resolve_blob");
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
int total_args_passed) {
......
......@@ -342,7 +342,6 @@ BsdAttachOperation* BsdAttachListener::dequeue() {
// get the credentials of the peer and check the effective uid/guid
// - check with jeff on this.
#ifdef _ALLBSD_SOURCE
uid_t puid;
gid_t pgid;
if (::getpeereid(s, &puid, &pgid) != 0) {
......@@ -350,17 +349,6 @@ BsdAttachOperation* BsdAttachListener::dequeue() {
RESTARTABLE(::close(s), res);
continue;
}
#else
struct ucred cred_info;
socklen_t optlen = sizeof(cred_info);
if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) {
int res;
RESTARTABLE(::close(s), res);
continue;
}
uid_t puid = cred_info.uid;
gid_t pgid = cred_info.gid;
#endif
uid_t euid = geteuid();
gid_t egid = getegid();
......
......@@ -39,18 +39,12 @@
private:
#ifdef _ALLBSD_SOURCE
#ifdef __APPLE__
typedef thread_t thread_id_t;
#else
typedef pthread_t thread_id_t;
#endif
#else
typedef pid_t thread_id_t;
#endif
// _pthread_id is the pthread id, which is used by library calls
// (e.g. pthread_kill).
pthread_t _pthread_id;
......
This diff is collapsed.
......@@ -56,19 +56,6 @@ class Bsd {
static int sigflags[MAXSIGNUM];
static int (*_clock_gettime)(clockid_t, struct timespec *);
#ifndef _ALLBSD_SOURCE
static int (*_pthread_getcpuclockid)(pthread_t, clockid_t *);
static address _initial_thread_stack_bottom;
static uintptr_t _initial_thread_stack_size;
static const char *_glibc_version;
static const char *_libpthread_version;
static bool _is_floating_stack;
static bool _is_NPTL;
static bool _supports_fast_thread_cpu_time;
#endif
static GrowableArray<int>* _cpu_to_node;
......@@ -76,28 +63,14 @@ class Bsd {
static julong _physical_memory;
static pthread_t _main_thread;
#ifndef _ALLBSD_SOURCE
static Mutex* _createThread_lock;
#endif
static int _page_size;
static julong available_memory();
static julong physical_memory() { return _physical_memory; }
static void initialize_system_info();
#ifndef _ALLBSD_SOURCE
static void set_glibc_version(const char *s) { _glibc_version = s; }
static void set_libpthread_version(const char *s) { _libpthread_version = s; }
#endif
static bool supports_variable_stack_size();
#ifndef _ALLBSD_SOURCE
static void set_is_NPTL() { _is_NPTL = true; }
static void set_is_BsdThreads() { _is_NPTL = false; }
static void set_is_floating_stack() { _is_floating_stack = true; }
#endif
static void rebuild_cpu_to_node_map();
static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }
......@@ -106,25 +79,10 @@ class Bsd {
public:
static void init_thread_fpu_state();
#ifndef _ALLBSD_SOURCE
static int get_fpu_control_word();
static void set_fpu_control_word(int fpu_control);
#endif
static pthread_t main_thread(void) { return _main_thread; }
#ifndef _ALLBSD_SOURCE
// returns kernel thread id (similar to LWP id on Solaris), which can be
// used to access /proc
static pid_t gettid();
static void set_createThread_lock(Mutex* lk) { _createThread_lock = lk; }
static Mutex* createThread_lock(void) { return _createThread_lock; }
#endif
static void hotspot_sigmask(Thread* thread);
#ifndef _ALLBSD_SOURCE
static address initial_thread_stack_bottom(void) { return _initial_thread_stack_bottom; }
static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; }
#endif
static bool is_initial_thread(void);
static int page_size(void) { return _page_size; }
......@@ -161,23 +119,6 @@ class Bsd {
static struct sigaction *get_chained_signal_action(int sig);
static bool chained_handler(int sig, siginfo_t* siginfo, void* context);
#ifndef _ALLBSD_SOURCE
// GNU libc and libpthread version strings
static const char *glibc_version() { return _glibc_version; }
static const char *libpthread_version() { return _libpthread_version; }
// NPTL or BsdThreads?
static bool is_BsdThreads() { return !_is_NPTL; }
static bool is_NPTL() { return _is_NPTL; }
// NPTL is always floating stack. BsdThreads could be using floating
// stack or fixed stack.
static bool is_floating_stack() { return _is_floating_stack; }
static void libpthread_init();
static bool libnuma_init();
static void* libnuma_dlsym(void* handle, const char* name);
#endif
// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)
static size_t min_stack_allowed;
......@@ -186,22 +127,9 @@ class Bsd {
static size_t default_stack_size(os::ThreadType thr_type);
static size_t default_guard_size(os::ThreadType thr_type);
#ifndef _ALLBSD_SOURCE
static void capture_initial_stack(size_t max_size);
// Stack overflow handling
static bool manually_expand_stack(JavaThread * t, address addr);
static int max_register_window_saves_before_flushing();
#endif
// Real-time clock functions
static void clock_init(void);
#ifndef _ALLBSD_SOURCE
// fast POSIX clocks support
static void fast_thread_clock_init(void);
#endif
static inline bool supports_monotonic_clock() {
return _clock_gettime != NULL;
}
......@@ -210,18 +138,6 @@ class Bsd {
return _clock_gettime ? _clock_gettime(clock_id, tp) : -1;
}
#ifndef _ALLBSD_SOURCE
static int pthread_getcpuclockid(pthread_t tid, clockid_t *clock_id) {
return _pthread_getcpuclockid ? _pthread_getcpuclockid(tid, clock_id) : -1;
}
static bool supports_fast_thread_cpu_time() {
return _supports_fast_thread_cpu_time;
}
static jlong fast_thread_cpu_time(clockid_t clockid);
#endif
// Stack repair handling
// none present
......
......@@ -25,10 +25,6 @@
#ifndef OS_CPU_BSD_X86_VM_BYTES_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_VM_BYTES_BSD_X86_INLINE_HPP
#ifndef _ALLBSD_SOURCE
#include <byteswap.h>
#endif
#ifdef __APPLE__
#include <libkern/OSByteOrder.h>
#endif
......
......@@ -76,7 +76,7 @@
# include <ucontext.h>
#endif
#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__)
#if !defined(__APPLE__) && !defined(__NetBSD__)
# include <pthread_np.h>
#endif
......@@ -489,23 +489,6 @@ JVM_handle_bsd_signal(int sig,
// to handle_unexpected_exception way down below.
thread->disable_stack_red_zone();
tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
#ifndef _ALLBSD_SOURCE
} else {
// Accessing stack address below sp may cause SEGV if current
// thread has MAP_GROWSDOWN stack. This should only happen when
// current thread was created by user code with MAP_GROWSDOWN flag
// and then attached to VM. See notes in os_bsd.cpp.
if (thread->osthread()->expanding_stack() == 0) {
thread->osthread()->set_expanding_stack();
if (os::Bsd::manually_expand_stack(thread, addr)) {
thread->osthread()->clear_expanding_stack();
return 1;
}
thread->osthread()->clear_expanding_stack();
} else {
fatal("recursive segv. expanding stack.");
}
#endif
}
}
}
......@@ -744,61 +727,21 @@ JVM_handle_bsd_signal(int sig,
ShouldNotReachHere();
}
#ifdef _ALLBSD_SOURCE
// From solaris_i486.s ported to bsd_i486.s
extern "C" void fixcw();
#endif
void os::Bsd::init_thread_fpu_state(void) {
#ifndef AMD64
# ifdef _ALLBSD_SOURCE
// Set fpu to 53 bit precision. This happens too early to use a stub.
fixcw();
# else
// set fpu to 53 bit precision
set_fpu_control_word(0x27f);
# endif
#endif // !AMD64
}
#ifndef _ALLBSD_SOURCE
int os::Bsd::get_fpu_control_word(void) {
#ifdef AMD64
return 0;
#else
int fpu_control;
_FPU_GETCW(fpu_control);
return fpu_control & 0xffff;
#endif // AMD64
}
void os::Bsd::set_fpu_control_word(int fpu_control) {
#ifndef AMD64
_FPU_SETCW(fpu_control);
#endif // !AMD64
}
#endif
// Check that the bsd kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.
bool os::supports_sse() {
#if defined(AMD64) || defined(_ALLBSD_SOURCE)
return true;
#else
struct utsname uts;
if( uname(&uts) != 0 ) return false; // uname fails?
char *minor_string;
int major = strtol(uts.release,&minor_string,10);
int minor = strtol(minor_string+1,NULL,10);
bool result = (major > 2 || (major==2 && minor >= 4));
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print("OS version is %d.%d, which %s support SSE/SSE2\n",
major,minor, result ? "DOES" : "does NOT");
}
#endif
return result;
#endif // AMD64
}
bool os::is_allocatable(size_t bytes) {
......@@ -836,46 +779,7 @@ size_t os::Bsd::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
#endif
#ifdef _ALLBSD_SOURCE
bool os::Bsd::supports_variable_stack_size() { return true; }
#else
// Test if pthread library can support variable thread stack size. BsdThreads
// in fixed stack mode allocates 2M fixed slot for each thread. BsdThreads
// in floating stack mode and NPTL support variable stack size.
bool os::Bsd::supports_variable_stack_size() {
if (os::Bsd::is_NPTL()) {
// NPTL, yes
return true;
} else {
// Note: We can't control default stack size when creating a thread.
// If we use non-default stack size (pthread_attr_setstacksize), both
// floating stack and non-floating stack BsdThreads will return the
// same value. This makes it impossible to implement this function by
// detecting thread stack size directly.
//
// An alternative approach is to check %gs. Fixed-stack BsdThreads
// do not use %gs, so its value is 0. Floating-stack BsdThreads use
// %gs (either as LDT selector or GDT selector, depending on kernel)
// to access thread specific data.
//
// Note that %gs is a reserved glibc register since early 2001, so
// applications are not allowed to change its value (Ulrich Drepper from
// Redhat confirmed that all known offenders have been modified to use
// either %fs or TSD). In the worst case scenario, when VM is embedded in
// a native application that plays with %gs, we might see non-zero %gs
// even BsdThreads is running in fixed stack mode. As the result, we'll
// return true and skip _thread_safety_check(), so we may not be able to
// detect stack-heap collisions. But otherwise it's harmless.
//
#ifdef __GNUC__
return (GET_GS() != 0);
#else
return false;
#endif
}
}
#endif
#endif // AMD64
// return default stack size for thr_type
......@@ -943,7 +847,7 @@ static void current_stack_region(address * bottom, size_t * size) {
*bottom = (address)((char *)ss.ss_sp - ss.ss_size);
*size = ss.ss_size;
#elif defined(_ALLBSD_SOURCE)
#else
pthread_attr_t attr;
int rslt = pthread_attr_init(&attr);
......@@ -963,33 +867,6 @@ static void current_stack_region(address * bottom, size_t * size) {
}
pthread_attr_destroy(&attr);
#else
if (os::Bsd::is_initial_thread()) {
// initial thread needs special handling because pthread_getattr_np()
// may return bogus value.
*bottom = os::Bsd::initial_thread_stack_bottom();
*size = os::Bsd::initial_thread_stack_size();
} else {
pthread_attr_t attr;
int rslt = pthread_getattr_np(pthread_self(), &attr);
// JVM needs to know exact stack location, abort if it fails
if (rslt != 0) {
if (rslt == ENOMEM) {
vm_exit_out_of_memory(0, "pthread_getattr_np");
} else {
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
}
}
if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
fatal("Can not locate current stack attributes!");
}
pthread_attr_destroy(&attr);
}
#endif
assert(os::current_stack_pointer() >= *bottom &&
os::current_stack_pointer() < *bottom + *size, "just checking");
......
......@@ -23,7 +23,7 @@
*
*/
#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__)
#if !defined(__APPLE__) && !defined(__NetBSD__)
#include <pthread.h>
# include <pthread_np.h> /* For pthread_attr_get_np */
#endif
......@@ -178,26 +178,6 @@ JVM_handle_bsd_signal(int sig,
thread->disable_stack_red_zone();
ShouldNotCallThis();
}
#ifndef _ALLBSD_SOURCE
else {
// Accessing stack address below sp may cause SEGV if
// current thread has MAP_GROWSDOWN stack. This should
// only happen when current thread was created by user
// code with MAP_GROWSDOWN flag and then attached to VM.
// See notes in os_bsd.cpp.
if (thread->osthread()->expanding_stack() == 0) {
thread->osthread()->set_expanding_stack();
if (os::Bsd::manually_expand_stack(thread, addr)) {
thread->osthread()->clear_expanding_stack();
return true;
}
thread->osthread()->clear_expanding_stack();
}
else {
fatal("recursive segv. expanding stack.");
}
}
#endif
}
}
......@@ -266,16 +246,6 @@ void os::Bsd::init_thread_fpu_state(void) {
// Nothing to do
}
#ifndef _ALLBSD_SOURCE
int os::Bsd::get_fpu_control_word() {
ShouldNotCallThis();
}
void os::Bsd::set_fpu_control_word(int fpu) {
ShouldNotCallThis();
}
#endif
bool os::is_allocatable(size_t bytes) {
#ifdef _LP64
return true;
......@@ -339,7 +309,7 @@ static void current_stack_region(address *bottom, size_t *size) {
stack_top = (address) ss.ss_sp;
stack_bytes = ss.ss_size;
stack_bottom = stack_top - stack_bytes;
#elif defined(_ALLBSD_SOURCE)
#else
pthread_attr_t attr;
int rslt = pthread_attr_init(&attr);
......@@ -362,67 +332,6 @@ static void current_stack_region(address *bottom, size_t *size) {
pthread_attr_destroy(&attr);
stack_top = stack_bottom + stack_bytes;
#else /* Linux */
pthread_attr_t attr;
int res = pthread_getattr_np(pthread_self(), &attr);
if (res != 0) {
if (res == ENOMEM) {
vm_exit_out_of_memory(0, "pthread_getattr_np");
}
else {
fatal(err_msg("pthread_getattr_np failed with errno = " INT32_FORMAT,
res));
}
}
res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
if (res != 0) {
fatal(err_msg("pthread_attr_getstack failed with errno = " INT32_FORMAT,
res));
}
stack_top = stack_bottom + stack_bytes;
// The block of memory returned by pthread_attr_getstack() includes
// guard pages where present. We need to trim these off.
size_t page_bytes = os::Bsd::page_size();
assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");
size_t guard_bytes;
res = pthread_attr_getguardsize(&attr, &guard_bytes);
if (res != 0) {
fatal(err_msg(
"pthread_attr_getguardsize failed with errno = " INT32_FORMAT, res));
}
int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");
#ifdef IA64
// IA64 has two stacks sharing the same area of memory, a normal
// stack growing downwards and a register stack growing upwards.
// Guard pages, if present, are in the centre. This code splits
// the stack in two even without guard pages, though in theory
// there's nothing to stop us allocating more to the normal stack
// or more to the register stack if one or the other were found
// to grow faster.
int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
#endif // IA64
stack_bottom += guard_bytes;
pthread_attr_destroy(&attr);
// The initial thread has a growable stack, and the size reported
// by pthread_attr_getstack is the maximum size it could possibly
// be given what currently mapped. This can be huge, so we cap it.
if (os::Bsd::is_initial_thread()) {
stack_bytes = stack_top - stack_bottom;
if (stack_bytes > JavaThread::stack_size_at_create())
stack_bytes = JavaThread::stack_size_at_create();
stack_bottom = stack_top - stack_bytes;
}
#endif
assert(os::current_stack_pointer() >= stack_bottom, "should do");
......
......@@ -85,9 +85,11 @@ void end_of_file() { }
#include "dlfcn.h"
#define DECODE_INSTRUCTIONS_NAME "decode_instructions_virtual"
#define DECODE_INSTRUCTIONS_VIRTUAL_NAME "decode_instructions_virtual"
#define DECODE_INSTRUCTIONS_NAME "decode_instructions"
#define HSDIS_NAME "hsdis"
static void* decode_instructions_pv = 0;
static void* decode_instructions_sv = 0;
static const char* hsdis_path[] = {
HSDIS_NAME"-"LIBARCH LIB_EXT,
"./" HSDIS_NAME"-"LIBARCH LIB_EXT,
......@@ -101,11 +103,12 @@ static const char* load_decode_instructions() {
void* dllib = NULL;
const char* *next_in_path = hsdis_path;
while (1) {
decode_instructions_pv = dlsym(dllib, DECODE_INSTRUCTIONS_NAME);
if (decode_instructions_pv != NULL)
decode_instructions_pv = dlsym(dllib, DECODE_INSTRUCTIONS_VIRTUAL_NAME);
decode_instructions_sv = dlsym(dllib, DECODE_INSTRUCTIONS_NAME);
if (decode_instructions_pv != NULL || decode_instructions_sv != NULL)
return NULL;
if (dllib != NULL)
return "plugin does not define "DECODE_INSTRUCTIONS_NAME;
return "plugin does not define "DECODE_INSTRUCTIONS_VIRTUAL_NAME" and "DECODE_INSTRUCTIONS_NAME;
for (dllib = NULL; dllib == NULL; ) {
const char* next_lib = (*next_in_path++);
if (next_lib == NULL)
......@@ -213,20 +216,44 @@ void disassemble(uintptr_t from, uintptr_t to) {
printf("%s: %s\n", err, dlerror());
exit(1);
}
printf("Decoding from %p to %p...\n", from, to);
decode_instructions_ftype decode_instructions
= (decode_instructions_ftype) decode_instructions_pv;
decode_func_vtype decode_instructions_v
= (decode_func_vtype) decode_instructions_pv;
decode_func_stype decode_instructions_s
= (decode_func_stype) decode_instructions_sv;
void* res;
if (raw && xml) {
res = (*decode_instructions)(from, to, (unsigned char*)from, to - from, simple_handle_event, stdout, NULL, stdout, options);
} else if (raw) {
res = (*decode_instructions)(from, to, (unsigned char*)from, to - from, simple_handle_event, stdout, NULL, stdout, options);
} else {
res = (*decode_instructions)(from, to, (unsigned char*)from, to - from,
handle_event, (void*) event_cookie,
fprintf_callback, stdout,
options);
if (decode_instructions_pv != NULL) {
printf("\nDecoding from %p to %p...with %s\n", from, to, DECODE_INSTRUCTIONS_VIRTUAL_NAME);
if (raw) {
res = (*decode_instructions_v)(from, to,
(unsigned char*)from, to - from,
simple_handle_event, stdout,
NULL, stdout,
options, 0);
} else {
res = (*decode_instructions_v)(from, to,
(unsigned char*)from, to - from,
handle_event, (void*) event_cookie,
fprintf_callback, stdout,
options, 0);
}
if (res != (void*)to)
printf("*** Result was %p!\n", res);
}
void* sres;
if (decode_instructions_sv != NULL) {
printf("\nDecoding from %p to %p...with %s\n", from, to, DECODE_INSTRUCTIONS_NAME);
if (raw) {
sres = (*decode_instructions_s)(from, to,
simple_handle_event, stdout,
NULL, stdout,
options);
} else {
sres = (*decode_instructions_s)(from, to,
handle_event, (void*) event_cookie,
fprintf_callback, stdout,
options);
}
if (sres != (void *)to)
printf("*** Result of decode_instructions %p!\n", sres);
}
if (res != (void*)to)
printf("*** Result was %p!\n", res);
}
......@@ -99,7 +99,7 @@ decode_instructions_virtual(uintptr_t start_va, uintptr_t end_va,
unsigned char* buffer, uintptr_t length,
event_callback_t event_callback_arg, void* event_stream_arg,
printf_callback_t printf_callback_arg, void* printf_stream_arg,
const char* options) {
const char* options, int newline) {
struct hsdis_app_data app_data;
memset(&app_data, 0, sizeof(app_data));
app_data.start_va = start_va;
......@@ -110,7 +110,7 @@ decode_instructions_virtual(uintptr_t start_va, uintptr_t end_va,
app_data.event_stream = event_stream_arg;
app_data.printf_callback = printf_callback_arg;
app_data.printf_stream = printf_stream_arg;
app_data.do_newline = false;
app_data.do_newline = newline == 0 ? false : true;
return decode(&app_data, options);
}
......@@ -132,7 +132,7 @@ decode_instructions(void* start_pv, void* end_pv,
event_stream_arg,
printf_callback_arg,
printf_stream_arg,
options);
options, false);
}
static void* decode(struct hsdis_app_data* app_data, const char* options) {
......@@ -173,7 +173,7 @@ static void* decode(struct hsdis_app_data* app_data, const char* options) {
if (!app_data->losing) {
const char* insn_close = format_insn_close("/insn", &app_data->dinfo,
buf, sizeof(buf));
(*event_callback)(event_stream, insn_close, (void*) p) != NULL;
(*event_callback)(event_stream, insn_close, (void*) p);
if (app_data->do_newline) {
/* follow each complete insn by a nice newline */
......@@ -182,13 +182,14 @@ static void* decode(struct hsdis_app_data* app_data, const char* options) {
}
}
(*event_callback)(event_stream, "/insns", (void*) p);
if (app_data->losing) (*event_callback)(event_stream, "/insns", (void*) p);
return (void*) p;
}
}
/* take the address of the function, for luck, and also test the typedef: */
const decode_instructions_ftype decode_instructions_address = &decode_instructions_virtual;
const decode_func_vtype decode_func_virtual_address = &decode_instructions_virtual;
const decode_func_stype decode_func_address = &decode_instructions;
static const char* format_insn_close(const char* close,
disassemble_info* dinfo,
......
......@@ -47,6 +47,9 @@
where tag is a simple identifier, signifying (as in XML) an element start,
an element end, or a standalone element. (To render as XML, add angle brackets.)
*/
#ifndef SHARED_TOOLS_HSDIS_H
#define SHARED_TOOLS_HSDIS_H
extern
#ifdef DLL_EXPORT
DLL_EXPORT
......@@ -57,16 +60,37 @@ void* decode_instructions_virtual(uintptr_t start_va, uintptr_t end_va,
void* event_stream,
int (*printf_callback)(void*, const char*, ...),
void* printf_stream,
const char* options);
const char* options,
int newline /* bool value for nice new line */);
/* This is the compatability interface for older versions of hotspot */
extern
#ifdef DLL_ENTRY
DLL_ENTRY
#endif
void* decode_instructions(void* start_pv, void* end_pv,
void* (*event_callback)(void*, const char*, void*),
void* event_stream,
int (*printf_callback)(void*, const char*, ...),
void* printf_stream,
const char* options);
/* convenience typedefs */
typedef void* (*decode_instructions_event_callback_ftype) (void*, const char*, void*);
typedef int (*decode_instructions_printf_callback_ftype) (void*, const char*, ...);
typedef void* (*decode_instructions_ftype) (uintptr_t start_va, uintptr_t end_va,
unsigned char* buffer, uintptr_t length,
decode_instructions_event_callback_ftype event_callback,
void* event_stream,
decode_instructions_printf_callback_ftype printf_callback,
void* printf_stream,
const char* options);
typedef void* (*decode_func_vtype) (uintptr_t start_va, uintptr_t end_va,
unsigned char* buffer, uintptr_t length,
decode_instructions_event_callback_ftype event_callback,
void* event_stream,
decode_instructions_printf_callback_ftype printf_callback,
void* printf_stream,
const char* options,
int newline);
typedef void* (*decode_func_stype) (void* start_pv, void* end_pv,
decode_instructions_event_callback_ftype event_callback,
void* event_stream,
decode_instructions_printf_callback_ftype printf_callback,
void* printf_stream,
const char* options);
#endif /* SHARED_TOOLS_HSDIS_H */
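Editor's sketch of a client using this header (the plugin file name is an assumption; the resolve-then-fall-back order mirrors hsdis-demo.c above):

#include <dlfcn.h>
#include <cstdio>
#include "hsdis.h"

int main() {
  void* lib = dlopen("hsdis-amd64.so", RTLD_NOW);  // plugin name/path assumed
  if (lib == NULL) { fprintf(stderr, "%s\n", dlerror()); return 1; }
  // Prefer the new virtual-address entry point, fall back to the legacy one.
  decode_func_vtype decode_v =
      (decode_func_vtype) dlsym(lib, "decode_instructions_virtual");
  decode_func_stype decode_s =
      (decode_func_stype) dlsym(lib, "decode_instructions");
  if (decode_v == NULL && decode_s == NULL) {
    fprintf(stderr, "plugin defines neither entry point\n");
    return 1;
  }
  return 0;
}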
......@@ -758,7 +758,7 @@ void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
}
}
if (dest->blob() == NULL) {
if (dest->blob() == NULL && dest_filled != NULL) {
// Destination is a final resting place, not just another buffer.
// Normalize uninitialized bytes in the final padding.
Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
......
This diff is collapsed.
......@@ -151,6 +151,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
ClassLoaderData* loader_data,
Handle protection_domain,
Symbol* class_name,
bool* has_default_methods,
TRAPS);
void record_defined_class_dependencies(instanceKlassHandle defined_klass, TRAPS);
......@@ -188,6 +189,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
Array<AnnotationArray*>** methods_annotations,
Array<AnnotationArray*>** methods_parameter_annotations,
Array<AnnotationArray*>** methods_default_annotations,
bool* has_default_method,
TRAPS);
Array<int>* sort_methods(ClassLoaderData* loader_data,
Array<Method*>* methods,
......
This diff is collapsed.
......@@ -137,6 +137,7 @@ class SymbolPropertyTable;
/* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
do_klass(lambda_MagicLambdaImpl_klass, java_lang_invoke_MagicLambdaImpl, Opt ) \
do_klass(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt ) \
do_klass(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
do_klass(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
......
......@@ -555,9 +555,10 @@ void ClassVerifier::verify_class(TRAPS) {
if (was_recursively_verified()) return;
Method* m = methods->at(index);
if (m->is_native() || m->is_abstract()) {
if (m->is_native() || m->is_abstract() || m->is_overpass()) {
// If m is native or abstract, skip it. It is checked in class file
// parser that methods do not override a final method.
// parser that methods do not override a final method. Overpass methods
// are trusted since the VM generates them.
continue;
}
verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
......@@ -2304,11 +2305,21 @@ void ClassVerifier::verify_invoke_instructions(
// Make sure the constant pool item is the right type
u2 index = bcs->get_index_u2();
Bytecodes::Code opcode = bcs->raw_code();
unsigned int types = (opcode == Bytecodes::_invokeinterface
? 1 << JVM_CONSTANT_InterfaceMethodref
: opcode == Bytecodes::_invokedynamic
? 1 << JVM_CONSTANT_InvokeDynamic
: 1 << JVM_CONSTANT_Methodref);
unsigned int types;
switch (opcode) {
case Bytecodes::_invokeinterface:
types = 1 << JVM_CONSTANT_InterfaceMethodref;
break;
case Bytecodes::_invokedynamic:
types = 1 << JVM_CONSTANT_InvokeDynamic;
break;
case Bytecodes::_invokespecial:
types = (1 << JVM_CONSTANT_InterfaceMethodref) |
(1 << JVM_CONSTANT_Methodref);
break;
default:
types = 1 << JVM_CONSTANT_Methodref;
}
verify_cp_type(bcs->bci(), index, cp, types, CHECK_VERIFY(this));
// Get method name and signature
......
This diff is collapsed.