Commit af12d942 authored by amurillo

Merge

Parents: b0c5d2ff 4023b172
......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
HS_MAJOR_VER=25
HS_MINOR_VER=20
HS_BUILD_NUMBER=03
HS_BUILD_NUMBER=04
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
......@@ -88,6 +88,7 @@ class Assembler : public AbstractAssembler {
orncc_op3 = 0x16,
xnorcc_op3 = 0x17,
addccc_op3 = 0x18,
aes4_op3 = 0x19,
umulcc_op3 = 0x1a,
smulcc_op3 = 0x1b,
subccc_op3 = 0x1c,
......@@ -121,6 +122,8 @@ class Assembler : public AbstractAssembler {
fpop1_op3 = 0x34,
fpop2_op3 = 0x35,
impdep1_op3 = 0x36,
aes3_op3 = 0x36,
flog3_op3 = 0x36,
impdep2_op3 = 0x37,
jmpl_op3 = 0x38,
rett_op3 = 0x39,
......@@ -172,41 +175,56 @@ class Assembler : public AbstractAssembler {
enum opfs {
// selected opfs
fmovs_opf = 0x01,
fmovd_opf = 0x02,
fnegs_opf = 0x05,
fnegd_opf = 0x06,
fadds_opf = 0x41,
faddd_opf = 0x42,
fsubs_opf = 0x45,
fsubd_opf = 0x46,
fmuls_opf = 0x49,
fmuld_opf = 0x4a,
fdivs_opf = 0x4d,
fdivd_opf = 0x4e,
fcmps_opf = 0x51,
fcmpd_opf = 0x52,
fstox_opf = 0x81,
fdtox_opf = 0x82,
fxtos_opf = 0x84,
fxtod_opf = 0x88,
fitos_opf = 0xc4,
fdtos_opf = 0xc6,
fitod_opf = 0xc8,
fstod_opf = 0xc9,
fstoi_opf = 0xd1,
fdtoi_opf = 0xd2,
mdtox_opf = 0x110,
mstouw_opf = 0x111,
mstosw_opf = 0x113,
mxtod_opf = 0x118,
mwtos_opf = 0x119
fmovs_opf = 0x01,
fmovd_opf = 0x02,
fnegs_opf = 0x05,
fnegd_opf = 0x06,
fadds_opf = 0x41,
faddd_opf = 0x42,
fsubs_opf = 0x45,
fsubd_opf = 0x46,
fmuls_opf = 0x49,
fmuld_opf = 0x4a,
fdivs_opf = 0x4d,
fdivd_opf = 0x4e,
fcmps_opf = 0x51,
fcmpd_opf = 0x52,
fstox_opf = 0x81,
fdtox_opf = 0x82,
fxtos_opf = 0x84,
fxtod_opf = 0x88,
fitos_opf = 0xc4,
fdtos_opf = 0xc6,
fitod_opf = 0xc8,
fstod_opf = 0xc9,
fstoi_opf = 0xd1,
fdtoi_opf = 0xd2,
mdtox_opf = 0x110,
mstouw_opf = 0x111,
mstosw_opf = 0x113,
mxtod_opf = 0x118,
mwtos_opf = 0x119,
aes_kexpand0_opf = 0x130,
aes_kexpand2_opf = 0x131
};
enum op5s {
aes_eround01_op5 = 0x00,
aes_eround23_op5 = 0x01,
aes_dround01_op5 = 0x02,
aes_dround23_op5 = 0x03,
aes_eround01_l_op5 = 0x04,
aes_eround23_l_op5 = 0x05,
aes_dround01_l_op5 = 0x06,
aes_dround23_l_op5 = 0x07,
aes_kexpand1_op5 = 0x08
};
enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7, rc_last = rc_gez };
......@@ -427,6 +445,7 @@ class Assembler : public AbstractAssembler {
static int immed( bool i) { return u_field(i ? 1 : 0, 13, 13); }
static int opf_low6( int w) { return u_field(w, 10, 5); }
static int opf_low5( int w) { return u_field(w, 9, 5); }
static int op5( int x) { return u_field(x, 8, 5); }
static int trapcc( CC cc) { return u_field(cc, 12, 11); }
static int sx( int i) { return u_field(i, 12, 12); } // shift x=1 means 64-bit
static int opf( int x) { return u_field(x, 13, 5); }
......@@ -451,6 +470,7 @@ class Assembler : public AbstractAssembler {
static int fd( FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
static int fs1(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
static int fs2(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 4, 0); };
static int fs3(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 13, 9); };
// some float instructions use this encoding on the op3 field
static int alt_op3(int op, FloatRegisterImpl::Width w) {
......@@ -559,6 +579,12 @@ class Assembler : public AbstractAssembler {
return x & ((1 << 10) - 1);
}
// AES crypto instructions supported only on certain processors
static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instructions support"); }
// instruction only in VIS1
static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
// instruction only in VIS3
static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
......@@ -682,6 +708,24 @@ public:
void addccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// 4-operand AES instructions
void aes_eround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
void aes_eround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
void aes_dround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
void aes_dround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
void aes_eround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
void aes_eround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
void aes_dround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
void aes_dround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
void aes_kexpand1( FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | u_field(imm5a, 13, 9) | op5(aes_kexpand1_op5) | fs2(s2, FloatRegisterImpl::D) ); }
// 3-operand AES instructions
void aes_kexpand0( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand0_opf) | fs2(s2, FloatRegisterImpl::D) ); }
void aes_kexpand2( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand2_opf) | fs2(s2, FloatRegisterImpl::D) ); }
// pp 136
inline void bpr(RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none);
......@@ -784,6 +828,10 @@ public:
void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); }
void fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x4c + w) | fs2(s2, w)); }
// FXORs/FXORd instructions
void fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(flog3_op3) | fs1(s1, w) | opf(0x6E - w) | fs2(s2, w)); }
// pp 164
void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
......
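The emitters above compose each 32-bit SPARC instruction by OR-ing bit fields together. A minimal standalone sketch of that scheme (not HotSpot code: the op3 bit range 24:19 and the %d-register encodings are assumptions based on the standard SPARC format-3 layout; the fd/fs3/op5 ranges come from the hunk itself):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Place x into bits [hi:lo] of an instruction word, as u_field() does.
static uint32_t u_field(uint32_t x, int hi, int lo) {
  assert(x < (1u << (hi - lo + 1)) && "value too wide for field");
  return x << lo;
}

int main() {
  // Hypothetical AES_EROUND01 %d0, %d2, %d4, %d6, packed the way the new
  // aes_eround01() emitter does (for %d0..%d30 the encoding is the number).
  uint32_t insn = u_field(2,    31, 30)   // op:  arith_op
                | u_field(6,    29, 25)   // rd:  %d6
                | u_field(0x19, 24, 19)   // op3: aes4_op3
                | u_field(0,    18, 14)   // rs1: %d0
                | u_field(4,    13,  9)   // rs3: %d4 (fs3 field)
                | u_field(0x00,  8,  5)   // op5: aes_eround01_op5
                | u_field(2,     4,  0);  // rs2: %d2
  printf("0x%08x\n", insn);               // 0x8cc80802
  return 0;
}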
......@@ -1848,6 +1848,12 @@ const bool Matcher::misaligned_vectors_ok() {
return false;
}
// Current (2013) SPARC platforms need to read original key
// to construct decryption expanded key
const bool Matcher::pass_original_key_for_aes() {
return true;
}
// USII supports fxtof through the whole range of numbers, USIII doesn't
const bool Matcher::convL2FSupported(void) {
return VM_Version::has_fast_fxtof();
......
......@@ -234,7 +234,7 @@ void VM_Version::initialize() {
assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
char buf[512];
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
(has_hardware_popc() ? ", popc" : ""),
(has_vis1() ? ", vis1" : ""),
......@@ -242,6 +242,7 @@ void VM_Version::initialize() {
(has_vis3() ? ", vis3" : ""),
(has_blk_init() ? ", blk_init" : ""),
(has_cbcond() ? ", cbcond" : ""),
(has_aes() ? ", aes" : ""),
(is_ultra3() ? ", ultra3" : ""),
(is_sun4v() ? ", sun4v" : ""),
(is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
......@@ -265,6 +266,41 @@ void VM_Version::initialize() {
if (!has_vis1()) // Drop to 0 if no VIS1 support
UseVIS = 0;
// T2 and above should have support for AES instructions
if (has_aes()) {
if (UseVIS > 0) { // AES intrinsics use FXOR instruction which is VIS1
if (FLAG_IS_DEFAULT(UseAES)) {
FLAG_SET_DEFAULT(UseAES, true);
}
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
FLAG_SET_DEFAULT(UseAESIntrinsics, true);
}
// we disable both the AES flags if either of them is disabled on the command line
if (!UseAES || !UseAESIntrinsics) {
FLAG_SET_DEFAULT(UseAES, false);
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
} else {
if (UseAES || UseAESIntrinsics) {
warning("SPARC AES intrinsics require VIS1 instruction support. Intrinsics will be disabled.");
if (UseAES) {
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics) {
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
}
} else if (UseAES || UseAESIntrinsics) {
warning("AES instructions are not available on this CPU");
if (UseAES) {
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics) {
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
(cache_line_size > ContendedPaddingWidth))
ContendedPaddingWidth = cache_line_size;
......
......@@ -48,7 +48,8 @@ protected:
sparc64_family = 14,
M_family = 15,
T_family = 16,
T1_model = 17
T1_model = 17,
aes_instructions = 18
};
enum Feature_Flag_Set {
......@@ -73,6 +74,7 @@ protected:
M_family_m = 1 << M_family,
T_family_m = 1 << T_family,
T1_model_m = 1 << T1_model,
aes_instructions_m = 1 << aes_instructions,
generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
generic_v9_m = generic_v8_m | v9_instructions_m,
......@@ -123,6 +125,7 @@ public:
static bool has_vis3() { return (_features & vis3_instructions_m) != 0; }
static bool has_blk_init() { return (_features & blk_init_instructions_m) != 0; }
static bool has_cbcond() { return (_features & cbcond_instructions_m) != 0; }
static bool has_aes() { return (_features & aes_instructions_m) != 0; }
static bool supports_compare_and_exchange()
{ return has_v9(); }
......
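The new aes_instructions bit follows the existing feature-mask pattern: a bit position in Feature_Flag, a derived mask in Feature_Flag_Set, and a has_*() accessor that tests it. A self-contained sketch of just that pattern (values copied from the hunk above; the rest is illustrative):

#include <cstdio>

enum Feature_Flag     { aes_instructions   = 18 };
enum Feature_Flag_Set { aes_instructions_m = 1 << aes_instructions };

static int _features = aes_instructions_m;   // pretend the platform probe set it
static bool has_aes() { return (_features & aes_instructions_m) != 0; }

int main() { printf("aes: %s\n", has_aes() ? "yes" : "no"); return 0; }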
......@@ -2403,6 +2403,9 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
// Output:
// rax - input length
//
address generate_cipherBlockChaining_encryptAESCrypt() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
......@@ -2483,7 +2486,7 @@ class StubGenerator: public StubCodeGenerator {
__ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object
handleSOERegisters(false /*restoring*/);
__ movl(rax, 0); // return 0 (why?)
__ movptr(rax, len_param); // return length
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
......@@ -2557,6 +2560,9 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
// Output:
// rax - input length
//
address generate_cipherBlockChaining_decryptAESCrypt() {
assert(UseAES, "need AES instructions and misaligned SSE support");
......@@ -2650,7 +2656,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(rvec , rvec_param); // restore this since used in loop
__ movdqu(Address(rvec, 0), xmm_temp); // final value of r stored in rvec of CipherBlockChaining object
handleSOERegisters(false /*restoring*/);
__ movl(rax, 0); // return 0 (why?)
__ movptr(rax, len_param); // return length
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
......
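Both 32-bit stubs now return the number of input bytes in rax instead of a hard-coded 0, which the vmSymbols change later in this commit exposes to Java as an int return value. A toy model of the revised contract (plain C++, not the generated assembly; the AES body is a placeholder):

#include <cstddef>
#include <cstdio>

typedef unsigned char u8;

// Process len bytes (len is a multiple of the 16-byte AES block) and
// return len to the caller, where the old stubs returned 0.
static size_t cbc_encrypt_blocks(const u8* src, u8* dst, size_t len) {
  for (size_t pos = 0; pos < len; pos += 16) {
    // ... XOR with the r vector, run the AES rounds, update r ...
    for (int i = 0; i < 16; i++) dst[pos + i] = src[pos + i];  // placeholder
  }
  return len;
}

int main() {
  u8 in[32] = {0}, out[32];
  printf("%zu\n", cbc_encrypt_blocks(in, out, sizeof in));  // prints 32
  return 0;
}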
......@@ -3217,6 +3217,9 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
// Output:
// rax - input length
//
address generate_cipherBlockChaining_encryptAESCrypt() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
......@@ -3232,7 +3235,7 @@ class StubGenerator: public StubCodeGenerator {
#ifndef _WIN64
const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
#else
const Address len_mem(rsp, 6 * wordSize); // length is on stack on Win64
const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
const Register len_reg = r10; // pick the first volatile windows register
#endif
const Register pos = rax;
......@@ -3259,6 +3262,8 @@ class StubGenerator: public StubCodeGenerator {
for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
__ movdqu(xmm_save(i), as_XMMRegister(i));
}
#else
__ push(len_reg); // Save
#endif
const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front
......@@ -3301,8 +3306,10 @@ class StubGenerator: public StubCodeGenerator {
for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
__ movdqu(as_XMMRegister(i), xmm_save(i));
}
__ movl(rax, len_mem);
#else
__ pop(rax); // return length
#endif
__ movl(rax, 0); // return 0 (why?)
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
......@@ -3409,6 +3416,9 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
// Output:
// rax - input length
//
address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
assert(UseAES, "need AES instructions and misaligned SSE support");
......@@ -3427,7 +3437,7 @@ class StubGenerator: public StubCodeGenerator {
#ifndef _WIN64
const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
#else
const Address len_mem(rsp, 6 * wordSize); // length is on stack on Win64
const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
const Register len_reg = r10; // pick the first volatile windows register
#endif
const Register pos = rax;
......@@ -3448,7 +3458,10 @@ class StubGenerator: public StubCodeGenerator {
for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
__ movdqu(xmm_save(i), as_XMMRegister(i));
}
#else
__ push(len_reg); // Save
#endif
// the java expanded key ordering is rotated one position from what we want
// so we start from 0x10 here and hit 0x00 last
const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
......@@ -3554,8 +3567,10 @@ class StubGenerator: public StubCodeGenerator {
for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
__ movdqu(as_XMMRegister(i), xmm_save(i));
}
__ movl(rax, len_mem);
#else
__ pop(rax); // return length
#endif
__ movl(rax, 0); // return 0 (why?)
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
......
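Two fixes interact in the 64-bit stubs: on Win64 the length argument lives on the stack and is now addressed off rbp rather than rsp, because enter fixes rbp at frame entry while later pushes and XMM spills keep moving rsp; on other platforms the length register is simply pushed and popped around the loop. An arithmetic sketch of the rbp-vs-rsp point (all addresses and offsets here are illustrative assumptions, not the stub's real layout):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t wordSize = 8;
  uint64_t entry_rsp = 0x7fffffffe000;            // rsp just after the call
  uint64_t arg_len   = entry_rsp + 5 * wordSize;  // stacked arg past home area
  uint64_t rbp       = entry_rsp - wordSize;      // enter: push rbp...
  uint64_t rsp       = rbp;                       // ...mov rbp, rsp
  rsp -= 10 * 16;                                 // later XMM spills move rsp
  printf("rbp+6*wordSize hits arg: %s\n", rbp + 6 * wordSize == arg_len ? "yes" : "no");
  printf("rsp+6*wordSize hits arg: %s\n", rsp + 6 * wordSize == arg_len ? "yes" : "no");
  return 0;
}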
......@@ -581,6 +581,12 @@ const bool Matcher::misaligned_vectors_ok() {
return !AlignVector; // can be changed by flag
}
// x86 AES instructions are compatible with SunJCE expanded
// keys, hence we do not need to pass the original key to stubs
const bool Matcher::pass_original_key_for_aes() {
return false;
}
// Helper methods for MachSpillCopyNode::implementation().
static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st) {
......
......@@ -119,6 +119,11 @@ int VM_Version::platform_features(int features) {
#endif
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
#ifndef AV_SPARC_AES
#define AV_SPARC_AES 0x00020000 /* aes instrs supported */
#endif
if (av & AV_SPARC_AES) features |= aes_instructions_m;
} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
......
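For reference, the probe is the Solaris getisax(2) call mentioned in the hunk; a standalone sketch of the same check (Solaris/SPARC only, using the same fallback #define as the patch):

#include <sys/auxv.h>   // Solaris: getisax(2)
#include <stdint.h>
#include <stdio.h>

#ifndef AV_SPARC_AES
#define AV_SPARC_AES 0x00020000 /* aes instrs supported */
#endif

int main() {
  uint32_t av = 0;
  (void) getisax(&av, 1);   // fills av with AV_SPARC_* capability bits
  printf("AES instructions: %s\n", (av & AV_SPARC_AES) ? "yes" : "no");
  return 0;
}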
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -520,6 +520,13 @@ void ClassLoaderData::verify() {
}
}
bool ClassLoaderData::contains_klass(Klass* klass) {
for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
if (k == klass) return true;
}
return false;
}
// GC root of class loader data created.
ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
......@@ -648,12 +655,12 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
return array;
}
#ifndef PRODUCT
// for debugging and hsfind(x)
bool ClassLoaderDataGraph::contains(address x) {
// I think we need the _metaspace_lock taken here because the class loader
// data graph could be changing while we are walking it (new entries added,
// new entries being unloaded, etc).
// For profiling and hsfind() only. Otherwise, this is unsafe (and slow). This
// is done lock free to avoid lock inversion problems. It is safe because
// new ClassLoaderData are added to the end of the CLDG, and only removed at
// safepoint. The _unloading list can be deallocated concurrently with CMS so
// this doesn't look in metaspace for classes that have been unloaded.
bool ClassLoaderDataGraph::contains(const void* x) {
if (DumpSharedSpaces) {
// There are only two metaspaces to worry about.
ClassLoaderData* ncld = ClassLoaderData::the_null_class_loader_data();
......@@ -670,16 +677,11 @@ bool ClassLoaderDataGraph::contains(address x) {
}
}
// Could also be on an unloading list which is okay, ie. still allocated
// for a little while.
for (ClassLoaderData* ucld = _unloading; ucld != NULL; ucld = ucld->next()) {
if (ucld->metaspace_or_null() != NULL && ucld->metaspace_or_null()->contains(x)) {
return true;
}
}
// Do not check unloading list because deallocation can be concurrent.
return false;
}
#ifndef PRODUCT
bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
if (loader_data == data) {
......
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -90,9 +90,9 @@ class ClassLoaderDataGraph : public AllStatic {
static void dump() { dump_on(tty); }
static void verify();
#ifndef PRODUCT
// expensive test for pointer in metaspace for debugging
static bool contains(address x);
static bool contains(const void* x);
#ifndef PRODUCT
static bool contains_loader_data(ClassLoaderData* loader_data);
#endif
......@@ -260,6 +260,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
jobject add_handle(Handle h);
void add_class(Klass* k);
void remove_class(Klass* k);
bool contains_klass(Klass* k);
void record_dependency(Klass* to, TRAPS);
void init_dependencies(TRAPS);
......
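The safety argument in the rewritten contains() comment is worth spelling out: a single writer publishes fully-built nodes, and unlinking happens only at a safepoint, so a reader may walk the list without the lock. A toy model under those assumptions (it prepends rather than appends for brevity, and uses portable C++ atomics where HotSpot relies on its own ordering primitives):

#include <atomic>
#include <cstdio>

struct CLD {
  int id;
  CLD* next;
};

static std::atomic<CLD*> head{nullptr};

static void publish(CLD* c) {                // single writer, under a lock
  c->next = head.load(std::memory_order_relaxed);
  head.store(c, std::memory_order_release);  // publish the fully-built node
}

static bool contains_id(int id) {            // lock-free reader
  for (CLD* c = head.load(std::memory_order_acquire); c != nullptr; c = c->next)
    if (c->id == id) return true;
  return false;
}

int main() {
  static CLD a = {1, nullptr}, b = {2, nullptr};
  publish(&a); publish(&b);
  printf("%d\n", contains_id(2));            // prints 1
  return 0;
}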
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -707,7 +707,7 @@ void Dictionary::verify() {
loader_data->class_loader() == NULL ||
loader_data->class_loader()->is_instance(),
"checking type of class_loader");
e->verify(/*check_dictionary*/false);
e->verify();
probe->verify_protection_domain_set();
element_count++;
}
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -2650,23 +2650,6 @@ void SystemDictionary::verify() {
constraints()->verify(dictionary(), placeholders());
}
void SystemDictionary::verify_obj_klass_present(Symbol* class_name,
ClassLoaderData* loader_data) {
GCMutexLocker mu(SystemDictionary_lock);
Symbol* name;
Klass* probe = find_class(class_name, loader_data);
if (probe == NULL) {
probe = SystemDictionary::find_shared_class(class_name);
if (probe == NULL) {
name = find_placeholder(class_name, loader_data);
}
}
guarantee(probe != NULL || name != NULL,
"Loaded klasses should be in SystemDictionary");
}
// utility function for class load event
void SystemDictionary::post_class_load_event(const Ticks& start_time,
instanceKlassHandle k,
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -375,10 +375,6 @@ public:
static bool is_internal_format(Symbol* class_name);
#endif
// Verify class is in dictionary
static void verify_obj_klass_present(Symbol* class_name,
ClassLoaderData* loader_data);
// Initialization
static void initialize(TRAPS);
......
......@@ -787,7 +787,7 @@
do_intrinsic(_cipherBlockChaining_decryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, decrypt_name, byteArray_int_int_byteArray_int_signature, F_R) \
do_name( encrypt_name, "encrypt") \
do_name( decrypt_name, "decrypt") \
do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)V") \
do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)I") \
\
/* support for java.util.zip */ \
do_class(java_util_zip_CRC32, "java/util/zip/CRC32") \
......
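The descriptor change is one character, but it is the Java-visible half of the stub changes above: "([BII[BI)V" declared encrypt/decrypt as void, while "([BII[BI)I" makes them return the int length the stubs now place in rax. A tiny hypothetical helper (not a HotSpot API) that reads the return-type character of such a descriptor:

#include <cstdio>
#include <cstring>

// JVM method descriptors put the return type after ')':
// "([BII[BI)I" == (byte[], int, int, byte[], int) -> int.
static char descriptor_return_type(const char* sig) {
  const char* p = strchr(sig, ')');
  return p ? p[1] : '\0';
}

int main() {
  printf("%c\n", descriptor_return_type("([BII[BI)V"));  // V: old, void
  printf("%c\n", descriptor_return_type("([BII[BI)I"));  // I: new, int length
  return 0;
}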
......@@ -655,8 +655,6 @@ inline Metadata* Dependencies::DepStream::recorded_metadata_at(int i) {
} else {
o = _deps->oop_recorder()->metadata_at(i);
}
assert(o == NULL || o->is_metaspace_object(),
err_msg("Should be metadata " PTR_FORMAT, o));
return o;
}
......
......@@ -55,6 +55,9 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
const int chunk_factor = 32;
if (_chunk == NULL || _chunk + real_size > _chunk_end) {
const int bytes = chunk_factor * real_size + pd_code_alignment();
// There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
// If changing the name, update the other file accordingly.
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
return NULL;
......@@ -62,12 +65,6 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
_chunk = blob->content_begin();
_chunk_end = _chunk + bytes;
Forte::register_stub("vtable stub", _chunk, _chunk_end);
// Notify JVMTI about this stub. The event will be recorded by the enclosing
// JvmtiDynamicCodeEventCollector and posted when this thread has released
// all locks.
if (JvmtiExport::should_post_dynamic_code_generated()) {
JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
}
align_chunk();
}
assert(_chunk + real_size <= _chunk_end, "bad allocation");
......@@ -130,6 +127,13 @@ address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location());
Disassembler::decode(s->code_begin(), s->code_end());
}
// Notify JVMTI about this stub. The event will be recorded by the enclosing
// JvmtiDynamicCodeEventCollector and posted when this thread has released
// all locks.
if (JvmtiExport::should_post_dynamic_code_generated()) {
JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
s->code_begin(), s->code_end());
}
}
return s->entry_point();
}
......@@ -195,6 +199,14 @@ void vtableStubs_init() {
VtableStubs::initialize();
}
void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
for (int i = 0; i < N; i++) {
for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
f(s);
}
}
}
//-----------------------------------------------------------------------------------------------------
// Non-product code
......
......@@ -131,6 +131,7 @@ class VtableStubs : AllStatic {
static VtableStub* stub_containing(address pc); // stub containing pc or NULL
static int number_of_vtable_stubs() { return _number_of_vtable_stubs; }
static void initialize();
static void vtable_stub_do(void f(VtableStub*)); // iterates over all vtable stubs
};
#endif // SHARE_VM_CODE_VTABLESTUBS_HPP
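vtable_stub_do() (declared above, defined in the .cpp hunk) walks every bucket of the stubs' chained hash table and applies a callback; with the JVMTI posting moved out of the allocator, an event collector can use it to enumerate existing stubs. A self-contained toy of the same iteration shape (names mirror the patch; the table size N is illustrative):

#include <cstdio>

struct VtableStub {
  VtableStub* _next;
  int _index;
  VtableStub* next() const { return _next; }
};

static const int N = 256;            // hash table size (illustrative)
static VtableStub* _table[N];

static void vtable_stub_do(void f(VtableStub*)) {
  for (int i = 0; i < N; i++)
    for (VtableStub* s = _table[i]; s != nullptr; s = s->next())
      f(s);
}

static void print_stub(VtableStub* s) { printf("stub %d\n", s->_index); }

int main() {
  VtableStub a = {nullptr, 1}, b = {&a, 2};
  _table[7] = &b;
  vtable_stub_do(print_stub);        // prints stub 2, then stub 1
  return 0;
}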
......@@ -564,16 +564,7 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
}
}
// 5. check if method is concrete
if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
Method::name_and_sig_as_C_string(resolved_klass(),
method_name,
method_signature));
}
// 6. access checks, access checking may be turned off when calling from within the VM.
// 5. access checks, access checking may be turned off when calling from within the VM.
if (check_access) {
assert(current_klass.not_null() , "current_klass should not be null");
......
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -250,8 +250,8 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
// We will reverse the bytecode rewriting _after_ adjusting them.
// Adjust the cache index by offset to the invokedynamic entries in the
// cpCache plus the delta if the invokedynamic bytecodes were adjusted.
cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
int adjustment = cp_cache_delta() + _first_iteration_cp_cache_limit;
int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index - adjustment);
assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
// zero out 4 bytes
Bytes::put_Java_u4(p, 0);
......@@ -453,18 +453,7 @@ methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
return method;
}
void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
ResourceMark rm(THREAD);
Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
// (That's all, folks.)
}
Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
: _klass(klass),
_pool(cpool),
_methods(methods)
{
void Rewriter::rewrite_bytecodes(TRAPS) {
assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
// determine index maps for Method* rewriting
......@@ -508,6 +497,29 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Me
// May have to fix invokedynamic bytecodes if invokestatic/InterfaceMethodref
// entries had to be added.
patch_invokedynamic_bytecodes();
}
void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
ResourceMark rm(THREAD);
Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
// (That's all, folks.)
}
Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
: _klass(klass),
_pool(cpool),
_methods(methods)
{
// Rewrite bytecodes - exception here exits.
rewrite_bytecodes(CHECK);
// Stress restoring bytecodes
if (StressRewriter) {
restore_bytecodes();
rewrite_bytecodes(CHECK);
}
// allocate constant pool cache, now that we've seen all the bytecodes
make_constant_pool_cache(THREAD);
......@@ -523,6 +535,7 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Me
// so methods with jsrs in custom class lists aren't attempted to be
// rewritten in the RO section of the shared archive.
// Relocated bytecodes don't have to be restored, only the cp cache entries
int len = _methods->length();
for (int i = len-1; i >= 0; i--) {
methodHandle m(THREAD, _methods->at(i));
......
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -199,6 +199,9 @@ class Rewriter: public StackObj {
void patch_invokedynamic_bytecodes();
// Do all the work.
void rewrite_bytecodes(TRAPS);
// Revert bytecodes in case of an exception.
void restore_bytecodes();
......
......@@ -71,9 +71,8 @@ bool MetaspaceObj::is_shared() const {
return MetaspaceShared::is_in_shared_space(this);
}
bool MetaspaceObj::is_metaspace_object() const {
return Metaspace::contains((void*)this);
return ClassLoaderDataGraph::contains((void*)this);
}
void MetaspaceObj::print_address_on(outputStream* st) const {
......
......@@ -264,7 +264,7 @@ class ClassLoaderData;
class MetaspaceObj {
public:
bool is_metaspace_object() const; // more specific test but slower
bool is_metaspace_object() const;
bool is_shared() const;
void print_address_on(outputStream* st) const; // nonvirtual address printing
......
......@@ -143,6 +143,8 @@ class Metachunk : public Metabase<Metachunk> {
void set_is_tagged_free(bool v) { _is_tagged_free = v; }
#endif
bool contains(const void* ptr) { return bottom() <= ptr && ptr < _top; }
NOT_PRODUCT(void mangle();)
void print_on(outputStream* st) const;
......
......@@ -513,8 +513,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
// Unlink empty VirtualSpaceNodes and free it.
void purge(ChunkManager* chunk_manager);
bool contains(const void *ptr);
void print_on(outputStream* st) const;
class VirtualSpaceListIterator : public StackObj {
......@@ -558,7 +556,7 @@ class SpaceManager : public CHeapObj<mtClass> {
private:
// protects allocations and contains.
// protects allocations
Mutex* const _lock;
// Type of metadata allocated.
......@@ -595,7 +593,11 @@ class SpaceManager : public CHeapObj<mtClass> {
private:
// Accessors
Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
// ensure lock-free iteration sees fully initialized node
OrderAccess::storestore();
_chunks_in_use[index] = v;
}
BlockFreelist* block_freelists() const {
return (BlockFreelist*) &_block_freelists;
......@@ -708,6 +710,8 @@ class SpaceManager : public CHeapObj<mtClass> {
void print_on(outputStream* st) const;
void locked_print_chunks_in_use_on(outputStream* st) const;
bool contains(const void *ptr);
void verify();
void verify_chunk_size(Metachunk* chunk);
NOT_PRODUCT(void mangle_freed_chunks();)
......@@ -1159,8 +1163,6 @@ bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
} else {
assert(new_entry->reserved_words() == vs_word_size,
"Reserved memory size differs from requested memory size");
// ensure lock-free iteration sees fully initialized node
OrderAccess::storestore();
link_vs(new_entry);
return true;
}
......@@ -1287,19 +1289,6 @@ void VirtualSpaceList::print_on(outputStream* st) const {
}
}
bool VirtualSpaceList::contains(const void *ptr) {
VirtualSpaceNode* list = virtual_space_list();
VirtualSpaceListIterator iter(list);
while (iter.repeat()) {
VirtualSpaceNode* node = iter.get_next();
if (node->reserved()->contains(ptr)) {
return true;
}
}
return false;
}
// MetaspaceGC methods
// VM_CollectForMetadataAllocation is the vm operation used to GC.
......@@ -2392,6 +2381,21 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
return result;
}
// This function looks at the chunks in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
// unloading time.
bool SpaceManager::contains(const void *ptr) {
for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
{
Metachunk* curr = chunks_in_use(i);
while (curr != NULL) {
if (curr->contains(ptr)) return true;
curr = curr->next();
}
}
return false;
}
void SpaceManager::verify() {
// If there are blocks in the dictionary, then
// verification of chunks does not work since
......@@ -3463,17 +3467,12 @@ void Metaspace::print_on(outputStream* out) const {
}
}
bool Metaspace::contains(const void * ptr) {
if (MetaspaceShared::is_in_shared_space(ptr)) {
return true;
bool Metaspace::contains(const void* ptr) {
if (vsm()->contains(ptr)) return true;
if (using_class_space()) {
return class_vsm()->contains(ptr);
}
// This is checked while unlocked. As long as the virtualspaces are added
// at the end, the pointer will be in one of them. The virtual spaces
// aren't deleted presently. When they are, some sort of locking might
// be needed. Note, locking this can cause inversion problems with the
// caller in MetaspaceObj::is_metadata() function.
return space_list()->contains(ptr) ||
(using_class_space() && class_space_list()->contains(ptr));
return false;
}
void Metaspace::verify() {
......
......@@ -225,7 +225,7 @@ class Metaspace : public CHeapObj<mtClass> {
MetaWord* expand_and_allocate(size_t size,
MetadataType mdtype);
static bool contains(const void *ptr);
bool contains(const void* ptr);
void dump(outputStream* const out) const;
// Free empty virtualspaces
......
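The moved OrderAccess::storestore() pairs with the new lock-free SpaceManager::contains(): every initializing store to a chunk must become visible before the store that links it into _chunks_in_use. A sketch of that publication idiom with portable atomics standing in for HotSpot's OrderAccess (single writer assumed, as the "protects allocations" lock guarantees):

#include <atomic>
#include <cstdio>

struct Metachunk {
  const char* bottom;
  const char* top;
  Metachunk* next;
};

static std::atomic<Metachunk*> chunks_in_use{nullptr};  // one in-use list

static void link_chunk(Metachunk* c, const char* bot, const char* tp) {
  c->bottom = bot;                                      // initialize fully...
  c->top    = tp;
  c->next   = chunks_in_use.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_release);  // ~ storestore()
  chunks_in_use.store(c, std::memory_order_relaxed);    // ...then publish
}

static bool contains(const char* p) {                   // lock-free reader
  for (Metachunk* c = chunks_in_use.load(std::memory_order_acquire);
       c != nullptr; c = c->next)
    if (c->bottom <= p && p < c->top) return true;
  return false;
}

int main() {
  static char payload[64];
  static Metachunk m;
  link_chunk(&m, payload, payload + sizeof payload);
  printf("%d\n", contains(payload + 8));                // prints 1
  return 0;
}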
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -214,8 +214,8 @@ void ArrayKlass::oop_print_on(oop obj, outputStream* st) {
// Verification
void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
Klass::verify_on(st, check_dictionary);
void ArrayKlass::verify_on(outputStream* st) {
Klass::verify_on(st);
if (component_mirror() != NULL) {
guarantee(component_mirror()->klass() != NULL, "should have a class");
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -146,7 +146,7 @@ class ArrayKlass: public Klass {
void oop_print_on(oop obj, outputStream* st);
// Verification
void verify_on(outputStream* st, bool check_dictionary);
void verify_on(outputStream* st);
void oop_verify_on(oop obj, outputStream* st);
};
......
......@@ -82,6 +82,9 @@ ConstantPool::ConstantPool(Array<u1>* tags) {
void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
MetadataFactory::free_metadata(loader_data, cache());
set_cache(NULL);
MetadataFactory::free_array<u2>(loader_data, reference_map());
set_reference_map(NULL);
MetadataFactory::free_array<jushort>(loader_data, operands());
set_operands(NULL);
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -3180,7 +3180,7 @@ class VerifyFieldClosure: public OopClosure {
virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
void InstanceKlass::verify_on(outputStream* st) {
#ifndef PRODUCT
// Avoid redundant verifies, this really should be in product.
if (_verify_count == Universe::verify_count()) return;
......@@ -3188,14 +3188,11 @@ void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
#endif
// Verify Klass
Klass::verify_on(st, check_dictionary);
Klass::verify_on(st);
// Verify that klass is present in SystemDictionary if not already
// verifying the SystemDictionary.
if (is_loaded() && !is_anonymous() && check_dictionary) {
Symbol* h_name = name();
SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
}
// Verify that klass is present in ClassLoaderData
guarantee(class_loader_data()->contains_klass(this),
"this class isn't found in class loader data");
// Verify vtables
if (is_linked()) {
......
......@@ -1086,7 +1086,7 @@ public:
const char* internal_name() const;
// Verification
void verify_on(outputStream* st, bool check_dictionary);
void verify_on(outputStream* st);
void oop_verify_on(oop obj, outputStream* st);
};
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -376,8 +376,6 @@ void Klass::append_to_sibling_list() {
}
bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace");
#ifdef ASSERT
// The class is alive iff the class loader is alive.
oop loader = class_loader();
......@@ -640,7 +638,7 @@ void Klass::collect_statistics(KlassSizeStats *sz) const {
// Verification
void Klass::verify_on(outputStream* st, bool check_dictionary) {
void Klass::verify_on(outputStream* st) {
// This can be expensive, but it is worth checking that this klass is actually
// in the CLD graph but not in production.
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -695,8 +695,8 @@ class Klass : public Metadata {
virtual const char* internal_name() const = 0;
// Verification
virtual void verify_on(outputStream* st, bool check_dictionary);
void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
virtual void verify_on(outputStream* st);
void verify() { verify_on(tty); }
#ifndef PRODUCT
bool verify_vtable_index(int index);
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -674,8 +674,8 @@ const char* ObjArrayKlass::internal_name() const {
// Verification
void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
ArrayKlass::verify_on(st, check_dictionary);
void ObjArrayKlass::verify_on(outputStream* st) {
ArrayKlass::verify_on(st);
guarantee(element_klass()->is_klass(), "should be klass");
guarantee(bottom_klass()->is_klass(), "should be klass");
Klass* bk = bottom_klass();
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -151,7 +151,7 @@ class ObjArrayKlass : public ArrayKlass {
const char* internal_name() const;
// Verification
void verify_on(outputStream* st, bool check_dictionary);
void verify_on(outputStream* st);
void oop_verify_on(oop obj, outputStream* st);
};
......
......@@ -63,34 +63,14 @@ InlineTree::InlineTree(Compile* c,
assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
if (UseOldInlining) {
// Update hierarchical counts, count_inline_bcs() and count_inlines()
InlineTree *caller = (InlineTree *)caller_tree;
for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) {
caller->_count_inline_bcs += count_inline_bcs();
NOT_PRODUCT(caller->_count_inlines++;)
}
// Update hierarchical counts, count_inline_bcs() and count_inlines()
InlineTree *caller = (InlineTree *)caller_tree;
for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) {
caller->_count_inline_bcs += count_inline_bcs();
NOT_PRODUCT(caller->_count_inlines++;)
}
}
InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms,
float site_invoke_ratio, int max_inline_level) :
C(c),
_caller_jvms(caller_jvms),
_caller_tree(NULL),
_method(callee_method),
_site_invoke_ratio(site_invoke_ratio),
_max_inline_level(max_inline_level),
_count_inline_bcs(method()->code_size()),
_msg(NULL)
{
#ifndef PRODUCT
_count_inlines = 0;
_forced_inline = false;
#endif
assert(!UseOldInlining, "do not use for old stuff");
}
/**
* Return true when EA is ON and a java constructor is called or
* a super constructor is called from an inlined java constructor.
......@@ -161,11 +141,6 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
return true;
}
if (!UseOldInlining) {
set_msg("!UseOldInlining");
return true; // size and frequency are represented in a new way
}
int default_max_inline_size = C->max_inline_size();
int inline_small_code_size = InlineSmallCode / 4;
int max_inline_size = default_max_inline_size;
......@@ -229,35 +204,6 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
fail_msg = "don't inline by annotation";
}
if (!UseOldInlining) {
if (fail_msg != NULL) {
*wci_result = *(WarmCallInfo::always_cold());
set_msg(fail_msg);
return true;
}
if (callee_method->has_unloaded_classes_in_signature()) {
wci_result->set_profit(wci_result->profit() * 0.1);
}
// don't inline exception code unless the top method belongs to an
// exception class
if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
wci_result->set_profit(wci_result->profit() * 0.1);
}
}
if (callee_method->has_compiled_code() &&
callee_method->instructions_size() > InlineSmallCode) {
wci_result->set_profit(wci_result->profit() * 0.1);
// %%% adjust wci_result->size()?
}
return false;
}
// one more inlining restriction
if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) {
fail_msg = "unloaded signature classes";
......@@ -360,9 +306,7 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
int caller_bci, JVMState* jvms, ciCallProfile& profile,
WarmCallInfo* wci_result, bool& should_delay) {
// Old algorithm had funny accumulating BC-size counters
if (UseOldInlining && ClipInlining
&& (int)count_inline_bcs() >= DesiredMethodLimit) {
if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) {
if (!callee_method->force_inline() || !IncrementalInline) {
set_msg("size > DesiredMethodLimit");
return false;
......@@ -465,8 +409,7 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
int size = callee_method->code_size_for_inlining();
if (UseOldInlining && ClipInlining
&& (int)count_inline_bcs() + size >= DesiredMethodLimit) {
if (ClipInlining && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
if (!callee_method->force_inline() || !IncrementalInline) {
set_msg("size > DesiredMethodLimit");
return false;
......@@ -584,8 +527,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
jvms, profile, &wci, should_delay);
#ifndef PRODUCT
if (UseOldInlining && InlineWarmCalls
&& (PrintOpto || C->print_inlining())) {
if (InlineWarmCalls && (PrintOpto || C->print_inlining())) {
bool cold = wci.is_cold();
bool hot = !cold && wci.is_hot();
bool old_cold = !success;
......@@ -599,13 +541,12 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
}
}
#endif
if (UseOldInlining) {
if (success) {
wci = *(WarmCallInfo::always_hot());
} else {
wci = *(WarmCallInfo::always_cold());
}
if (success) {
wci = *(WarmCallInfo::always_hot());
} else {
wci = *(WarmCallInfo::always_cold());
}
if (!InlineWarmCalls) {
if (!wci.is_cold() && !wci.is_hot()) {
// Do not inline the warm calls.
......@@ -619,8 +560,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
set_msg("inline (hot)");
}
print_inlining(callee_method, caller_bci, true /* success */);
if (UseOldInlining)
build_inline_tree_for_callee(callee_method, jvms, caller_bci);
build_inline_tree_for_callee(callee_method, jvms, caller_bci);
if (InlineWarmCalls && !wci.is_hot())
return new (C) WarmCallInfo(wci); // copy to heap
return WarmCallInfo::always_hot();
......
......@@ -350,9 +350,6 @@
"File to dump ideal graph to. If set overrides the " \
"use of the network") \
\
product(bool, UseOldInlining, true, \
"Enable the 1.3 inlining strategy") \
\
product(bool, UseBimorphicInlining, true, \
"Profiling based inlining for two receivers") \
\
......
......@@ -722,7 +722,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser)
Node* m = kit.map()->in(i);
Node* n = slow_map->in(i);
if (m != n) {
const Type* t = gvn.type(m)->meet(gvn.type(n));
const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
Node* phi = PhiNode::make(region, m, t);
phi->set_req(2, n);
kit.map()->set_req(i, gvn.transform(phi));
......@@ -975,7 +975,7 @@ JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_pa
Node* m = kit.map()->in(i);
Node* n = slow_map->in(i);
if (m != n) {
const Type* t = gvn.type(m)->meet(gvn.type(n));
const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
Node* phi = PhiNode::make(region, m, t);
phi->set_req(2, n);
kit.map()->set_req(i, gvn.transform(phi));
......
......@@ -951,7 +951,7 @@ const Type *PhiNode::Value( PhaseTransform *phase ) const {
if (is_intf != ti_is_intf)
{ t = _type; break; }
}
t = t->meet(ti);
t = t->meet_speculative(ti);
}
}
......@@ -968,11 +968,11 @@ const Type *PhiNode::Value( PhaseTransform *phase ) const {
//
// It is not possible to see Type::BOTTOM values as phi inputs,
// because the ciTypeFlow pre-pass produces verifier-quality types.
const Type* ft = t->filter(_type); // Worst case type
const Type* ft = t->filter_speculative(_type); // Worst case type
#ifdef ASSERT
// The following logic has been moved into TypeOopPtr::filter.
const Type* jt = t->join(_type);
const Type* jt = t->join_speculative(_type);
if( jt->empty() ) { // Emptied out???
// Check for evil case of 't' being a class and '_type' expecting an
......@@ -1757,7 +1757,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
break;
}
// Accumulate type for resulting Phi
type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
type = type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
}
Node* base = NULL;
if (doit) {
......
......@@ -701,10 +701,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
print_compile_messages();
if (UseOldInlining || PrintCompilation NOT_PRODUCT( || PrintOpto) )
_ilt = InlineTree::build_inline_tree_root();
else
_ilt = NULL;
_ilt = InlineTree::build_inline_tree_root();
// Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
assert(num_alias_types() >= AliasIdxRaw, "");
......@@ -3919,16 +3916,18 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
// which may optimize it out.
for (uint next = 0; next < worklist.size(); ++next) {
Node *n = worklist.at(next);
if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL &&
n->as_Type()->type()->is_oopptr()->speculative() != NULL) {
if (n->is_Type()) {
TypeNode* tn = n->as_Type();
const TypeOopPtr* t = tn->type()->is_oopptr();
bool in_hash = igvn.hash_delete(n);
assert(in_hash, "node should be in igvn hash table");
tn->set_type(t->remove_speculative());
igvn.hash_insert(n);
igvn._worklist.push(n); // give it a chance to go away
modified++;
const Type* t = tn->type();
const Type* t_no_spec = t->remove_speculative();
if (t_no_spec != t) {
bool in_hash = igvn.hash_delete(n);
assert(in_hash, "node should be in igvn hash table");
tn->set_type(t_no_spec);
igvn.hash_insert(n);
igvn._worklist.push(n); // give it a chance to go away
modified++;
}
}
uint max = n->len();
for( uint i = 0; i < max; ++i ) {
......@@ -3942,6 +3941,27 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
if (modified > 0) {
igvn.optimize();
}
#ifdef ASSERT
// Verify that after the IGVN is over no speculative type has resurfaced
worklist.clear();
worklist.push(root());
for (uint next = 0; next < worklist.size(); ++next) {
Node *n = worklist.at(next);
const Type* t = igvn.type(n);
assert(t == t->remove_speculative(), "no more speculative types");
if (n->is_Type()) {
t = n->as_Type()->type();
assert(t == t->remove_speculative(), "no more speculative types");
}
uint max = n->len();
for( uint i = 0; i < max; ++i ) {
Node *m = n->in(i);
if (not_a_node(m)) continue;
worklist.push(m);
}
}
igvn.check_no_speculative_types();
#endif
}
}
......
......@@ -188,7 +188,7 @@ Node *CMoveNode::Identity( PhaseTransform *phase ) {
const Type *CMoveNode::Value( PhaseTransform *phase ) const {
if( phase->type(in(Condition)) == Type::TOP )
return Type::TOP;
return phase->type(in(IfFalse))->meet(phase->type(in(IfTrue)));
return phase->type(in(IfFalse))->meet_speculative(phase->type(in(IfTrue)));
}
//------------------------------make-------------------------------------------
......@@ -392,14 +392,14 @@ Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
//=============================================================================
// If input is already higher or equal to cast type, then this is an identity.
Node *ConstraintCastNode::Identity( PhaseTransform *phase ) {
return phase->type(in(1))->higher_equal(_type) ? in(1) : this;
return phase->type(in(1))->higher_equal_speculative(_type) ? in(1) : this;
}
//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type
const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
const Type* ft = phase->type(in(1))->filter(_type);
const Type* ft = phase->type(in(1))->filter_speculative(_type);
#ifdef ASSERT
// Previous versions of this function had some special case logic,
......@@ -409,7 +409,7 @@ const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
{
const Type* t1 = phase->type(in(1));
if( t1 == Type::TOP ) assert(ft == Type::TOP, "special case #1");
const Type* rt = t1->join(_type);
const Type* rt = t1->join_speculative(_type);
if (rt->empty()) assert(ft == Type::TOP, "special case #2");
break;
}
......
......@@ -36,7 +36,7 @@ class MachNode;
// Simple constants
class ConNode : public TypeNode {
public:
ConNode( const Type *t ) : TypeNode(t,1) {
ConNode( const Type *t ) : TypeNode(t->remove_speculative(),1) {
init_req(0, (Node*)Compile::current()->root());
init_flags(Flag_is_Con);
}
......
......@@ -161,19 +161,8 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Try inlining a bytecoded method:
if (!call_does_dispatch) {
InlineTree* ilt;
if (UseOldInlining) {
ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
} else {
// Make a disembodied, stateless ILT.
// TO DO: When UseOldInlining is removed, copy the ILT code elsewhere.
float site_invoke_ratio = prof_factor;
// Note: ilt is for the root of this parse, not the present call site.
ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
}
InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
WarmCallInfo scratch_ci;
if (!UseOldInlining)
scratch_ci.init(jvms, callee, profile, prof_factor);
bool should_delay = false;
WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
assert(ci != &scratch_ci, "do not let this pointer escape");
......
......@@ -420,7 +420,7 @@ void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* ph
}
const Type* srctype = _gvn.type(src);
if (phi->type() != srctype) {
const Type* dsttype = phi->type()->meet(srctype);
const Type* dsttype = phi->type()->meet_speculative(srctype);
if (phi->type() != dsttype) {
phi->set_type(dsttype);
_gvn.set_type(phi, dsttype);
......@@ -1223,7 +1223,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
// See if mixing in the NULL pointer changes type.
// If so, then the NULL pointer was not allowed in the original
// type. In other words, "value" was not-null.
if (t->meet(TypePtr::NULL_PTR) != t) {
if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
// same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
explicit_null_checks_elided++;
return value; // Elided null check quickly!
......@@ -1356,7 +1356,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
const Type *t = _gvn.type(obj);
const Type *t_not_null = t->join(TypePtr::NOTNULL);
const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
// Object is already not-null?
if( t == t_not_null ) return obj;
......@@ -3009,7 +3009,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
if (failure_control != NULL) // failure is now impossible
(*failure_control) = top();
// adjust the type of the phi to the exact klass:
phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
}
}
......
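The revised null-check comparison above is subtle. Assuming meet() returns a speculative-free type (which is what the change implies), comparing its result against t would spuriously report "not-null" for any t that carries a speculative component; comparing against t->remove_speculative() does not. A toy lattice, not HotSpot's real Type, makes the point:

#include <cstdio>

struct Ty {
  unsigned base;       // stand-in lattice point: bitset of allowed values
  int speculative;     // profile-derived hint; 0 = none
  Ty meet(const Ty& o) const { return Ty{base | o.base, 0}; }  // drops hint
  Ty remove_speculative() const { return Ty{base, 0}; }
  bool operator!=(const Ty& o) const {
    return base != o.base || speculative != o.speculative;
  }
};

int main() {
  const Ty NULL_PTR = {1u, 0};
  Ty t = {1u | 2u, 42};    // may-be-null pointer with a speculative klass
  // Mixing in NULL changes nothing, so null was already allowed:
  printf("vs t:                  %s\n",
         (t.meet(NULL_PTR) != t) ? "looks not-null (wrong)" : "may be null");
  printf("vs remove_speculative: %s\n",
         (t.meet(NULL_PTR) != t.remove_speculative()) ? "looks not-null (wrong)"
                                                      : "may be null");
  return 0;
}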
......@@ -304,6 +304,7 @@ class LibraryCallKit : public GraphKit {
bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
bool inline_encodeISOArray();
bool inline_updateCRC32();
bool inline_updateBytesCRC32();
......@@ -5936,10 +5937,22 @@ bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
if (k_start == NULL) return false;
// Call the stub.
make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
stubAddr, stubName, TypePtr::BOTTOM,
src_start, dest_start, k_start);
if (Matcher::pass_original_key_for_aes()) {
// on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
// compatibility issues between Java key expansion and SPARC crypto instructions
Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
if (original_k_start == NULL) return false;
// Call the stub.
make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
stubAddr, stubName, TypePtr::BOTTOM,
src_start, dest_start, k_start, original_k_start);
} else {
// Call the stub.
make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
stubAddr, stubName, TypePtr::BOTTOM,
src_start, dest_start, k_start);
}
return true;
}
......@@ -6017,14 +6030,29 @@ bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
if (objRvec == NULL) return false;
Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
// Call the stub, passing src_start, dest_start, k_start, r_start and src_len
make_runtime_call(RC_LEAF|RC_NO_FP,
OptoRuntime::cipherBlockChaining_aescrypt_Type(),
stubAddr, stubName, TypePtr::BOTTOM,
src_start, dest_start, k_start, r_start, len);
Node* cbcCrypt;
if (Matcher::pass_original_key_for_aes()) {
// on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
// compatibility issues between Java key expansion and SPARC crypto instructions
Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
if (original_k_start == NULL) return false;
// return is void so no result needs to be pushed
// Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
OptoRuntime::cipherBlockChaining_aescrypt_Type(),
stubAddr, stubName, TypePtr::BOTTOM,
src_start, dest_start, k_start, r_start, len, original_k_start);
} else {
// Call the stub, passing src_start, dest_start, k_start, r_start and src_len
cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
OptoRuntime::cipherBlockChaining_aescrypt_Type(),
stubAddr, stubName, TypePtr::BOTTOM,
src_start, dest_start, k_start, r_start, len);
}
// return cipher length (int)
Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms));
set_result(retvalue);
return true;
}
......@@ -6039,6 +6067,17 @@ Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object)
return k_start;
}
//------------------------------get_original_key_start_from_aescrypt_object-----------------------
Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
if (objAESCryptKey == NULL) return (Node *) NULL;
// now have the array, need to get the start address of the lastKey array
Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
return original_k_start;
}
//----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
// Return node representing slow path of predicate check.
// the pseudo code we want to emulate with this predicate is:
......
......@@ -1115,8 +1115,8 @@ BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {
Node *n2 = phi->in(i)->in(1)->in(2);
phi1->set_req( i, n1 );
phi2->set_req( i, n2 );
phi1->set_type( phi1->type()->meet(n1->bottom_type()) );
phi2->set_type( phi2->type()->meet(n2->bottom_type()) );
phi1->set_type( phi1->type()->meet_speculative(n1->bottom_type()));
phi2->set_type( phi2->type()->meet_speculative(n2->bottom_type()));
}
// See if these Phis have been made before.
// Register with optimizer
......@@ -1189,8 +1189,8 @@ CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
}
phi1->set_req( j, n1 );
phi2->set_req( j, n2 );
phi1->set_type( phi1->type()->meet(n1->bottom_type()) );
phi2->set_type( phi2->type()->meet(n2->bottom_type()) );
phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
}
// See if these Phis have been made before.
......
......@@ -286,6 +286,9 @@ public:
// CPU supports misaligned vectors store/load.
static const bool misaligned_vectors_ok();
// Should original key array reference be passed to AES stubs
static const bool pass_original_key_for_aes();
// Used to determine a "low complexity" 64-bit constant. (Zero is simple.)
// The standard of comparison is one (StoreL ConL) vs. two (StoreI ConI).
// Depends on the details of 64-bit constant generation on the CPU.
......
......@@ -657,7 +657,7 @@ const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_c
// disregarding "null"-ness.
// (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
assert(cross_check->meet(tp_notnull) == cross_check,
assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
"real address must not escape from expected memory type");
}
#endif
......@@ -1681,7 +1681,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// t might actually be lower than _type, if _type is a unique
// concrete subclass of abstract class t.
if (off_beyond_header) { // is the offset beyond the header?
const Type* jt = t->join(_type);
const Type* jt = t->join_speculative(_type);
// In any case, do not allow the join, per se, to empty out the type.
if (jt->empty() && !t->empty()) {
// This can happen if an interface-typed array narrows to a class type.
......
......@@ -94,7 +94,7 @@ const Type* ProjNode::proj_type(const Type* t) const {
if ((_con == TypeFunc::Parms) &&
n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method()) {
// The result of autoboxing is always non-null on normal path.
t = t->join(TypePtr::NOTNULL);
t = t->join_speculative(TypePtr::NOTNULL);
}
return t;
}
......
......@@ -995,13 +995,13 @@ void Node::raise_bottom_type(const Type* new_type) {
if (is_Type()) {
TypeNode *n = this->as_Type();
if (VerifyAliases) {
assert(new_type->higher_equal(n->type()), "new type must refine old type");
assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
}
n->set_type(new_type);
} else if (is_Load()) {
LoadNode *n = this->as_Load();
if (VerifyAliases) {
assert(new_type->higher_equal(n->type()), "new type must refine old type");
assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
}
n->set_type(new_type);
}
......
......@@ -1649,7 +1649,7 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
map()->set_req(j, _gvn.transform_no_reclaim(phi));
debug_only(const Type* bt2 = phi->bottom_type());
assert(bt2->higher_equal(bt1), "must be consistent with type-flow");
assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
record_for_igvn(phi);
}
}
......@@ -2022,7 +2022,7 @@ void Parse::return_current(Node* value) {
!tp->klass()->is_interface()) {
// sharpen the type eagerly; this eases certain assert checking
if (tp->higher_equal(TypeInstPtr::NOTNULL))
tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr();
tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr));
}
}
......
......@@ -88,7 +88,7 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
// If we load from "AbstractClass[]" we must see "ConcreteSubClass".
const Type* subklass = Type::get_const_type(toop->klass());
elemtype = subklass->join(el);
elemtype = subklass->join_speculative(el);
}
}
}
......@@ -1278,7 +1278,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
// Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
// or the narrowOop equivalent.
const Type* obj_type = _gvn.type(obj);
const TypeOopPtr* tboth = obj_type->join(con_type)->isa_oopptr();
const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
tboth->higher_equal(obj_type)) {
// obj has to be of the exact type Foo if the CmpP succeeds.
......@@ -1288,7 +1288,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
(jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
const Type* tcc = ccast->as_Type()->type();
assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
// Delay transform() call to allow recovery of pre-cast value
// at the control merge.
_gvn.set_type_bottom(ccast);
......@@ -1318,7 +1318,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
switch (btest) {
case BoolTest::eq: // Constant test?
{
const Type* tboth = tcon->join(tval);
const Type* tboth = tcon->join_speculative(tval);
if (tboth == tval) break; // Nothing to gain.
if (tcon->isa_int()) {
ccast = new (C) CastIINode(val, tboth);
......@@ -1352,7 +1352,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
if (ccast != NULL) {
const Type* tcc = ccast->as_Type()->type();
assert(tcc != tval && tcc->higher_equal(tval), "must improve");
assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
// Delay transform() call to allow recovery of pre-cast value
// at the control merge.
ccast->set_req(0, control());
......
......@@ -337,7 +337,7 @@ bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_au
// should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
// An oop is not scavengable if it is in the perm gen.
if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
con_type = con_type->join(stable_type);
con_type = con_type->join_speculative(stable_type);
break;
case T_ILLEGAL:
......
......@@ -323,6 +323,23 @@ void NodeHash::remove_useless_nodes(VectorSet &useful) {
}
}
void NodeHash::check_no_speculative_types() {
#ifdef ASSERT
uint max = size();
Node *sentinel_node = sentinel();
for (uint i = 0; i < max; ++i) {
Node *n = at(i);
if (n != NULL && n != sentinel_node && n->is_Type()) {
TypeNode* tn = n->as_Type();
const Type* t = tn->type();
const Type* t_no_spec = t->remove_speculative();
assert(t == t_no_spec, "dead node in hash table or missed node during speculative cleanup");
}
}
#endif
}
#ifndef PRODUCT
//------------------------------dump-------------------------------------------
// Dump statistics for the hash table
......@@ -1392,11 +1409,11 @@ void PhaseIterGVN::remove_speculative_types() {
assert(UseTypeSpeculation, "speculation is off");
for (uint i = 0; i < _types.Size(); i++) {
const Type* t = _types.fast_lookup(i);
if (t != NULL && t->isa_oopptr()) {
const TypeOopPtr* to = t->is_oopptr();
_types.map(i, to->remove_speculative());
if (t != NULL) {
_types.map(i, t->remove_speculative());
}
}
_table.check_no_speculative_types();
}
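
Editor's note: remove_speculative_types() used to strip only oop pointers; since speculative parts can now hide inside compound types (array element types, narrow oops), the loop visits every type, and check_no_speculative_types() then sweeps the hash table to catch anything missed. A toy version of that cleanup-then-verify pattern, with invented names:

#include <cassert>
#include <vector>

// Toy type: a value plus an optional speculative refinement (hypothetical model).
struct Ty {
  int value;
  const Ty* speculative;  // NULL when there is no speculative part
  Ty stripped() const { return Ty{value, nullptr}; }
  bool has_speculative() const { return speculative != nullptr; }
};

int main() {
  Ty spec{42, nullptr};
  std::vector<Ty> table = { Ty{1, &spec}, Ty{2, nullptr}, Ty{42, &spec} };

  // Cleanup pass: strip the speculative part from every entry, not just some kinds.
  for (Ty& t : table) t = t.stripped();

  // Verification pass, mirroring check_no_speculative_types(): nothing may remain.
  for (const Ty& t : table) {
    assert(!t.has_speculative() && "missed entry during speculative cleanup");
  }
  return 0;
}
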
//=============================================================================
......
......@@ -92,7 +92,8 @@ public:
}
void remove_useless_nodes(VectorSet &useful); // replace with sentinel
void replace_with(NodeHash* nh);
void check_no_speculative_types(); // Check no speculative part for type nodes in table
Node *sentinel() { return _sentinel; }
......@@ -501,6 +502,9 @@ public:
Deoptimization::DeoptReason reason);
void remove_speculative_types();
void check_no_speculative_types() {
_table.check_no_speculative_types();
}
#ifndef PRODUCT
protected:
......
......@@ -814,12 +814,18 @@ const TypeFunc* OptoRuntime::array_fill_Type() {
const TypeFunc* OptoRuntime::aescrypt_block_Type() {
// create input type (domain)
int num_args = 3;
if (Matcher::pass_original_key_for_aes()) {
num_args = 4;
}
int argcnt = num_args;
const Type** fields = TypeTuple::fields(argcnt);
int argp = TypeFunc::Parms;
fields[argp++] = TypePtr::NOTNULL; // src
fields[argp++] = TypePtr::NOTNULL; // dest
fields[argp++] = TypePtr::NOTNULL; // k array
if (Matcher::pass_original_key_for_aes()) {
fields[argp++] = TypePtr::NOTNULL; // original k array
}
assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
......@@ -856,6 +862,9 @@ const TypeFunc* OptoRuntime::updateBytesCRC32_Type() {
const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
// create input type (domain)
int num_args = 5;
if (Matcher::pass_original_key_for_aes()) {
num_args = 6;
}
int argcnt = num_args;
const Type** fields = TypeTuple::fields(argcnt);
int argp = TypeFunc::Parms;
......@@ -864,13 +873,16 @@ const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
fields[argp++] = TypePtr::NOTNULL; // k array
fields[argp++] = TypePtr::NOTNULL; // r array
fields[argp++] = TypeInt::INT; // src len
if (Matcher::pass_original_key_for_aes()) {
fields[argp++] = TypePtr::NOTNULL; // original k array
}
assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
// no result type needed
// returning cipher len (int)
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL; // void
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
fields[TypeFunc::Parms+0] = TypeInt::INT;
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
return TypeFunc::make(domain, range);
}
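
Editor's note: both signature builders above follow the same pattern: pick the argument count up front (one extra slot when the original key rides along), fill the slots in order, and assert that the cursor landed exactly at Parms+argcnt; the CBC variant also changes its range from void to the produced cipher length. A small sketch of that fill-and-check discipline, using a plain string vector instead of TypeTuple:

#include <cassert>
#include <string>
#include <vector>

// Hypothetical mirror of the domain construction: count first, fill, then verify.
std::vector<std::string> make_cbc_domain(bool pass_original_key) {
  int argcnt = pass_original_key ? 6 : 5;
  std::vector<std::string> fields;
  fields.reserve(argcnt);
  fields.push_back("src");
  fields.push_back("dest");
  fields.push_back("k array");
  fields.push_back("r array");
  fields.push_back("src len");
  if (pass_original_key) fields.push_back("original k array");
  assert((int)fields.size() == argcnt && "correct decoding");  // same invariant as the argp check
  return fields;
}

int main() {
  assert(make_cbc_domain(false).size() == 5);
  assert(make_cbc_domain(true).size() == 6);   // SPARC shape
  return 0;
}
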
......
......@@ -236,6 +236,13 @@ int Type::cmp( const Type *const t1, const Type *const t2 ) {
return !t1->eq(t2); // Return ZERO if equal
}
const Type* Type::maybe_remove_speculative(bool include_speculative) const {
if (!include_speculative) {
return remove_speculative();
}
return this;
}
//------------------------------hash-------------------------------------------
int Type::uhash( const Type *const t ) {
return t->hash();
......@@ -628,41 +635,44 @@ bool Type::interface_vs_oop(const Type *t) const {
//------------------------------meet-------------------------------------------
// Compute the MEET of two types. NOT virtual. It enforces that meet is
// commutative and the lattice is symmetric.
const Type *Type::meet( const Type *t ) const {
const Type *Type::meet_helper(const Type *t, bool include_speculative) const {
if (isa_narrowoop() && t->isa_narrowoop()) {
const Type* result = make_ptr()->meet(t->make_ptr());
const Type* result = make_ptr()->meet_helper(t->make_ptr(), include_speculative);
return result->make_narrowoop();
}
if (isa_narrowklass() && t->isa_narrowklass()) {
const Type* result = make_ptr()->meet(t->make_ptr());
const Type* result = make_ptr()->meet_helper(t->make_ptr(), include_speculative);
return result->make_narrowklass();
}
const Type *mt = xmeet(t);
const Type *this_t = maybe_remove_speculative(include_speculative);
t = t->maybe_remove_speculative(include_speculative);
const Type *mt = this_t->xmeet(t);
if (isa_narrowoop() || t->isa_narrowoop()) return mt;
if (isa_narrowklass() || t->isa_narrowklass()) return mt;
#ifdef ASSERT
assert( mt == t->xmeet(this), "meet not commutative" );
assert(mt == t->xmeet(this_t), "meet not commutative");
const Type* dual_join = mt->_dual;
const Type *t2t = dual_join->xmeet(t->_dual);
const Type *t2this = dual_join->xmeet( _dual);
const Type *t2this = dual_join->xmeet(this_t->_dual);
// Interface meet Oop is Not Symmetric:
// Interface:AnyNull meet Oop:AnyNull == Interface:AnyNull
// Interface:NotNull meet Oop:NotNull == java/lang/Object:NotNull
if( !interface_vs_oop(t) && (t2t != t->_dual || t2this != _dual) ) {
if( !interface_vs_oop(t) && (t2t != t->_dual || t2this != this_t->_dual) ) {
tty->print_cr("=== Meet Not Symmetric ===");
tty->print("t = "); t->dump(); tty->cr();
tty->print("this= "); dump(); tty->cr();
tty->print("mt=(t meet this)= "); mt->dump(); tty->cr();
tty->print("t = "); t->dump(); tty->cr();
tty->print("this= "); this_t->dump(); tty->cr();
tty->print("mt=(t meet this)= "); mt->dump(); tty->cr();
tty->print("t_dual= "); t->_dual->dump(); tty->cr();
tty->print("this_dual= "); _dual->dump(); tty->cr();
tty->print("mt_dual= "); mt->_dual->dump(); tty->cr();
tty->print("t_dual= "); t->_dual->dump(); tty->cr();
tty->print("this_dual= "); this_t->_dual->dump(); tty->cr();
tty->print("mt_dual= "); mt->_dual->dump(); tty->cr();
tty->print("mt_dual meet t_dual= "); t2t ->dump(); tty->cr();
tty->print("mt_dual meet this_dual= "); t2this ->dump(); tty->cr();
tty->print("mt_dual meet t_dual= "); t2t ->dump(); tty->cr();
tty->print("mt_dual meet this_dual= "); t2this ->dump(); tty->cr();
fatal("meet not symmetric" );
}
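
Editor's note: the ASSERT block above checks two algebraic properties after every meet: commutativity (a meet b == b meet a) and the dual identity that makes join well defined. A toy integer-interval lattice, again not HotSpot's, where both properties hold by construction and can be spot-checked the same way:

#include <algorithm>
#include <cassert>

// Toy interval lattice (hypothetical): MEET is the convex hull, lower in the lattice;
// narrower ranges sit higher, as with TypeInt.
struct Range { int lo, hi; };
bool operator==(Range a, Range b) { return a.lo == b.lo && a.hi == b.hi; }

Range meet(Range a, Range b) {
  return Range{ std::min(a.lo, b.lo), std::max(a.hi, b.hi) };
}

int main() {
  Range a{0, 5}, b{3, 9};
  Range mt = meet(a, b);
  assert((mt == meet(b, a)));        // the "meet not commutative" check
  assert((mt == Range{0, 9}));       // hull of the two ranges
  // higher_equal-style test: a is above (or equal to) mt exactly when meet(a, mt) == mt.
  assert((meet(a, mt) == mt));
  return 0;
}
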
......@@ -754,8 +764,8 @@ const Type *Type::xmeet( const Type *t ) const {
}
//-----------------------------filter------------------------------------------
const Type *Type::filter( const Type *kills ) const {
const Type* ft = join(kills);
const Type *Type::filter_helper(const Type *kills, bool include_speculative) const {
const Type* ft = join_helper(kills, include_speculative);
if (ft->empty())
return Type::TOP; // Canonical empty value
return ft;
......@@ -1309,8 +1319,8 @@ const Type *TypeInt::narrow( const Type *old ) const {
}
//-----------------------------filter------------------------------------------
const Type *TypeInt::filter( const Type *kills ) const {
const TypeInt* ft = join(kills)->isa_int();
const Type *TypeInt::filter_helper(const Type *kills, bool include_speculative) const {
const TypeInt* ft = join_helper(kills, include_speculative)->isa_int();
if (ft == NULL || ft->empty())
return Type::TOP; // Canonical empty value
if (ft->_widen < this->_widen) {
......@@ -1570,8 +1580,8 @@ const Type *TypeLong::narrow( const Type *old ) const {
}
//-----------------------------filter------------------------------------------
const Type *TypeLong::filter( const Type *kills ) const {
const TypeLong* ft = join(kills)->isa_long();
const Type *TypeLong::filter_helper(const Type *kills, bool include_speculative) const {
const TypeLong* ft = join_helper(kills, include_speculative)->isa_long();
if (ft == NULL || ft->empty())
return Type::TOP; // Canonical empty value
if (ft->_widen < this->_widen) {
......@@ -1726,7 +1736,7 @@ const TypeTuple *TypeTuple::make_domain(ciInstanceKlass* recv, ciSignature* sig)
total_fields++;
field_array = fields(total_fields);
// Use get_const_type here because it respects UseUniqueSubclasses:
field_array[pos++] = get_const_type(recv)->join(TypePtr::NOTNULL);
field_array[pos++] = get_const_type(recv)->join_speculative(TypePtr::NOTNULL);
} else {
field_array = fields(total_fields);
}
......@@ -1916,7 +1926,7 @@ const Type *TypeAry::xmeet( const Type *t ) const {
case Array: { // Meeting 2 arrays?
const TypeAry *a = t->is_ary();
return TypeAry::make(_elem->meet(a->_elem),
return TypeAry::make(_elem->meet_speculative(a->_elem),
_size->xmeet(a->_size)->is_int(),
_stable & a->_stable);
}
......@@ -1949,6 +1959,13 @@ int TypeAry::hash(void) const {
return (intptr_t)_elem + (intptr_t)_size + (_stable ? 43 : 0);
}
/**
* Return same type without a speculative part in the element
*/
const Type* TypeAry::remove_speculative() const {
return make(_elem->remove_speculative(), _size, _stable);
}
//----------------------interface_vs_oop---------------------------------------
#ifdef ASSERT
bool TypeAry::interface_vs_oop(const Type *t) const {
......@@ -2560,14 +2577,14 @@ const Type *TypeOopPtr::xmeet(const Type *t) const {
return res;
}
if (res->isa_oopptr() != NULL) {
const TypeOopPtr* res_oopptr = res->is_oopptr();
if (res_oopptr->speculative() != NULL) {
// type->speculative() == NULL means that speculation is no better
// than type, i.e. type->speculative() == type. So there are 2
// ways to represent the fact that we have no useful speculative
// data and we should use a single one to be able to test for
// equality between types. Check whether type->speculative() ==
// type and set speculative to NULL if it is the case.
const TypeOopPtr* res_oopptr = res->is_oopptr();
if (res_oopptr->remove_speculative() == res_oopptr->speculative()) {
return res_oopptr->remove_speculative();
}
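
Editor's note: the comment above is about canonicalization. "Speculation is no better than the type" can be encoded either as speculative == NULL or as speculative == type, and the code picks the NULL form so that equal types compare equal field by field. A toy illustration of why a unique representation matters, with invented fields:

#include <cassert>

// Hypothetical two-field type: the same "no useful speculation" fact can be
// written two ways; canonicalizing to NULL keeps equality a field-wise compare.
struct OopTy {
  int klass;
  const OopTy* speculative;  // NULL is the canonical "nothing better" encoding
};

bool eq(const OopTy& a, const OopTy& b) {
  return a.klass == b.klass && a.speculative == b.speculative;
}

int main() {
  OopTy t{7, nullptr};
  OopTy same_fact{7, &t};          // speculative part equals the type itself: no new info
  assert(!eq(t, same_fact));       // non-canonical encodings break equality...
  same_fact.speculative = nullptr; // ...so xmeet canonicalizes to NULL
  assert(eq(t, same_fact));
  return 0;
}
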
......@@ -2633,7 +2650,7 @@ const Type *TypeOopPtr::xmeet_helper(const Type *t) const {
case OopPtr: { // Meeting to other OopPtrs
const TypeOopPtr *tp = t->is_oopptr();
int instance_id = meet_instance_id(tp->instance_id());
const TypeOopPtr* speculative = meet_speculative(tp);
const TypeOopPtr* speculative = xmeet_speculative(tp);
return make(meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id, speculative);
}
......@@ -2787,9 +2804,9 @@ intptr_t TypeOopPtr::get_con() const {
//-----------------------------filter------------------------------------------
// Do not allow interface-vs.-noninterface joins to collapse to top.
const Type *TypeOopPtr::filter(const Type *kills) const {
const Type *TypeOopPtr::filter_helper(const Type *kills, bool include_speculative) const {
const Type* ft = join(kills);
const Type* ft = join_helper(kills, include_speculative);
const TypeInstPtr* ftip = ft->isa_instptr();
const TypeInstPtr* ktip = kills->isa_instptr();
......@@ -2901,7 +2918,10 @@ const TypePtr *TypeOopPtr::add_offset(intptr_t offset) const {
/**
* Return same type without a speculative part
*/
const TypeOopPtr* TypeOopPtr::remove_speculative() const {
const Type* TypeOopPtr::remove_speculative() const {
if (_speculative == NULL) {
return this;
}
return make(_ptr, _offset, _instance_id, NULL);
}
......@@ -2927,7 +2947,7 @@ int TypeOopPtr::dual_instance_id( ) const {
*
* @param other type to meet with
*/
const TypeOopPtr* TypeOopPtr::meet_speculative(const TypeOopPtr* other) const {
const TypeOopPtr* TypeOopPtr::xmeet_speculative(const TypeOopPtr* other) const {
bool this_has_spec = (_speculative != NULL);
bool other_has_spec = (other->speculative() != NULL);
......@@ -2952,7 +2972,7 @@ const TypeOopPtr* TypeOopPtr::meet_speculative(const TypeOopPtr* other) const {
other_spec = other;
}
return this_spec->meet(other_spec)->is_oopptr();
return this_spec->meet_speculative(other_spec)->is_oopptr();
}
/**
......@@ -3111,7 +3131,7 @@ const TypeInstPtr *TypeInstPtr::xmeet_unloaded(const TypeInstPtr *tinst) const {
int off = meet_offset(tinst->offset());
PTR ptr = meet_ptr(tinst->ptr());
int instance_id = meet_instance_id(tinst->instance_id());
const TypeOopPtr* speculative = meet_speculative(tinst);
const TypeOopPtr* speculative = xmeet_speculative(tinst);
const TypeInstPtr *loaded = is_loaded() ? this : tinst;
const TypeInstPtr *unloaded = is_loaded() ? tinst : this;
......@@ -3188,7 +3208,7 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const {
int offset = meet_offset(tp->offset());
PTR ptr = meet_ptr(tp->ptr());
int instance_id = meet_instance_id(tp->instance_id());
const TypeOopPtr* speculative = meet_speculative(tp);
const TypeOopPtr* speculative = xmeet_speculative(tp);
switch (ptr) {
case TopPTR:
case AnyNull: // Fall 'down' to dual of object klass
......@@ -3238,14 +3258,14 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const {
case TopPTR:
case AnyNull: {
int instance_id = meet_instance_id(InstanceTop);
const TypeOopPtr* speculative = meet_speculative(tp);
const TypeOopPtr* speculative = xmeet_speculative(tp);
return make(ptr, klass(), klass_is_exact(),
(ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative);
}
case NotNull:
case BotPTR: {
int instance_id = meet_instance_id(tp->instance_id());
const TypeOopPtr* speculative = meet_speculative(tp);
const TypeOopPtr* speculative = xmeet_speculative(tp);
return TypeOopPtr::make(ptr, offset, instance_id, speculative);
}
default: typerr(t);
......@@ -3297,7 +3317,7 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const {
int off = meet_offset( tinst->offset() );
PTR ptr = meet_ptr( tinst->ptr() );
int instance_id = meet_instance_id(tinst->instance_id());
const TypeOopPtr* speculative = meet_speculative(tinst);
const TypeOopPtr* speculative = xmeet_speculative(tinst);
// Check for easy case; klasses are equal (and perhaps not loaded!)
// If we have constants, then we created oops so classes are loaded
......@@ -3546,7 +3566,10 @@ const TypePtr *TypeInstPtr::add_offset(intptr_t offset) const {
return make(_ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id, add_offset_speculative(offset));
}
const TypeOopPtr *TypeInstPtr::remove_speculative() const {
const Type *TypeInstPtr::remove_speculative() const {
if (_speculative == NULL) {
return this;
}
return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, NULL);
}
......@@ -3748,14 +3771,14 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
case TopPTR:
case AnyNull: {
int instance_id = meet_instance_id(InstanceTop);
const TypeOopPtr* speculative = meet_speculative(tp);
const TypeOopPtr* speculative = xmeet_speculative(tp);
return make(ptr, (ptr == Constant ? const_oop() : NULL),
_ary, _klass, _klass_is_exact, offset, instance_id, speculative);
}
case BotPTR:
case NotNull: {
int instance_id = meet_instance_id(tp->instance_id());
const TypeOopPtr* speculative = meet_speculative(tp);
const TypeOopPtr* speculative = xmeet_speculative(tp);
return TypeOopPtr::make(ptr, offset, instance_id, speculative);
}
default: ShouldNotReachHere();
......@@ -3793,10 +3816,10 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
case AryPtr: { // Meeting 2 references?
const TypeAryPtr *tap = t->is_aryptr();
int off = meet_offset(tap->offset());
const TypeAry *tary = _ary->meet(tap->_ary)->is_ary();
const TypeAry *tary = _ary->meet_speculative(tap->_ary)->is_ary();
PTR ptr = meet_ptr(tap->ptr());
int instance_id = meet_instance_id(tap->instance_id());
const TypeOopPtr* speculative = meet_speculative(tap);
const TypeOopPtr* speculative = xmeet_speculative(tap);
ciKlass* lazy_klass = NULL;
if (tary->_elem->isa_int()) {
// Integral array element types have irrelevant lattice relations.
......@@ -3876,7 +3899,7 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
int offset = meet_offset(tp->offset());
PTR ptr = meet_ptr(tp->ptr());
int instance_id = meet_instance_id(tp->instance_id());
const TypeOopPtr* speculative = meet_speculative(tp);
const TypeOopPtr* speculative = xmeet_speculative(tp);
switch (ptr) {
case TopPTR:
case AnyNull: // Fall 'down' to dual of object klass
......@@ -3990,8 +4013,8 @@ const TypePtr *TypeAryPtr::add_offset(intptr_t offset) const {
return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id, add_offset_speculative(offset));
}
const TypeOopPtr *TypeAryPtr::remove_speculative() const {
return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, _offset, _instance_id, NULL);
const Type *TypeAryPtr::remove_speculative() const {
return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, NULL);
}
//=============================================================================
......@@ -4031,9 +4054,9 @@ const Type *TypeNarrowPtr::xdual() const { // Compute dual right now.
}
const Type *TypeNarrowPtr::filter( const Type *kills ) const {
const Type *TypeNarrowPtr::filter_helper(const Type *kills, bool include_speculative) const {
if (isa_same_narrowptr(kills)) {
const Type* ft =_ptrtype->filter(is_same_narrowptr(kills)->_ptrtype);
const Type* ft =_ptrtype->filter_helper(is_same_narrowptr(kills)->_ptrtype, include_speculative);
if (ft->empty())
return Type::TOP; // Canonical empty value
if (ft->isa_ptr()) {
......@@ -4041,7 +4064,7 @@ const Type *TypeNarrowPtr::filter( const Type *kills ) const {
}
return ft;
} else if (kills->isa_ptr()) {
const Type* ft = _ptrtype->join(kills);
const Type* ft = _ptrtype->join_helper(kills, include_speculative);
if (ft->empty())
return Type::TOP; // Canonical empty value
return ft;
......@@ -4171,8 +4194,8 @@ const TypePtr *TypeMetadataPtr::add_offset( intptr_t offset ) const {
//-----------------------------filter------------------------------------------
// Do not allow interface-vs.-noninterface joins to collapse to top.
const Type *TypeMetadataPtr::filter( const Type *kills ) const {
const TypeMetadataPtr* ft = join(kills)->isa_metadataptr();
const Type *TypeMetadataPtr::filter_helper(const Type *kills, bool include_speculative) const {
const TypeMetadataPtr* ft = join_helper(kills, include_speculative)->isa_metadataptr();
if (ft == NULL || ft->empty())
return Type::TOP; // Canonical empty value
return ft;
......@@ -4374,10 +4397,10 @@ bool TypeKlassPtr::singleton(void) const {
}
// Do not allow interface-vs.-noninterface joins to collapse to top.
const Type *TypeKlassPtr::filter(const Type *kills) const {
const Type *TypeKlassPtr::filter_helper(const Type *kills, bool include_speculative) const {
// logic here mirrors the one from TypeOopPtr::filter. See comments
// there.
const Type* ft = join(kills);
const Type* ft = join_helper(kills, include_speculative);
const TypeKlassPtr* ftkp = ft->isa_klassptr();
const TypeKlassPtr* ktkp = kills->isa_klassptr();
......
......@@ -164,6 +164,8 @@ private:
virtual bool interface_vs_oop_helper(const Type *t) const;
#endif
const Type *meet_helper(const Type *t, bool include_speculative) const;
protected:
// Each class of type is also identified by its base.
const TYPES _base; // Enum of Types type
......@@ -171,6 +173,10 @@ protected:
Type( TYPES t ) : _dual(NULL), _base(t) {} // Simple types
// ~Type(); // Use fast deallocation
const Type *hashcons(); // Hash-cons the type
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
const Type *join_helper(const Type *t, bool include_speculative) const {
return dual()->meet_helper(t->dual(), include_speculative)->dual();
}
public:
......@@ -202,10 +208,24 @@ public:
// Test for equivalence of types
static int cmp( const Type *const t1, const Type *const t2 );
// Test for higher or equal in lattice
int higher_equal( const Type *t ) const { return !cmp(meet(t),t); }
// Variant that drops the speculative part of the types
int higher_equal(const Type *t) const {
return !cmp(meet(t),t->remove_speculative());
}
// Variant that keeps the speculative part of the types
int higher_equal_speculative(const Type *t) const {
return !cmp(meet_speculative(t),t);
}
// MEET operation; lower in lattice.
const Type *meet( const Type *t ) const;
// Variant that drops the speculative part of the types
const Type *meet(const Type *t) const {
return meet_helper(t, false);
}
// Variant that keeps the speculative part of the types
const Type *meet_speculative(const Type *t) const {
return meet_helper(t, true);
}
// WIDEN: 'widens' for Ints and other range types
virtual const Type *widen( const Type *old, const Type* limit ) const { return this; }
// NARROW: complement for widen, used by pessimistic phases
......@@ -221,13 +241,26 @@ public:
// JOIN operation; higher in lattice. Done by finding the dual of the
// meet of the dual of the 2 inputs.
const Type *join( const Type *t ) const {
return dual()->meet(t->dual())->dual(); }
// Variant that drops the speculative part of the types
const Type *join(const Type *t) const {
return join_helper(t, false);
}
// Variant that keeps the speculative part of the types
const Type *join_speculative(const Type *t) const {
return join_helper(t, true);
}
// Modified version of JOIN adapted to the needs of Node::Value.
// Normalizes all empty values to TOP. Does not kill _widen bits.
// Currently, it also works around limitations involving interface types.
virtual const Type *filter( const Type *kills ) const;
// Variant that drops the speculative part of the types
const Type *filter(const Type *kills) const {
return filter_helper(kills, false);
}
// Variant that keeps the speculative part of the types
const Type *filter_speculative(const Type *kills) const {
return filter_helper(kills, true);
}
#ifdef ASSERT
// One type is interface, the other is oop
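
Editor's note: join_helper above keeps the long-standing definition join(t) = dual(meet(dual(this), dual(t))), merely threaded through the new include_speculative flag. On a powerset lattice this identity is exactly De Morgan's law, which makes for a compact sanity check (a toy model, not the compiler's lattice):

#include <cassert>
#include <cstdint>

// Toy powerset lattice over 32 bits: MEET = union (down), dual = complement.
using Ty = uint32_t;
Ty meet(Ty a, Ty b) { return a | b; }
Ty dual(Ty a)       { return ~a; }
Ty join(Ty a, Ty b) { return dual(meet(dual(a), dual(b))); }  // same shape as join_helper

int main() {
  Ty a = 0xF0F0u, b = 0xFF00u;
  assert(join(a, b) == (a & b));    // De Morgan: dual-of-meet is the intersection
  assert(meet(a, b) == meet(b, a)); // meet stays commutative
  return 0;
}
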
......@@ -383,6 +416,8 @@ public:
// Speculative type. See TypeInstPtr
virtual ciKlass* speculative_type() const { return NULL; }
const Type* maybe_remove_speculative(bool include_speculative) const;
virtual const Type* remove_speculative() const { return this; }
private:
// support arrays
......@@ -450,12 +485,14 @@ public:
// upper bound, inclusive.
class TypeInt : public Type {
TypeInt( jint lo, jint hi, int w );
protected:
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
virtual bool singleton(void) const; // TRUE if type is a singleton
virtual bool empty(void) const; // TRUE if type is vacuous
public:
const jint _lo, _hi; // Lower bound, upper bound
const short _widen; // Limit on times we widen this sucker
......@@ -475,7 +512,6 @@ public:
virtual const Type *widen( const Type *t, const Type* limit_type ) const;
virtual const Type *narrow( const Type *t ) const;
// Do not kill _widen bits.
virtual const Type *filter( const Type *kills ) const;
// Convenience common pre-built types.
static const TypeInt *MINUS_1;
static const TypeInt *ZERO;
......@@ -506,6 +542,9 @@ public:
// an upper bound, inclusive.
class TypeLong : public Type {
TypeLong( jlong lo, jlong hi, int w );
protected:
// Do not kill _widen bits.
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
......@@ -530,8 +569,6 @@ public:
virtual const Type *xdual() const; // Compute dual right now.
virtual const Type *widen( const Type *t, const Type* limit_type ) const;
virtual const Type *narrow( const Type *t ) const;
// Do not kill _widen bits.
virtual const Type *filter( const Type *kills ) const;
// Convenience common pre-built types.
static const TypeLong *MINUS_1;
static const TypeLong *ZERO;
......@@ -622,6 +659,7 @@ public:
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
bool ary_must_be_exact() const; // true if arrays of such are never generic
virtual const Type* remove_speculative() const;
#ifdef ASSERT
// One type is interface, the other is oop
virtual bool interface_vs_oop(const Type *t) const;
......@@ -832,7 +870,7 @@ protected:
// utility methods to work on the speculative part of the type
const TypeOopPtr* dual_speculative() const;
const TypeOopPtr* meet_speculative(const TypeOopPtr* other) const;
const TypeOopPtr* xmeet_speculative(const TypeOopPtr* other) const;
bool eq_speculative(const TypeOopPtr* other) const;
int hash_speculative() const;
const TypeOopPtr* add_offset_speculative(intptr_t offset) const;
......@@ -840,6 +878,9 @@ protected:
void dump_speculative(outputStream *st) const;
#endif
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
// Creates a type given a klass. Correctly handles multi-dimensional arrays
// Respects UseUniqueSubclasses.
......@@ -895,16 +936,13 @@ public:
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
virtual const TypeOopPtr* remove_speculative() const;
virtual const Type* remove_speculative() const;
virtual const Type *xmeet(const Type *t) const;
virtual const Type *xdual() const; // Compute dual right now.
// the core of the computation of the meet for TypeOopPtr and for its subclasses
virtual const Type *xmeet_helper(const Type *t) const;
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter( const Type *kills ) const;
// Convenience common pre-built type.
static const TypeOopPtr *BOTTOM;
#ifndef PRODUCT
......@@ -981,7 +1019,7 @@ class TypeInstPtr : public TypeOopPtr {
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
virtual const TypeOopPtr* remove_speculative() const;
virtual const Type* remove_speculative() const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
......@@ -1059,7 +1097,7 @@ public:
virtual bool empty(void) const; // TRUE if type is vacuous
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
virtual const TypeOopPtr* remove_speculative() const;
virtual const Type* remove_speculative() const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
......@@ -1100,6 +1138,8 @@ public:
class TypeMetadataPtr : public TypePtr {
protected:
TypeMetadataPtr(PTR ptr, ciMetadata* metadata, int offset);
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
......@@ -1125,9 +1165,6 @@ public:
virtual intptr_t get_con() const;
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter( const Type *kills ) const;
// Convenience common pre-built types.
static const TypeMetadataPtr *BOTTOM;
......@@ -1141,6 +1178,8 @@ public:
class TypeKlassPtr : public TypePtr {
TypeKlassPtr( PTR ptr, ciKlass* klass, int offset );
protected:
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
......@@ -1202,9 +1241,6 @@ public:
virtual intptr_t get_con() const;
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter( const Type *kills ) const;
// Convenience common pre-built types.
static const TypeKlassPtr* OBJECT; // Not-null object klass or below
static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
......@@ -1228,6 +1264,8 @@ protected:
virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const = 0;
virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const = 0;
virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const = 0;
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
......@@ -1238,9 +1276,6 @@ public:
virtual intptr_t get_con() const;
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter( const Type *kills ) const;
virtual bool empty(void) const; // TRUE if type is vacuous
// returns the equivalent ptr type for this compressed pointer
......@@ -1291,6 +1326,10 @@ public:
static const TypeNarrowOop *BOTTOM;
static const TypeNarrowOop *NULL_PTR;
virtual const Type* remove_speculative() const {
return make(_ptrtype->remove_speculative()->is_ptr());
}
#ifndef PRODUCT
virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
#endif
......
......@@ -26,6 +26,7 @@
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiCodeBlobEvents.hpp"
......@@ -63,6 +64,7 @@ class CodeBlobCollector : StackObj {
// used during a collection
static GrowableArray<JvmtiCodeBlobDesc*>* _global_code_blobs;
static void do_blob(CodeBlob* cb);
static void do_vtable_stub(VtableStub* vs);
public:
CodeBlobCollector() {
_code_blobs = NULL;
......@@ -119,6 +121,10 @@ void CodeBlobCollector::do_blob(CodeBlob* cb) {
if (cb->is_nmethod()) {
return;
}
// exclude VtableStubs, which are processed separately
if (cb->is_buffer_blob() && strcmp(cb->name(), "vtable chunks") == 0) {
return;
}
// check if this starting address has been seen already - the
// assumption is that stubs are inserted into the list before the
......@@ -136,6 +142,13 @@ void CodeBlobCollector::do_blob(CodeBlob* cb) {
_global_code_blobs->append(scb);
}
// called for each VtableStub in VtableStubs
void CodeBlobCollector::do_vtable_stub(VtableStub* vs) {
JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(vs->is_vtable_stub() ? "vtable stub" : "itable stub",
vs->code_begin(), vs->code_end());
_global_code_blobs->append(scb);
}
// collects a list of CodeBlobs in the CodeCache.
//
......@@ -166,6 +179,10 @@ void CodeBlobCollector::collect() {
_global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
}
// Vtable stubs are not described with StubCodeDesc,
// process them separately
VtableStubs::vtable_stub_do(do_vtable_stub);
// next iterate over all the non-nmethod code blobs and add them to
// the list - as noted above this will filter out duplicates and
// enclosing blobs.
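
Editor's note: VtableStubs::vtable_stub_do is a plain function-pointer visitor, which is why do_vtable_stub is a static member: each stub is handed to the callback and appended to the global list. A minimal sketch of that enumeration shape, with made-up types:

#include <cstdio>
#include <vector>

// Hypothetical miniature of the visitor-style enumeration used above.
struct Stub { const char* kind; };

static std::vector<Stub> registry = { {"vtable stub"}, {"itable stub"} };

// The enumerator only knows how to walk its table and call back for each entry.
void stub_do(void (*f)(Stub*)) {
  for (Stub& s : registry) f(&s);
}

static std::vector<const char*> collected;
static void collect(Stub* s) { collected.push_back(s->kind); }

int main() {
  stub_do(collect);
  std::printf("collected %zu stubs\n", collected.size());
  return 0;
}
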
......
......@@ -290,6 +290,7 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "UsePermISM", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseMPSS", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseStringCache", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseOldInlining", JDK_Version::jdk(9), JDK_Version::jdk(10) },
#ifdef PRODUCT
{ "DesiredMethodLimit",
JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
......@@ -878,7 +879,7 @@ bool Arguments::process_argument(const char* arg,
arg_len = equal_sign - argname;
}
Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true);
Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true, true);
if (found_flag != NULL) {
char locked_message_buf[BUFLEN];
found_flag->get_locked_message(locked_message_buf, BUFLEN);
......
......@@ -62,6 +62,14 @@ ARCH_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, \
MATERIALIZE_FLAGS_EXT
static bool is_product_build() {
#ifdef PRODUCT
return true;
#else
return false;
#endif
}
void Flag::check_writable() {
if (is_constant_in_binary()) {
fatal(err_msg("flag is constant: %s", _name));
......@@ -235,6 +243,27 @@ bool Flag::is_unlocked() const {
// Get custom message for this locked flag, or return NULL if
// none is available.
void Flag::get_locked_message(char* buf, int buflen) const {
buf[0] = '\0';
if (is_diagnostic() && !is_unlocked()) {
jio_snprintf(buf, buflen, "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n",
_name);
return;
}
if (is_experimental() && !is_unlocked()) {
jio_snprintf(buf, buflen, "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n",
_name);
return;
}
if (is_develop() && is_product_build()) {
jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n",
_name);
return;
}
if (is_notproduct() && is_product_build()) {
jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n",
_name);
return;
}
get_locked_message_ext(buf, buflen);
}
......@@ -464,13 +493,13 @@ inline bool str_equal(const char* s, const char* q, size_t len) {
}
// Search the flag table for a named flag
Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) {
Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
if (str_equal(current->_name, name, length)) {
// Found a matching entry.
// Don't report notproduct and develop flags in product builds.
if (current->is_constant_in_binary()) {
return NULL;
return (return_flag == true ? current : NULL);
}
// Report locked flags only if allowed.
if (!(current->is_unlocked() || current->is_unlocker())) {
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -241,7 +241,7 @@ struct Flag {
// number of flags
static size_t numFlags;
static Flag* find_flag(const char* name, size_t length, bool allow_locked = false);
static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
void check_writable();
......@@ -1258,6 +1258,9 @@ class CommandLineFlags {
develop(bool, TraceJNICalls, false, \
"Trace JNI calls") \
\
develop(bool, StressRewriter, false, \
"Stress linktime bytecode rewriting") \
\
notproduct(bool, TraceJVMCalls, false, \
"Trace JVM calls") \
\
......
......@@ -1081,7 +1081,6 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
}
#ifndef PRODUCT
// Check if in metaspace.
if (ClassLoaderDataGraph::contains((address)addr)) {
// Use addr->print() from the debugger instead (not here)
......@@ -1089,7 +1088,6 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
" is pointing into metadata", addr);
return;
}
#endif
// Try an OS specific find
if (os::find(addr, st)) {
......
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -58,7 +58,7 @@ class ResourceArray: public ResourceObj {
void initialize(size_t esize, int length) {
assert(length >= 0, "illegal length");
assert(_data == NULL, "must be new object");
assert(StressRewriter || _data == NULL, "must be new object");
_length = length;
_data = resource_allocate_bytes(esize * length);
DEBUG_ONLY(init_nesting();)
......
......@@ -107,7 +107,7 @@ void BitMap::par_put_range_within_word(idx_t beg, idx_t end, bool value) {
while (true) {
intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
if (res == w) break;
w = *pw;
w = res;
nw = value ? (w | ~mr) : (w & mr);
}
}
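
Editor's note: the one-word fix above is the standard CAS-retry refinement. cmpxchg already returns the value it observed, so reloading *pw is redundant and can even pick up a third value written between the failed CAS and the reload, wasting an iteration. The same idiom in portable C++, a sketch using std::atomic rather than HotSpot's Atomic, with simplified mask semantics:

#include <atomic>
#include <cassert>
#include <cstdint>

// Set the masked bits of 'word' to 'value' atomically, retrying on contention.
void par_put_bits(std::atomic<uintptr_t>& word, uintptr_t mask, bool value) {
  uintptr_t w = word.load();
  uintptr_t nw = value ? (w | mask) : (w & ~mask);
  // compare_exchange updates 'w' with the observed value on failure --
  // the same information Atomic::cmpxchg_ptr returns in 'res' above.
  while (!word.compare_exchange_weak(w, nw)) {
    nw = value ? (w | mask) : (w & ~mask);
  }
}

int main() {
  std::atomic<uintptr_t> word{0};
  par_put_bits(word, 0xFF0, true);
  assert(word.load() == 0xFF0);
  par_put_bits(word, 0x0F0, false);
  assert(word.load() == 0xF00);
  return 0;
}
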
......
......@@ -27,7 +27,7 @@
* @bug 6826736
* @summary CMS: core dump with -XX:+UseCompressedOops
*
* @run main/othervm/timeout=600 -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test
* @run main/othervm/timeout=600 -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 -Xmx256m -XX:ParallelGCThreads=4 Test
*/
public class Test {
......
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -39,20 +39,32 @@ public class TestAESMain {
System.out.println(iters + " iterations");
TestAESEncode etest = new TestAESEncode();
etest.prepare();
// warm-up for 20K iterations
System.out.println("Starting encryption warm-up");
for (int i=0; i<20000; i++) {
etest.run();
}
System.out.println("Finished encryption warm-up");
long start = System.nanoTime();
for (int i=0; i<iters; i++) {
etest.run();
}
long end = System.nanoTime();
System.out.println("TestAESEncode runtime was " + (double)((end - start)/1000000000.0) + " ms");
System.out.println("TestAESEncode runtime was " + (double)((end - start)/1000000.0) + " ms");
TestAESDecode dtest = new TestAESDecode();
dtest.prepare();
// warm-up for 20K iterations
System.out.println("Starting decryption warm-up");
for (int i=0; i<20000; i++) {
dtest.run();
}
System.out.println("Finished decryption warm-up");
start = System.nanoTime();
for (int i=0; i<iters; i++) {
dtest.run();
}
end = System.nanoTime();
System.out.println("TestAESDecode runtime was " + (double)((end - start)/1000000000.0) + " ms");
System.out.println("TestAESDecode runtime was " + (double)((end - start)/1000000.0) + " ms");
}
}
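
Editor's note: the printout fix above is pure unit arithmetic. System.nanoTime deltas are nanoseconds, and one millisecond is 1,000,000 ns, so the old divisor of 1e9 reported seconds under an "ms" label. The conversion, spelled out as a sketch:

#include <cassert>

int main() {
  long long elapsed_ns = 3500000000LL;             // 3.5 seconds' worth of nanoseconds
  double wrong_ms = elapsed_ns / 1000000000.0;     // old divisor: this is actually seconds
  double right_ms = elapsed_ns / 1000000.0;        // 1 ms = 1,000,000 ns
  assert(wrong_ms == 3.5);
  assert(right_ms == 3500.0);
  return 0;
}
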
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8027422
* @summary type methods shouldn't always operate on speculative part
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:TypeProfileLevel=222 -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:-BackgroundCompilation TestSpeculationFailedHigherEqual
*
*/
public class TestSpeculationFailedHigherEqual {
static class A {
void m() {}
int i;
}
static class C extends A {
}
static C c;
static A m1(A a, boolean cond) {
// speculative type for a is C not null
if (cond) {
a = c;
}
// speculative type for a is C (may be null)
int i = a.i;
return a;
}
static public void main(String[] args) {
C c = new C();
TestSpeculationFailedHigherEqual.c = c;
for (int i = 0; i < 20000; i++) {
m1(c, i%2 == 0);
}
System.out.println("TEST PASSED");
}
}
......@@ -34,10 +34,6 @@ import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
......@@ -56,6 +52,7 @@ import com.sun.jdi.event.EventQueue;
import com.sun.jdi.event.EventSet;
import com.sun.jdi.event.ModificationWatchpointEvent;
import com.sun.jdi.event.VMDeathEvent;
import com.sun.jdi.event.VMStartEvent;
import com.sun.jdi.event.VMDisconnectEvent;
import com.sun.jdi.request.ClassPrepareRequest;
import com.sun.jdi.request.EventRequest;
......@@ -71,24 +68,10 @@ public class FieldMonitor {
public static void main(String[] args)
throws IOException, InterruptedException {
StringBuffer sb = new StringBuffer();
for (int i=0; i < args.length; i++) {
sb.append(' ');
sb.append(args[i]);
}
//VirtualMachine vm = launchTarget(sb.toString());
VirtualMachine vm = launchTarget(CLASS_NAME);
System.out.println("Vm launched");
// set watch field on already loaded classes
List<ReferenceType> referenceTypes = vm
.classesByName(CLASS_NAME);
for (ReferenceType refType : referenceTypes) {
addFieldWatch(vm, refType);
}
// watch for loaded classes
addClassWatch(vm);
// process events
EventQueue eventQueue = vm.eventQueue();
......@@ -104,13 +87,15 @@ public class FieldMonitor {
errThread.start();
outThread.start();
vm.resume();
boolean connected = true;
int watched = 0;
while (connected) {
EventSet eventSet = eventQueue.remove();
for (Event event : eventSet) {
if (event instanceof VMDeathEvent
System.out.println("FieldMonitor-main receives: "+event);
if (event instanceof VMStartEvent) {
addClassWatch(vm);
} else if (event instanceof VMDeathEvent
|| event instanceof VMDisconnectEvent) {
// exit
connected = false;
......@@ -122,17 +107,17 @@ public class FieldMonitor {
.referenceType();
addFieldWatch(vm, refType);
} else if (event instanceof ModificationWatchpointEvent) {
watched++;
System.out.println("sleep for 500 ms");
Thread.sleep(500);
System.out.println("resume...");
ModificationWatchpointEvent modEvent = (ModificationWatchpointEvent) event;
System.out.println("old="
+ modEvent.valueCurrent());
System.out.println("new=" + modEvent.valueToBe());
System.out.println();
}
}
System.out.println("resume...");
eventSet.resume();
}
// Shutdown begins when event thread terminates
......@@ -142,6 +127,10 @@ public class FieldMonitor {
} catch (InterruptedException exc) {
// we don't interrupt
}
if (watched != 11) { // init + 10 modifications in TestPostFieldModification class
throw new Error("Expected to receive ModificationWatchpointEvent 11 times, but got " + watched);
}
}
/**
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -33,8 +33,7 @@ import com.oracle.java.testlibrary.*;
public class CompilerConfigFileWarning {
public static void main(String[] args) throws Exception {
String vmVersion = System.getProperty("java.vm.version");
if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
if (Platform.isDebugBuild()) {
System.out.println("Skip on debug builds since we'll always read the file there");
return;
}
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -33,8 +33,7 @@ import com.oracle.java.testlibrary.*;
public class ConfigFileWarning {
public static void main(String[] args) throws Exception {
String vmVersion = System.getProperty("java.vm.version");
if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
if (Platform.isDebugBuild()) {
System.out.println("Skip on debug builds since we'll always read the file there");
return;
}
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8027314
* @summary Warn if diagnostic or experimental vm option is used and -XX:+UnlockDiagnosticVMOptions or -XX:+UnlockExperimentalVMOptions, respectively, isn't specified. Warn if develop or notproduct vm option is used with product version of VM.
* @library /testlibrary
*/
import com.oracle.java.testlibrary.*;
public class VMOptionWarning {
public static void main(String[] args) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PredictedLoadedClassCount", "-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("Error: VM option 'PredictedLoadedClassCount' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.");
if (Platform.isDebugBuild()) {
System.out.println("Skip the rest of the tests on debug builds since diagnostic, develop, and notproduct options are available on debug builds.");
return;
}
pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintInlining", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Error: VM option 'PrintInlining' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.");
pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJNICalls", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Error: VM option 'TraceJNICalls' is develop and is available only in debug version of VM.");
pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJVMCalls", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Error: VM option 'TraceJVMCalls' is notproduct and is available only in debug version of VM.");
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @bug 8032024
* @bug 8025937
* @bug 8033528
* @summary [JDK 8] Test invokespecial and invokeinterface with the same JVM_CONSTANT_InterfaceMethodref
* @run main/othervm -XX:+StressRewriter InvokespecialInterface
*/
import java.util.function.*;
import java.util.*;

interface I {
    default void imethod() { System.out.println("I::imethod"); }
}

class C implements I {
    public void foo() { I.super.imethod(); }        // invokespecial of the InterfaceMethodref
    public void bar() { I i = this; i.imethod(); }  // invokeinterface of the same InterfaceMethodref
    public void doSomeInvokedynamic() {
        String str = "world";
        Supplier<String> foo = () -> "hello, " + str;
        String res = foo.get();
        System.out.println(res);
    }
}

public class InvokespecialInterface {
    public static void main(String[] unused) {
        // Create a C and exercise all three call sites.
        C c = new C();
        c.foo();
        c.bar();
        c.doSomeInvokedynamic();
    }
}
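
For reference, the lambda in doSomeInvokedynamic() compiles to an invokedynamic instruction bootstrapped by java.lang.invoke.LambdaMetafactory. Below is a minimal sketch of an equivalent explicit bootstrap; the class and method names (IndyEquivalent, lambdaBody) are illustrative, not part of the test:

import java.lang.invoke.*;
import java.util.function.Supplier;

public class IndyEquivalent {
    // Stands in for the compiler-generated lambda body; the captured
    // variable str becomes the leading parameter.
    private static String lambdaBody(String str) { return "hello, " + str; }

    public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup lookup = MethodHandles.lookup();
        MethodHandle impl = lookup.findStatic(IndyEquivalent.class, "lambdaBody",
                MethodType.methodType(String.class, String.class));
        CallSite site = LambdaMetafactory.metafactory(
                lookup,
                "get",                                                // Supplier.get()
                MethodType.methodType(Supplier.class, String.class),  // call-site type: (captured String) -> Supplier
                MethodType.methodType(Object.class),                  // erased SAM signature: ()Object
                impl,
                MethodType.methodType(String.class));                 // instantiated signature: ()String
        @SuppressWarnings("unchecked")
        Supplier<String> foo = (Supplier<String>) site.getTarget().invoke("world");
        System.out.println(foo.get());   // prints "hello, world"
    }
}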
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @bug 8032010
* @summary method lookup on an abstract method in a concrete class should be successful
* @run main TestConcreteClassWithAbstractMethod
*/
import jdk.internal.org.objectweb.asm.ClassWriter;
import jdk.internal.org.objectweb.asm.MethodVisitor;
import static jdk.internal.org.objectweb.asm.Opcodes.*;
/*
 * Generated hierarchy (T2 is a concrete class declaring an abstract method,
 * which javac would reject, so the class files are assembled with ASM):
 *
 *   class T1            { public int m() { return 3; } }
 *   class T2 extends T1 { public abstract int m(); }   // class itself lacks ACC_ABSTRACT
 *   class T3 extends T2 { public int m() { return 2; } }
 *
 * Call site: T3.test() { invokevirtual T2.m() }
 * T3.m() should be invoked.
 */
public class TestConcreteClassWithAbstractMethod {
    static final String classT1 = "p1.T1";
    static final String classT2 = "p1.T2";
    static final String classT3 = "p1.T3";
    static final String callerName = classT3;

    public static void main(String[] args) throws Exception {
        // Define the three ASM-generated classes with a dedicated loader.
        ClassLoader cl = new ClassLoader() {
            public Class<?> loadClass(String name) throws ClassNotFoundException {
                if (findLoadedClass(name) != null) {
                    return findLoadedClass(name);
                }
                if (classT1.equals(name)) {
                    byte[] classFile = dumpT1();
                    return defineClass(classT1, classFile, 0, classFile.length);
                }
                if (classT2.equals(name)) {
                    byte[] classFile = dumpT2();
                    return defineClass(classT2, classFile, 0, classFile.length);
                }
                if (classT3.equals(name)) {
                    byte[] classFile = dumpT3();
                    return defineClass(classT3, classFile, 0, classFile.length);
                }
                return super.loadClass(name);
            }
        };

        cl.loadClass(classT1);
        cl.loadClass(classT2);
        cl.loadClass(classT3);

        //cl.loadClass(callerName).getDeclaredMethod("m");
        cl.loadClass(callerName).newInstance();

        int result = (Integer) cl.loadClass(callerName).getDeclaredMethod("test").invoke(null);
        System.out.println(result);
    }

    public static byte[] dumpT1() {
        ClassWriter cw = new ClassWriter(0);
        MethodVisitor mv;
        cw.visit(52, ACC_PUBLIC | ACC_SUPER, "p1/T1", null, "java/lang/Object", null);
        {
            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
            mv.visitCode();
            mv.visitVarInsn(ALOAD, 0);
            mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
            mv.visitInsn(RETURN);
            mv.visitMaxs(1, 1);
            mv.visitEnd();
        }
        {
            // public int m() { System.out.print("p1/T1.m()"); return 3; }
            mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
            mv.visitCode();
            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
            mv.visitLdcInsn("p1/T1.m()");
            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "print", "(Ljava/lang/String;)V", false);
            mv.visitIntInsn(BIPUSH, 3);
            mv.visitInsn(IRETURN);
            mv.visitMaxs(2, 1);
            mv.visitEnd();
        }
        cw.visitEnd();
        return cw.toByteArray();
    }

    public static byte[] dumpT2() {
        ClassWriter cw = new ClassWriter(0);
        MethodVisitor mv;
        // Note: the class itself is NOT marked ACC_ABSTRACT ...
        cw.visit(52, ACC_PUBLIC | ACC_SUPER, "p1/T2", null, "p1/T1", null);
        {
            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
            mv.visitCode();
            mv.visitVarInsn(ALOAD, 0);
            mv.visitMethodInsn(INVOKESPECIAL, "p1/T1", "<init>", "()V", false);
            mv.visitInsn(RETURN);
            mv.visitMaxs(1, 1);
            mv.visitEnd();
        }
        {
            // ... yet it declares an abstract m(); this is the situation under test.
            mv = cw.visitMethod(ACC_PUBLIC | ACC_ABSTRACT, "m", "()I", null, null);
            mv.visitEnd();
        }
        cw.visitEnd();
        return cw.toByteArray();
    }

    public static byte[] dumpT3() {
        ClassWriter cw = new ClassWriter(0);
        MethodVisitor mv;
        cw.visit(52, ACC_PUBLIC | ACC_SUPER, "p1/T3", null, "p1/T2", null);
        {
            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
            mv.visitCode();
            mv.visitVarInsn(ALOAD, 0);
            mv.visitMethodInsn(INVOKESPECIAL, "p1/T2", "<init>", "()V", false);
            mv.visitInsn(RETURN);
            mv.visitMaxs(1, 1);
            mv.visitEnd();
        }
        {
            // public int m() { System.out.print("p1/T3.m()"); return 2; }
            mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
            mv.visitCode();
            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
            mv.visitLdcInsn("p1/T3.m()");
            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "print", "(Ljava/lang/String;)V", false);
            mv.visitIntInsn(BIPUSH, 2);
            mv.visitInsn(IRETURN);
            mv.visitMaxs(2, 1);
            mv.visitEnd();
        }
        {
            // static int test() { return new T3().m(); } resolved against T2.m()
            mv = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "test", "()I", null, null);
            mv.visitCode();
            mv.visitTypeInsn(NEW, "p1/T3");
            mv.visitInsn(DUP);
            mv.visitMethodInsn(INVOKESPECIAL, "p1/T3", "<init>", "()V", false);
            mv.visitMethodInsn(INVOKEVIRTUAL, "p1/T2", "m", "()I", false);
            mv.visitInsn(IRETURN);
            mv.visitMaxs(3, 2);
            mv.visitEnd();
        }
        cw.visitEnd();
        return cw.toByteArray();
    }
}
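
For readability, here is a source-level sketch of the classes that dumpT1/dumpT2/dumpT3 emit. T2 as written below is deliberately not valid Java source (a class not marked abstract cannot declare an abstract method), which is exactly why the test assembles the class files directly with ASM:

// Source-level equivalent (sketch only; T2 would not compile with javac):
package p1;

class T1 {
    public int m() { System.out.print("p1/T1.m()"); return 3; }
}

class T2 extends T1 {
    public abstract int m();   // abstract method in a class lacking ACC_ABSTRACT
}

class T3 extends T2 {
    public int m() { System.out.print("p1/T3.m()"); return 2; }

    public static int test() {
        return new T3().m();   // invokevirtual p1/T2.m()I at the bytecode level
    }
}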