Commit 094c636e authored by asaha

Merge

......@@ -931,6 +931,8 @@ c2c4db2a42a215c98a4f027edb8bbb00dd62d9b9 jdk8u112-b14
b28d012a24cab8f4ceeee0c9d3252969757423ed jdk8u112-b15
e134dc1879b72124e478be01680b0646a2fbf585 jdk8u112-b16
87440ed4e1de7753a436f957d35555d8b4e26f1d jdk8u112-b31
ba25f5833a128b8062e597f794efda26b30f095d jdk8u112-b32
919ffdca10c2721ee0f6f233e704709174556510 jdk8u112-b33
3b0e5f01891f5ebbf67797b1aae786196f1bb4f6 jdk8u121-b00
251a2493b1857f2ff4f11eab2dfd8b2fe8ed441b jdk8u121-b01
70c4a50f576a01ec975d0a02b3642ee33db39ed8 jdk8u121-b02
......@@ -945,6 +947,12 @@ f26f6895c9dfb32dfb4c228d69b371d8ab118536 jdk8u121-b10
11f91811e4d7e5ddfaf938dcf386ec8fe5bf7b7c jdk8u121-b11
b132b08b28bf23a26329928cf6b4ffda5857f4d3 jdk8u121-b12
90f94521c3515e5f27af0ab9b31d036e88bb322a jdk8u121-b13
351bf1d4ff9a41137f91e2ec97ec59ed29a38d8b jdk8u121-b31
41daac438a2ac5a80755dc3de88b76e4ac66750a jdk8u121-b32
eb9e617d6f64d4ad689feac0707b5e4335b00ce2 jdk8u121-b33
c60b0994e8eee152666252c3ba4105db65c004db jdk8u121-b34
0612a789929b88612509668bea4b3138613e91e4 jdk8u121-b35
0ea269e49511a890e6fabfd468638dd1c0ed0be3 jdk8u121-b36
c0a1ba0df20fda10ddb8599e888eb56ad98b3874 jdk8u131-b00
0b85ccd6240991e1a501602ff5addec6b88ae0af jdk8u131-b01
ef90c721a4e59b01ca36f25619010a1afe9ed4d5 jdk8u131-b02
......@@ -957,6 +965,10 @@ db221c0a423e776bec5c3198d11d3f26827bd786 jdk8u131-b08
56e71d16083904ceddfdd1d66312582a42781646 jdk8u131-b09
1da23ae49386608550596502d90a381ee6c1dfaa jdk8u131-b10
829ea9b92cda9545652f1b309f56c57383024ebb jdk8u131-b11
41e0713bcca27cef5d6a9afd44c7ca4811937713 jdk8u131-b31
e318654a4fa352a06935dd56eebf88ae387b31f9 jdk8u131-b32
32998fc932dc58c6bbac185cc17d2752fa6dba4c jdk8u131-b33
50b3fa6791f46bc582528bdc7f6311b3b6832c51 jdk8u131-b34
692bc6b674dcab72453de08ee9da0856a7e41c0f jdk8u141-b00
0cee0db0180b64655751e7058c251103f9660f85 jdk8u141-b01
82435799636c8b50a090aebcb5af49946afa7bb5 jdk8u141-b02
......
......@@ -4261,6 +4261,7 @@ void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Lab
assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
Register end = count;
int cache_line_size = VM_Version::prefetch_data_size();
assert(cache_line_size > 0, "cache line size should be known for this code");
// Minimum count when BIS zeroing can be used since
// it needs membar which is expensive.
int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
......
......@@ -74,7 +74,7 @@ void VM_Version::initialize() {
AllocatePrefetchDistance = AllocatePrefetchStepSize;
}
if (AllocatePrefetchStyle == 3 && !has_blk_init()) {
if (AllocatePrefetchStyle == 3 && (!has_blk_init() || cache_line_size <= 0)) {
warning("BIS instructions are not available on this CPU");
FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
}
......@@ -138,7 +138,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
}
if (is_niagara_plus()) {
if (has_blk_init() && UseTLAB &&
if (has_blk_init() && (cache_line_size > 0) && UseTLAB &&
FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
// Use BIS instruction for TLAB allocation prefetch.
FLAG_SET_ERGO(intx, AllocatePrefetchInstr, 1);
......@@ -236,7 +236,7 @@ void VM_Version::initialize() {
assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
char buf[512];
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
(has_hardware_popc() ? ", popc" : ""),
(has_vis1() ? ", vis1" : ""),
......@@ -249,6 +249,7 @@ void VM_Version::initialize() {
(has_sha256() ? ", sha256" : ""),
(has_sha512() ? ", sha512" : ""),
(is_ultra3() ? ", ultra3" : ""),
(has_sparc5_instr() ? ", sparc5" : ""),
(is_sun4v() ? ", sun4v" : ""),
(is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
(is_sparc64() ? ", sparc64" : ""),
......@@ -364,6 +365,7 @@ void VM_Version::initialize() {
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
tty->print_cr("L2 data cache line size: %u", L2_data_cache_line_size());
tty->print("Allocation");
if (AllocatePrefetchStyle <= 0) {
......@@ -447,9 +449,10 @@ void VM_Version::revert() {
unsigned int VM_Version::calc_parallel_worker_threads() {
unsigned int result;
if (is_M_series()) {
// for now, use same gc thread calculation for M-series as for niagara-plus
// in future, we may want to tweak parameters for nof_parallel_worker_thread
if (is_M_series() || is_S_series()) {
// for now, use same gc thread calculation for M-series and S-series as for
// niagara-plus. In future, we may want to tweak parameters for
// nof_parallel_worker_thread
result = nof_parallel_worker_threads(5, 16, 8);
} else if (is_niagara_plus()) {
result = nof_parallel_worker_threads(5, 16, 8);
......@@ -458,3 +461,37 @@ unsigned int VM_Version::calc_parallel_worker_threads() {
}
return result;
}
int VM_Version::parse_features(const char* implementation) {
int features = unknown_m;
// Convert to UPPER case before compare.
char* impl = os::strdup(implementation);
for (int i = 0; impl[i] != 0; i++)
impl[i] = (char)toupper((uint)impl[i]);
if (strstr(impl, "SPARC64") != NULL) {
features |= sparc64_family_m;
} else if (strstr(impl, "SPARC-M") != NULL) {
// M-series SPARC is based on T-series.
features |= (M_family_m | T_family_m);
} else if (strstr(impl, "SPARC-S") != NULL) {
// S-series SPARC is based on T-series.
features |= (S_family_m | T_family_m);
} else if (strstr(impl, "SPARC-T") != NULL) {
features |= T_family_m;
if (strstr(impl, "SPARC-T1") != NULL) {
features |= T1_model_m;
}
} else if (strstr(impl, "SUN4V-CPU") != NULL) {
// Generic or migration class LDOM
features |= T_family_m;
} else {
#ifndef PRODUCT
warning("Failed to parse CPU implementation = '%s'", impl);
#endif
}
os::free((void*)impl);
return features;
}
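Reviewer note: a minimal standalone sketch of the uppercase-then-substring matching approach that `parse_features` uses above. The mask constants here are placeholders, not HotSpot's actual `Feature_Flag_Set` bit assignments.

```cpp
#include <cctype>
#include <cstdio>
#include <string>

// Hypothetical stand-ins for the Feature_Flag_Set masks in vm_version_sparc.hpp.
enum {
  unknown_m        = 0,
  sparc64_family_m = 1 << 0,
  M_family_m       = 1 << 1,
  S_family_m       = 1 << 2,
  T_family_m       = 1 << 3,
  T1_model_m       = 1 << 4
};

// Sketch of the matching logic: upper-case the implementation string,
// then classify by substring, treating M- and S-series as T-based.
static int parse_features_sketch(const char* implementation) {
  std::string impl(implementation);
  for (size_t i = 0; i < impl.size(); i++) {
    impl[i] = (char)std::toupper((unsigned char)impl[i]);
  }
  int features = unknown_m;
  if (impl.find("SPARC64") != std::string::npos) {
    features |= sparc64_family_m;
  } else if (impl.find("SPARC-M") != std::string::npos) {
    features |= (M_family_m | T_family_m);   // M-series is T-based
  } else if (impl.find("SPARC-S") != std::string::npos) {
    features |= (S_family_m | T_family_m);   // S-series is T-based
  } else if (impl.find("SPARC-T") != std::string::npos) {
    features |= T_family_m;
    if (impl.find("SPARC-T1") != std::string::npos) features |= T1_model_m;
  } else if (impl.find("SUN4V-CPU") != std::string::npos) {
    features |= T_family_m;                  // generic/migration-class LDOM
  }
  return features;
}

int main() {
  std::printf("%d\n", parse_features_sketch("SPARC-M7"));  // M_family_m | T_family_m
  std::printf("%d\n", parse_features_sketch("SPARC-T1"));  // T_family_m | T1_model_m
  return 0;
}
```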
......@@ -47,13 +47,14 @@ protected:
cbcond_instructions = 13,
sparc64_family = 14,
M_family = 15,
T_family = 16,
T1_model = 17,
sparc5_instructions = 18,
aes_instructions = 19,
sha1_instruction = 20,
sha256_instruction = 21,
sha512_instruction = 22
S_family = 16,
T_family = 17,
T1_model = 18,
sparc5_instructions = 19,
aes_instructions = 20,
sha1_instruction = 21,
sha256_instruction = 22,
sha512_instruction = 23
};
enum Feature_Flag_Set {
......@@ -76,6 +77,7 @@ protected:
cbcond_instructions_m = 1 << cbcond_instructions,
sparc64_family_m = 1 << sparc64_family,
M_family_m = 1 << M_family,
S_family_m = 1 << S_family,
T_family_m = 1 << T_family,
T1_model_m = 1 << T1_model,
sparc5_instructions_m = 1 << sparc5_instructions,
......@@ -105,6 +107,7 @@ protected:
// Returns true if the platform is in the niagara line (T series)
static bool is_M_family(int features) { return (features & M_family_m) != 0; }
static bool is_S_family(int features) { return (features & S_family_m) != 0; }
static bool is_T_family(int features) { return (features & T_family_m) != 0; }
static bool is_niagara() { return is_T_family(_features); }
#ifdef ASSERT
......@@ -119,7 +122,7 @@ protected:
static bool is_T1_model(int features) { return is_T_family(features) && ((features & T1_model_m) != 0); }
static int maximum_niagara1_processor_count() { return 32; }
static int parse_features(const char* implementation);
public:
// Initialization
static void initialize();
......@@ -152,6 +155,7 @@ public:
static bool is_niagara_plus() { return is_T_family(_features) && !is_T1_model(_features); }
static bool is_M_series() { return is_M_family(_features); }
static bool is_S_series() { return is_S_family(_features); }
static bool is_T4() { return is_T_family(_features) && has_cbcond(); }
static bool is_T7() { return is_T_family(_features) && has_sparc5_instr(); }
......
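Reviewer note: the enum reshuffle above (inserting `S_family = 16` pushes every later flag up by one) stays consistent because each `*_m` mask is always derived as `1 << flag`. A tiny illustration of that flag/mask pairing with invented feature names, not the real VM_Version flags:

```cpp
#include <cstdio>

// Illustrative only: mirrors the "flag index" / "1 << flag" mask pairing.
enum Feature_Flag {
  feature_a = 0,
  feature_b = 1,
  feature_c = 2   // inserting a new flag here renumbers everything after it
};

enum Feature_Flag_Set {
  feature_a_m = 1 << feature_a,
  feature_b_m = 1 << feature_b,
  feature_c_m = 1 << feature_c
};

static bool has_feature(int features, int mask) { return (features & mask) != 0; }

int main() {
  int features = feature_a_m | feature_c_m;
  std::printf("a=%d b=%d c=%d\n",
              has_feature(features, feature_a_m),
              has_feature(features, feature_b_m),
              has_feature(features, feature_c_m));
  return 0;
}
```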
......@@ -406,6 +406,8 @@ void VM_Version::get_processor_features() {
_stepping = 0;
_cpuFeatures = 0;
_logical_processors_per_package = 1;
// i486 internal cache is both I&D and has a 16-byte line size
_L1_data_cache_line_size = 16;
if (!Use486InstrsOnly) {
// Get raw processor info
......@@ -424,6 +426,7 @@ void VM_Version::get_processor_features() {
// Logical processors are only available on P4s and above,
// and only if hyperthreading is available.
_logical_processors_per_package = logical_processor_count();
_L1_data_cache_line_size = L1_line_size();
}
}
......@@ -1034,6 +1037,7 @@ void VM_Version::get_processor_features() {
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Logical CPUs per core: %u",
logical_processors_per_package());
tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
tty->print("UseSSE=%d", (int) UseSSE);
if (UseAVX > 0) {
tty->print(" UseAVX=%d", (int) UseAVX);
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -595,7 +595,7 @@ public:
return (result == 0 ? 1 : result);
}
static intx prefetch_data_size() {
static intx L1_line_size() {
intx result = 0;
if (is_intel()) {
result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
......@@ -607,6 +607,10 @@ public:
return result;
}
static intx prefetch_data_size() {
return L1_line_size();
}
//
// Feature identification
//
......
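Reviewer note: the new `L1_line_size()` accessor above reads the Intel deterministic cache parameters leaf (CPUID leaf 4), where EBX bits 11:0 hold the coherency line size minus one. Below is a hedged standalone sketch of that computation using GCC's `<cpuid.h>`; it covers only the Intel path (the non-Intel branch is elided in this hunk) and assumes leaf 4 is supported on the running CPU, whereas HotSpot goes through its cached `_cpuid_info` structures.

```cpp
#include <cpuid.h>
#include <cstdio>

// Sketch: query CPUID leaf 4, sub-leaf 0 (first cache level reported) and
// derive the line size from EBX[11:0] + 1.
static unsigned l1_line_size_sketch() {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  __cpuid_count(4, 0, eax, ebx, ecx, edx);
  if ((eax & 0x1f) == 0) {
    return 0;               // cache type field 0: no cache info at this sub-leaf
  }
  return (ebx & 0xfff) + 1; // system coherency line size in bytes
}

int main() {
  std::printf("L1 line size (sketch): %u bytes\n", l1_line_size_sketch());
  return 0;
}
```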
......@@ -2859,7 +2859,7 @@ void os::Linux::rebuild_cpu_to_node_map() {
// in the library.
const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
size_t cpu_num = os::active_processor_count();
size_t cpu_num = processor_count();
size_t cpu_map_size = NCPUS / BitsPerCLong;
size_t cpu_map_valid_size =
MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
......
......@@ -262,6 +262,7 @@ void PICL::close_library() {
// We need to keep these here as long as we have to build on Solaris
// versions before 10.
#ifndef SI_ARCHITECTURE_32
#define SI_ARCHITECTURE_32 516 /* basic 32-bit SI_ARCHITECTURE */
#endif
......@@ -270,231 +271,233 @@ void PICL::close_library() {
#define SI_ARCHITECTURE_64 517 /* basic 64-bit SI_ARCHITECTURE */
#endif
static void do_sysinfo(int si, const char* string, int* features, int mask) {
char tmp;
size_t bufsize = sysinfo(si, &tmp, 1);
#ifndef SI_CPUBRAND
#define SI_CPUBRAND 523 /* return cpu brand string */
#endif
// All SI defines used below must be supported.
guarantee(bufsize != -1, "must be supported");
class Sysinfo {
char* _string;
public:
Sysinfo(int si) : _string(NULL) {
char tmp;
size_t bufsize = sysinfo(si, &tmp, 1);
if (bufsize != -1) {
char* buf = (char*) os::malloc(bufsize, mtInternal);
if (buf != NULL) {
if (sysinfo(si, buf, bufsize) == bufsize) {
_string = buf;
} else {
os::free(buf);
}
}
}
}
char* buf = (char*) malloc(bufsize);
~Sysinfo() {
if (_string != NULL) {
os::free(_string);
}
}
if (buf == NULL)
return;
const char* value() const {
return _string;
}
if (sysinfo(si, buf, bufsize) == bufsize) {
// Compare the string.
if (strcmp(buf, string) == 0) {
*features |= mask;
}
bool valid() const {
return _string != NULL;
}
free(buf);
}
bool match(const char* s) const {
return valid() ? strcmp(_string, s) == 0 : false;
}
bool match_substring(const char* s) const {
return valid() ? strstr(_string, s) != NULL : false;
}
};
class Sysconf {
int _value;
public:
Sysconf(int sc) : _value(-1) {
_value = sysconf(sc);
}
bool valid() const {
return _value != -1;
}
int value() const {
return _value;
}
};
#ifndef _SC_DCACHE_LINESZ
#define _SC_DCACHE_LINESZ 508 /* Data cache line size */
#endif
#ifndef _SC_L2CACHE_LINESZ
#define _SC_L2CACHE_LINESZ 527 /* Size of L2 cache line */
#endif
int VM_Version::platform_features(int features) {
// getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are
// supported on Solaris 10 and later.
if (os::Solaris::supports_getisax()) {
assert(os::Solaris::supports_getisax(), "getisax() must be available");
// Check 32-bit architecture.
do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
// Check 32-bit architecture.
if (Sysinfo(SI_ARCHITECTURE_32).match("sparc")) {
features |= v8_instructions_m;
}
// Check 64-bit architecture.
do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
// Check 64-bit architecture.
if (Sysinfo(SI_ARCHITECTURE_64).match("sparcv9")) {
features |= generic_v9_m;
}
// Extract valid instruction set extensions.
uint_t avs[2];
uint_t avn = os::Solaris::getisax(avs, 2);
assert(avn <= 2, "should return two or less av's");
uint_t av = avs[0];
// Extract valid instruction set extensions.
uint_t avs[2];
uint_t avn = os::Solaris::getisax(avs, 2);
assert(avn <= 2, "should return two or less av's");
uint_t av = avs[0];
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print("getisax(2) returned: " PTR32_FORMAT, av);
if (avn > 1) {
tty->print(", " PTR32_FORMAT, avs[1]);
}
tty->cr();
if (PrintMiscellaneous && Verbose) {
tty->print("getisax(2) returned: " PTR32_FORMAT, av);
if (avn > 1) {
tty->print(", " PTR32_FORMAT, avs[1]);
}
tty->cr();
}
#endif
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
if (avn > 1) {
uint_t av2 = avs[1];
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
if (avn > 1) {
uint_t av2 = avs[1];
#ifndef AV2_SPARC_SPARC5
#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
#endif
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
}
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
}
// Next values are not defined before Solaris 10
// but Solaris 8 is used for jdk6 update builds.
// We only build on Solaris 10 and up, but some of the values below
// are not defined on all versions of Solaris 10, so we define them,
// if necessary.
#ifndef AV_SPARC_ASI_BLK_INIT
#define AV_SPARC_ASI_BLK_INIT 0x0080 /* ASI_BLK_INIT_xxx ASI */
#endif
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
#ifndef AV_SPARC_FMAF
#define AV_SPARC_FMAF 0x0100 /* Fused Multiply-Add */
#endif
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
#ifndef AV_SPARC_FMAU
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#endif
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
#ifndef AV_SPARC_VIS3
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#endif
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
#ifndef AV_SPARC_CBCOND
#define AV_SPARC_CBCOND 0x10000000 /* compare and branch instrs supported */
#endif
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
#ifndef AV_SPARC_AES
#define AV_SPARC_AES 0x00020000 /* aes instrs supported */
#endif
if (av & AV_SPARC_AES) features |= aes_instructions_m;
if (av & AV_SPARC_AES) features |= aes_instructions_m;
#ifndef AV_SPARC_SHA1
#define AV_SPARC_SHA1 0x00400000 /* sha1 instruction supported */
#endif
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
#ifndef AV_SPARC_SHA256
#define AV_SPARC_SHA256 0x00800000 /* sha256 instruction supported */
#endif
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
#ifndef AV_SPARC_SHA512
#define AV_SPARC_SHA512 0x01000000 /* sha512 instruction supported */
#endif
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) is not supported.");
#endif
char tmp;
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
char* buf = (char*) malloc(bufsize);
if (buf != NULL) {
if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
// Figure out what kind of sparc we have
char *sparc_string = strstr(buf, "sparc");
if (sparc_string != NULL) { features |= v8_instructions_m;
if (sparc_string[5] == 'v') {
if (sparc_string[6] == '8') {
if (sparc_string[7] == '-') { features |= hardware_mul32_m;
features |= hardware_div32_m;
} else if (sparc_string[7] == 'p') features |= generic_v9_m;
else features |= generic_v8_m;
} else if (sparc_string[6] == '9') features |= generic_v9_m;
}
}
// Check for visualization instructions
char *vis = strstr(buf, "vis");
if (vis != NULL) { features |= vis1_instructions_m;
if (vis[3] == '2') features |= vis2_instructions_m;
}
}
free(buf);
}
}
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
// Determine the machine type.
do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);
if (Sysinfo(SI_MACHINE).match("sun4v")) {
features |= sun4v_m;
}
{
// Using kstat to determine the machine type.
kstat_ctl_t* kc = kstat_open();
kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL);
const char* implementation = "UNKNOWN";
if (ksp != NULL) {
if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) {
kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
for (int i = 0; i < ksp->ks_ndata; i++) {
if (strcmp((const char*)&(knm[i].name),"implementation") == 0) {
#ifndef KSTAT_DATA_STRING
#define KSTAT_DATA_STRING 9
#endif
if (knm[i].data_type == KSTAT_DATA_CHAR) {
// VM is running on Solaris 8 which does not have value.str.
implementation = &(knm[i].value.c[0]);
} else if (knm[i].data_type == KSTAT_DATA_STRING) {
// VM is running on Solaris 10.
#ifndef KSTAT_NAMED_STR_PTR
// Solaris 8 was used to build VM, define the structure it misses.
struct str_t {
union {
char *ptr; /* NULL-term string */
char __pad[8]; /* 64-bit padding */
} addr;
uint32_t len; /* # bytes for strlen + '\0' */
};
#define KSTAT_NAMED_STR_PTR(knptr) (( (str_t*)&((knptr)->value) )->addr.ptr)
#endif
implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
}
// If SI_CPUBRAND works, that means Solaris 12 API to get the cache line sizes
// is available to us as well
Sysinfo cpu_info(SI_CPUBRAND);
bool use_solaris_12_api = cpu_info.valid();
const char* impl;
int impl_m = 0;
if (use_solaris_12_api) {
impl = cpu_info.value();
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("cpu_info.implementation: %s", implementation);
}
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Parsing CPU implementation from %s", impl);
}
#endif
// Convert to UPPER case before compare.
char* impl = strdup(implementation);
for (int i = 0; impl[i] != 0; i++)
impl[i] = (char)toupper((uint)impl[i]);
if (strstr(impl, "SPARC64") != NULL) {
features |= sparc64_family_m;
} else if (strstr(impl, "SPARC-M") != NULL) {
// M-series SPARC is based on T-series.
features |= (M_family_m | T_family_m);
} else if (strstr(impl, "SPARC-T") != NULL) {
features |= T_family_m;
if (strstr(impl, "SPARC-T1") != NULL) {
features |= T1_model_m;
}
} else {
if (strstr(impl, "SPARC") == NULL) {
impl_m = parse_features(impl);
} else {
// Otherwise use kstat to determine the machine type.
kstat_ctl_t* kc = kstat_open();
if (kc != NULL) {
kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL);
if (ksp != NULL) {
if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) {
kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
for (int i = 0; i < ksp->ks_ndata; i++) {
if (strcmp((const char*)&(knm[i].name), "implementation") == 0) {
impl = KSTAT_NAMED_STR_PTR(&knm[i]);
#ifndef PRODUCT
// kstat on Solaris 8 virtual machines (branded zones)
// returns "(unsupported)" implementation.
warning("kstat cpu_info implementation = '%s', should contain SPARC", impl);
#endif
implementation = "SPARC";
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Parsing CPU implementation from %s", impl);
}
#endif
impl_m = parse_features(impl);
break;
}
free((void*)impl);
break;
}
} // for(
}
}
kstat_close(kc);
}
assert(strcmp(implementation, "UNKNOWN") != 0,
"unknown cpu info (changed kstat interface?)");
kstat_close(kc);
}
assert(impl_m != 0, err_msg("Unknown CPU implementation %s", impl));
features |= impl_m;
bool is_sun4v = (features & sun4v_m) != 0;
if (use_solaris_12_api && is_sun4v) {
// If Solaris 12 API is supported and it's sun4v use sysconf() to get the cache line sizes
Sysconf l1_dcache_line_size(_SC_DCACHE_LINESZ);
if (l1_dcache_line_size.valid()) {
_L1_data_cache_line_size = l1_dcache_line_size.value();
}
// Figure out cache line sizes using PICL
PICL picl((features & sparc64_family_m) != 0, (features & sun4v_m) != 0);
_L2_data_cache_line_size = picl.L2_data_cache_line_size();
Sysconf l2_dcache_line_size(_SC_L2CACHE_LINESZ);
if (l2_dcache_line_size.valid()) {
_L2_data_cache_line_size = l2_dcache_line_size.value();
}
} else {
// Otherwise figure out the cache line sizes using PICL
bool is_fujitsu = (features & sparc64_family_m) != 0;
PICL picl(is_fujitsu, is_sun4v);
_L1_data_cache_line_size = picl.L1_data_cache_line_size();
_L2_data_cache_line_size = picl.L2_data_cache_line_size();
}
return features;
}
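Reviewer note: the new `Sysinfo` helper above is a small RAII wrapper around the two-call `sysinfo(2)` pattern (first call sizes the buffer, second call fills it). A hedged, Solaris-only sketch of the same idea using plain `malloc`/`free` instead of HotSpot's `os::` allocators:

```cpp
// Compiles on Solaris only (sysinfo(2) and <sys/systeminfo.h>).
#include <sys/systeminfo.h>
#include <cstdlib>
#include <cstring>
#include <cstdio>

class SysinfoSketch {
  char* _string;
 public:
  explicit SysinfoSketch(int si) : _string(NULL) {
    char tmp;
    long bufsize = sysinfo(si, &tmp, 1);        // first call: required size
    if (bufsize != -1) {
      char* buf = (char*)std::malloc((size_t)bufsize);
      if (buf != NULL) {
        if (sysinfo(si, buf, bufsize) == bufsize) {
          _string = buf;                        // second call: fill buffer
        } else {
          std::free(buf);
        }
      }
    }
  }
  ~SysinfoSketch() { std::free(_string); }
  bool valid() const { return _string != NULL; }
  bool match(const char* s) const { return valid() && std::strcmp(_string, s) == 0; }
};

int main() {
  if (SysinfoSketch(SI_MACHINE).match("sun4v")) {
    std::printf("running on a sun4v machine\n");
  }
  return 0;
}
```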
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -78,7 +78,7 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Depen
// The null-class-loader should always be kept alive.
_keep_alive(is_anonymous || h_class_loader.is_null()),
_metaspace(NULL), _unloading(false), _klasses(NULL),
_claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
_claimed(0), _jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
_next(NULL), _dependencies(dependencies),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
// empty
......@@ -96,6 +96,45 @@ void ClassLoaderData::Dependencies::init(TRAPS) {
_list_head = oopFactory::new_objectArray(2, CHECK);
}
ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
Chunk* c = _head;
while (c != NULL) {
Chunk* next = c->_next;
delete c;
c = next;
}
}
oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
if (_head == NULL || _head->_size == Chunk::CAPACITY) {
Chunk* next = new Chunk(_head);
OrderAccess::release_store_ptr(&_head, next);
}
oop* handle = &_head->_data[_head->_size];
*handle = o;
OrderAccess::release_store(&_head->_size, _head->_size + 1);
return handle;
}
inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chunk* c, const juint size) {
for (juint i = 0; i < size; i++) {
if (c->_data[i] != NULL) {
f->do_oop(&c->_data[i]);
}
}
}
void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
Chunk* head = (Chunk*) OrderAccess::load_ptr_acquire(&_head);
if (head != NULL) {
// Must be careful when reading size of head
oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
for (Chunk* c = head->_next; c != NULL; c = c->_next) {
oops_do_chunk(f, c, c->_size);
}
}
}
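Reviewer note: the `add()`/`oops_do()` pair above relies on a single-writer, multiple-reader publication protocol: the writer release-stores the new head chunk and the bumped `_size` so concurrent readers that load-acquire them only ever see fully initialized slots. A minimal standalone sketch of that pattern with `std::atomic` (not HotSpot's `OrderAccess` API); the capacity and the `int` payload are placeholders for the oop slots:

```cpp
#include <atomic>
#include <cstdio>

struct Chunk {
  static const unsigned CAPACITY = 32;
  int _data[CAPACITY];
  std::atomic<unsigned> _size;
  Chunk* _next;
  explicit Chunk(Chunk* next) : _size(0), _next(next) {}
};

class ChunkedListSketch {
  std::atomic<Chunk*> _head;
 public:
  ChunkedListSketch() : _head(nullptr) {}
  ~ChunkedListSketch() {
    for (Chunk* c = _head.load(); c != nullptr; ) { Chunk* n = c->_next; delete c; c = n; }
  }
  // Single writer only (would be guarded by a lock in the real code).
  void add(int value) {
    Chunk* head = _head.load(std::memory_order_relaxed);
    if (head == nullptr || head->_size.load(std::memory_order_relaxed) == Chunk::CAPACITY) {
      head = new Chunk(head);
      _head.store(head, std::memory_order_release);          // publish the new chunk
    }
    unsigned sz = head->_size.load(std::memory_order_relaxed);
    head->_data[sz] = value;                                  // fill the slot first...
    head->_size.store(sz + 1, std::memory_order_release);     // ...then publish it
  }
  // Safe to run concurrently with add().
  template <typename F> void for_each(F f) const {
    Chunk* head = _head.load(std::memory_order_acquire);
    if (head == nullptr) return;
    unsigned head_size = head->_size.load(std::memory_order_acquire);
    for (unsigned i = 0; i < head_size; i++) f(head->_data[i]);
    for (Chunk* c = head->_next; c != nullptr; c = c->_next)  // older chunks are full and immutable
      for (unsigned i = 0; i < c->_size.load(std::memory_order_relaxed); i++) f(c->_data[i]);
  }
};

int main() {
  ChunkedListSketch list;
  for (int i = 0; i < 40; i++) list.add(i);   // crosses a chunk boundary
  int sum = 0;
  list.for_each([&](int v) { sum += v; });
  std::printf("sum=%d\n", sum);               // expect 780
  return 0;
}
```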
bool ClassLoaderData::claim() {
if (_claimed == 1) {
return false;
......@@ -111,7 +150,7 @@ void ClassLoaderData::oops_do(OopClosure* f, KlassClosure* klass_closure, bool m
f->do_oop(&_class_loader);
_dependencies.oops_do(f);
_handles->oops_do(f);
_handles.oops_do(f);
if (klass_closure != NULL) {
classes_do(klass_closure);
}
......@@ -342,11 +381,6 @@ ClassLoaderData::~ClassLoaderData() {
_metaspace = NULL;
// release the metaspace
delete m;
// release the handles
if (_handles != NULL) {
JNIHandleBlock::release_block(_handles);
_handles = NULL;
}
}
// Clear all the JNI handles for methods
......@@ -406,15 +440,9 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
return _metaspace;
}
JNIHandleBlock* ClassLoaderData::handles() const { return _handles; }
void ClassLoaderData::set_handles(JNIHandleBlock* handles) { _handles = handles; }
jobject ClassLoaderData::add_handle(Handle h) {
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
if (handles() == NULL) {
set_handles(JNIHandleBlock::allocate_block());
}
return handles()->allocate_handle(h());
return (jobject) _handles.add(h());
}
// Add this metadata pointer to be freed when it's safe. This is only during
......@@ -479,7 +507,6 @@ void ClassLoaderData::dump(outputStream * const out) {
p2i(class_loader() != NULL ? class_loader()->klass() : NULL), loader_name());
if (claimed()) out->print(" claimed ");
if (is_unloading()) out->print(" unloading ");
out->print(" handles " INTPTR_FORMAT, p2i(handles()));
out->cr();
if (metaspace_or_null() != NULL) {
out->print_cr("metaspace: " INTPTR_FORMAT, p2i(metaspace_or_null()));
......
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -51,7 +51,6 @@
class ClassLoaderData;
class JNIMethodBlock;
class JNIHandleBlock;
class Metadebug;
// GC root for walking class loader data created
......@@ -145,6 +144,31 @@ class ClassLoaderData : public CHeapObj<mtClass> {
void oops_do(OopClosure* f);
};
class ChunkedHandleList VALUE_OBJ_CLASS_SPEC {
struct Chunk : public CHeapObj<mtClass> {
static const size_t CAPACITY = 32;
oop _data[CAPACITY];
volatile juint _size;
Chunk* _next;
Chunk(Chunk* c) : _next(c), _size(0) { }
};
Chunk* _head;
void oops_do_chunk(OopClosure* f, Chunk* c, const juint size);
public:
ChunkedHandleList() : _head(NULL) {}
~ChunkedHandleList();
// Only one thread at a time can add, guarded by ClassLoaderData::metaspace_lock().
// However, multiple threads can execute oops_do concurrently with add.
oop* add(oop o);
void oops_do(OopClosure* f);
};
friend class ClassLoaderDataGraph;
friend class ClassLoaderDataGraphKlassIteratorAtomic;
friend class ClassLoaderDataGraphMetaspaceIterator;
......@@ -169,7 +193,8 @@ class ClassLoaderData : public CHeapObj<mtClass> {
// Has to be an int because we cas it.
Klass* _klasses; // The classes defined by the class loader.
JNIHandleBlock* _handles; // Handles to constant pool arrays
ChunkedHandleList _handles; // Handles to constant pool arrays, etc, which
// have the same life cycle of the corresponding ClassLoader.
// These method IDs are created for the class loader and set to NULL when the
// class loader is unloaded. They are rarely freed, only for redefine classes
......@@ -196,9 +221,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {
void set_metaspace(Metaspace* m) { _metaspace = m; }
JNIHandleBlock* handles() const;
void set_handles(JNIHandleBlock* handles);
Mutex* metaspace_lock() const { return _metaspace_lock; }
// GC interface.
......
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -2852,6 +2852,15 @@ void java_lang_invoke_MemberName::set_vmindex(oop mname, intptr_t index) {
mname->address_field_put(_vmindex_offset, (address) index);
}
bool java_lang_invoke_MemberName::equals(oop mn1, oop mn2) {
if (mn1 == mn2) {
return true;
}
return (vmtarget(mn1) == vmtarget(mn2) && flags(mn1) == flags(mn2) &&
vmindex(mn1) == vmindex(mn2) &&
clazz(mn1) == clazz(mn2));
}
oop java_lang_invoke_LambdaForm::vmentry(oop lform) {
assert(is_instance(lform), "wrong type");
return lform->obj_field(_vmentry_offset);
......
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -1132,6 +1132,8 @@ class java_lang_invoke_MemberName: AllStatic {
static int flags_offset_in_bytes() { return _flags_offset; }
static int vmtarget_offset_in_bytes() { return _vmtarget_offset; }
static int vmindex_offset_in_bytes() { return _vmindex_offset; }
static bool equals(oop mt1, oop mt2);
};
......
......@@ -1084,15 +1084,18 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
THREAD);
const char* pkg = "java/";
size_t pkglen = strlen(pkg);
if (!HAS_PENDING_EXCEPTION &&
!class_loader.is_null() &&
parsed_name != NULL &&
!strncmp((const char*)parsed_name->bytes(), pkg, strlen(pkg))) {
parsed_name->utf8_length() >= (int)pkglen &&
!strncmp((const char*)parsed_name->bytes(), pkg, pkglen)) {
// It is illegal to define classes in the "java." package from
// JVM_DefineClass or jni_DefineClass unless you're the bootclassloader
ResourceMark rm(THREAD);
char* name = parsed_name->as_C_string();
char* index = strrchr(name, '/');
assert(index != NULL, "must be");
*index = '\0'; // chop to just the package name
while ((index = strchr(name, '/')) != NULL) {
*index = '.'; // replace '/' with '.' in package name
......
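Reviewer note: the added `utf8_length() >= (int)pkglen` guard above makes sure the parsed symbol's byte buffer is at least as long as the `"java/"` prefix before the comparison touches it (Symbol bytes are length-counted, not NUL-terminated). A tiny hedged helper illustrating the same guarded prefix test, using `memcmp` in place of `strncmp`:

```cpp
#include <cstring>
#include <cstdio>

// Sketch: prefix test over a length-counted byte buffer.
static bool has_prefix(const char* bytes, int len, const char* prefix) {
  size_t plen = std::strlen(prefix);
  return len >= (int)plen && std::memcmp(bytes, prefix, plen) == 0;
}

int main() {
  const char name1[] = { 'j', 'a', 'v', 'a', '/', 'X' };  // no trailing NUL
  const char name2[] = { 'j', 'a', 'v' };                 // shorter than the prefix
  std::printf("%d %d\n",
              has_prefix(name1, (int)sizeof(name1), "java/"),
              has_prefix(name2, (int)sizeof(name2), "java/"));
  return 0;
}
```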
......@@ -1151,6 +1151,7 @@ void nmethod::clear_inline_caches() {
// Clear ICStubs of all compiled ICs
void nmethod::clear_ic_stubs() {
assert_locked_or_safepoint(CompiledIC_lock);
ResourceMark rm;
RelocIterator iter(this);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
......
......@@ -3018,7 +3018,7 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
return NULL;
}
bool InstanceKlass::add_member_name(Handle mem_name) {
oop InstanceKlass::add_member_name(Handle mem_name, bool intern) {
jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
MutexLocker ml(MemberNameTable_lock);
DEBUG_ONLY(No_Safepoint_Verifier nsv);
......@@ -3028,7 +3028,7 @@ bool InstanceKlass::add_member_name(Handle mem_name) {
// is called!
Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name());
if (method->is_obsolete()) {
return false;
return NULL;
} else if (method->is_old()) {
// Replace method with redefined version
java_lang_invoke_MemberName::set_vmtarget(mem_name(), method_with_idnum(method->method_idnum()));
......@@ -3037,8 +3037,11 @@ bool InstanceKlass::add_member_name(Handle mem_name) {
if (_member_names == NULL) {
_member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count());
}
_member_names->add_member_name(mem_name_wref);
return true;
if (intern) {
return _member_names->find_or_add_member_name(mem_name_wref);
} else {
return _member_names->add_member_name(mem_name_wref);
}
}
// -----------------------------------------------------------------------------------------------------
......
......@@ -1105,7 +1105,7 @@ public:
// JSR-292 support
MemberNameTable* member_names() { return _member_names; }
void set_member_names(MemberNameTable* member_names) { _member_names = member_names; }
bool add_member_name(Handle member_name);
oop add_member_name(Handle member_name, bool intern);
public:
// JVMTI support
......
......@@ -119,6 +119,9 @@ class JProjNode : public ProjNode {
// input in slot 0.
class PhiNode : public TypeNode {
const TypePtr* const _adr_type; // non-null only for Type::MEMORY nodes.
// The following fields are only used for data PhiNodes to indicate
// that the PhiNode represents the value of a known instance field.
int _inst_mem_id; // Instance memory id (node index of the memory Phi)
const int _inst_id; // Instance id of the memory slice.
const int _inst_index; // Alias index of the instance memory slice.
// Array elements references have the same alias_idx but different offset.
......@@ -138,11 +141,13 @@ public:
};
PhiNode( Node *r, const Type *t, const TypePtr* at = NULL,
const int imid = -1,
const int iid = TypeOopPtr::InstanceTop,
const int iidx = Compile::AliasIdxTop,
const int ioffs = Type::OffsetTop )
: TypeNode(t,r->req()),
_adr_type(at),
_inst_mem_id(imid),
_inst_id(iid),
_inst_index(iidx),
_inst_offset(ioffs)
......@@ -187,11 +192,14 @@ public:
virtual bool pinned() const { return in(0) != 0; }
virtual const TypePtr *adr_type() const { verify_adr_type(true); return _adr_type; }
void set_inst_mem_id(int inst_mem_id) { _inst_mem_id = inst_mem_id; }
const int inst_mem_id() const { return _inst_mem_id; }
const int inst_id() const { return _inst_id; }
const int inst_index() const { return _inst_index; }
const int inst_offset() const { return _inst_offset; }
bool is_same_inst_field(const Type* tp, int id, int index, int offset) {
bool is_same_inst_field(const Type* tp, int mem_id, int id, int index, int offset) {
return type()->basic_type() == tp->basic_type() &&
inst_mem_id() == mem_id &&
inst_id() == id &&
inst_index() == index &&
inst_offset() == offset &&
......
......@@ -401,7 +401,7 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
Node* phi = region->fast_out(k);
if (phi->is_Phi() && phi != mem &&
phi->as_Phi()->is_same_inst_field(phi_type, instance_id, alias_idx, offset)) {
phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
return phi;
}
}
......@@ -420,7 +420,7 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
GrowableArray <Node *> values(length, length, NULL, false);
// create a new Phi for the value
PhiNode *phi = new (C) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
PhiNode *phi = new (C) PhiNode(mem->in(0), phi_type, NULL, mem->_idx, instance_id, alias_idx, offset);
transform_later(phi);
value_phis->push(phi, mem->_idx);
......
......@@ -1155,7 +1155,7 @@ Node *LoadNode::Identity( PhaseTransform *phase ) {
for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
Node* phi = region->fast_out(i);
if (phi->is_Phi() && phi != mem &&
phi->as_Phi()->is_same_inst_field(this_type, this_iid, this_index, this_offset)) {
phi->as_Phi()->is_same_inst_field(this_type, (int)mem->_idx, this_iid, this_index, this_offset)) {
return phi;
}
}
......@@ -1400,7 +1400,7 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
this_iid = base->_idx;
}
PhaseIterGVN* igvn = phase->is_IterGVN();
Node* phi = new (C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
Node* phi = new (C) PhiNode(region, this_type, NULL, mem->_idx, this_iid, this_index, this_offset);
for (uint i = 1; i < region->req(); i++) {
Node* x;
Node* the_clone = NULL;
......
......@@ -481,6 +481,8 @@ PhaseRenumberLive::PhaseRenumberLive(PhaseGVN* gvn,
uint current_idx = 0; // The current new node ID. Incremented after every assignment.
for (uint i = 0; i < _useful.size(); i++) {
Node* n = _useful.at(i);
// Sanity check that fails if we ever decide to execute this phase after EA
assert(!n->is_Phi() || n->as_Phi()->inst_mem_id() == -1, "should not be linked to data Phi");
const Type* type = gvn->type_or_null(n);
new_type_array.map(current_idx, type);
......@@ -1378,6 +1380,18 @@ void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
i -= num_edges; // we deleted 1 or more copies of this edge
}
// Search for instance field data PhiNodes in the same region pointing to the old
// memory PhiNode and update their instance memory ids to point to the new node.
if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != NULL) {
Node* region = old->in(0);
for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
PhiNode* phi = region->fast_out(i)->isa_Phi();
if (phi != NULL && phi->inst_mem_id() == (int)old->_idx) {
phi->set_inst_mem_id((int)nn->_idx);
}
}
}
// Smash all inputs to 'old', isolating him completely
Node *temp = new (C) Node(1);
temp->init_req(0,nn); // Add a use to nn to prevent him from dying
......
......@@ -882,7 +882,7 @@ protected:
// If not InstanceTop or InstanceBot, indicates that this is
// a particular instance of this type which is distinct.
// This is the the node index of the allocation node creating this instance.
// This is the node index of the allocation node creating this instance.
int _instance_id;
// Extra type information profiling gave us. We propagate it the
......
......@@ -5129,6 +5129,7 @@ void execute_internal_vm_tests() {
run_unit_test(TestKlass_test());
run_unit_test(Test_linked_list());
run_unit_test(TestChunkedList_test());
run_unit_test(ObjectMonitor::sanity_checks());
#if INCLUDE_VM_STRUCTS
run_unit_test(VMStructs::test());
#endif
......
......@@ -643,7 +643,7 @@ JVM_ENTRY(jobject, JVM_Clone(JNIEnv* env, jobject handle))
// This can safepoint and redefine method, so need both new_obj and method
// in a handle, for two different reasons. new_obj can move, method can be
// deleted if nothing is using it on the stack.
m->method_holder()->add_member_name(new_obj());
m->method_holder()->add_member_name(new_obj(), false);
}
}
......
......@@ -173,7 +173,7 @@ oop MethodHandles::init_MemberName(Handle mname, Handle target) {
return NULL;
}
oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info, bool intern) {
assert(info.resolved_appendix().is_null(), "only normal methods here");
methodHandle m = info.resolved_method();
KlassHandle m_klass = m->method_holder();
......@@ -270,13 +270,7 @@ oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
// If relevant, the vtable or itable value is stored as vmindex.
// This is done eagerly, since it is readily available without
// constructing any new objects.
// TO DO: maybe intern mname_oop
if (m->method_holder()->add_member_name(mname)) {
return mname();
} else {
// Redefinition caused this to fail. Return NULL (and an exception?)
return NULL;
}
return m->method_holder()->add_member_name(mname, intern);
}
oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) {
......@@ -917,7 +911,9 @@ int MethodHandles::find_MemberNames(KlassHandle k,
if (!java_lang_invoke_MemberName::is_instance(result()))
return -99; // caller bug!
CallInfo info(m);
oop saved = MethodHandles::init_method_MemberName(result, info);
// Since this is going through the methods to create MemberNames, don't search
// for matching methods already in the table
oop saved = MethodHandles::init_method_MemberName(result, info, /*intern*/false);
if (saved != result())
results->obj_at_put(rfill-1, saved); // show saved instance to user
} else if (++overflow >= overflow_limit) {
......@@ -949,9 +945,34 @@ MemberNameTable::~MemberNameTable() {
}
}
void MemberNameTable::add_member_name(jweak mem_name_wref) {
oop MemberNameTable::add_member_name(jweak mem_name_wref) {
assert_locked_or_safepoint(MemberNameTable_lock);
this->push(mem_name_wref);
return JNIHandles::resolve(mem_name_wref);
}
oop MemberNameTable::find_or_add_member_name(jweak mem_name_wref) {
assert_locked_or_safepoint(MemberNameTable_lock);
oop new_mem_name = JNIHandles::resolve(mem_name_wref);
// Find matching member name in the list.
// This is linear because these are short lists.
int len = this->length();
int new_index = len;
for (int idx = 0; idx < len; idx++) {
oop mname = JNIHandles::resolve(this->at(idx));
if (mname == NULL) {
new_index = idx;
continue;
}
if (java_lang_invoke_MemberName::equals(new_mem_name, mname)) {
JNIHandles::destroy_weak_global(mem_name_wref);
return mname;
}
}
// Not found, push the new one, or reuse empty slot
this->at_put_grow(new_index, mem_name_wref);
return new_mem_name;
}
#if INCLUDE_JVMTI
......
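Reviewer note: `find_or_add_member_name` above interns MemberNames: it scans the short weak-reference table linearly, reuses an equal entry (destroying the new weak handle), and otherwise stores the new entry in a cleared slot or at the end. A simplified standalone sketch of that find-or-add shape over a plain vector with an injected equality predicate; there are no weak JNI handles here, just nullptr standing in for cleared slots:

```cpp
#include <cstdio>
#include <vector>

struct Entry { int clazz; int flags; };   // stand-in for a MemberName's identity fields

static bool equals(const Entry* a, const Entry* b) {
  return a->clazz == b->clazz && a->flags == b->flags;
}

static const Entry* find_or_add(std::vector<const Entry*>& table, const Entry* candidate) {
  size_t new_index = table.size();
  for (size_t i = 0; i < table.size(); i++) {
    const Entry* e = table[i];
    if (e == nullptr) { new_index = i; continue; }  // remember a reusable cleared slot
    if (equals(e, candidate)) return e;             // already interned: reuse it
  }
  if (new_index == table.size()) table.push_back(candidate);
  else table[new_index] = candidate;                // fill the cleared slot
  return candidate;
}

int main() {
  std::vector<const Entry*> table;
  Entry a{1, 2}, b{1, 2}, c{3, 4};
  const Entry* first = find_or_add(table, &a);
  std::printf("interned: %d, table size: %zu\n", find_or_add(table, &b) == first, table.size());
  find_or_add(table, &c);
  std::printf("table size: %zu\n", table.size());
  return 0;
}
```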
/*
* Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -60,7 +60,7 @@ class MethodHandles: AllStatic {
static Handle new_MemberName(TRAPS); // must be followed by init_MemberName
static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target
static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false);
static oop init_method_MemberName(Handle mname_h, CallInfo& info);
static oop init_method_MemberName(Handle mname_h, CallInfo& info, bool intern = true);
static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true);
static int find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig,
int mflags, KlassHandle caller,
......@@ -236,7 +236,8 @@ class MemberNameTable : public GrowableArray<jweak> {
public:
MemberNameTable(int methods_cnt);
~MemberNameTable();
void add_member_name(jweak mem_name_ref);
oop add_member_name(jweak mem_name_ref);
oop find_or_add_member_name(jweak mem_name_ref);
#if INCLUDE_JVMTI
// RedefineClasses() API support:
......
......@@ -2529,6 +2529,10 @@ void ObjectMonitor::DeferredInitialize () {
SETKNOB(FastHSSEC) ;
#undef SETKNOB
if (Knob_Verbose) {
sanity_checks();
}
if (os::is_MP()) {
BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
......@@ -2549,6 +2553,66 @@ void ObjectMonitor::DeferredInitialize () {
InitDone = 1 ;
}
void ObjectMonitor::sanity_checks() {
int error_cnt = 0;
int warning_cnt = 0;
bool verbose = Knob_Verbose != 0 NOT_PRODUCT(|| VerboseInternalVMTests);
if (verbose) {
tty->print_cr("INFO: sizeof(ObjectMonitor)=" SIZE_FORMAT,
sizeof(ObjectMonitor));
}
uint cache_line_size = VM_Version::L1_data_cache_line_size();
if (verbose) {
tty->print_cr("INFO: L1_data_cache_line_size=%u", cache_line_size);
}
ObjectMonitor dummy;
u_char *addr_begin = (u_char*)&dummy;
u_char *addr_header = (u_char*)&dummy._header;
u_char *addr_owner = (u_char*)&dummy._owner;
uint offset_header = (uint)(addr_header - addr_begin);
if (verbose) tty->print_cr("INFO: offset(_header)=%u", offset_header);
uint offset_owner = (uint)(addr_owner - addr_begin);
if (verbose) tty->print_cr("INFO: offset(_owner)=%u", offset_owner);
if ((uint)(addr_header - addr_begin) != 0) {
tty->print_cr("ERROR: offset(_header) must be zero (0).");
error_cnt++;
}
if (cache_line_size != 0) {
// We were able to determine the L1 data cache line size so
// do some cache line specific sanity checks
if ((offset_owner - offset_header) < cache_line_size) {
tty->print_cr("WARNING: the _header and _owner fields are closer "
"than a cache line which permits false sharing.");
warning_cnt++;
}
if ((sizeof(ObjectMonitor) % cache_line_size) != 0) {
tty->print_cr("WARNING: ObjectMonitor size is not a multiple of "
"a cache line which permits false sharing.");
warning_cnt++;
}
}
ObjectSynchronizer::sanity_checks(verbose, cache_line_size, &error_cnt,
&warning_cnt);
if (verbose || error_cnt != 0 || warning_cnt != 0) {
tty->print_cr("INFO: error_cnt=%d", error_cnt);
tty->print_cr("INFO: warning_cnt=%d", warning_cnt);
}
guarantee(error_cnt == 0,
"Fatal error(s) found in ObjectMonitor::sanity_checks()");
}
#ifndef PRODUCT
void ObjectMonitor::verify() {
}
......
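Reviewer note: the sanity check above measures field offsets against the queried L1 line size at run time to flag potential false sharing. Below is a hedged sketch of the same kind of layout check on a dummy struct using `offsetof`; the 64-byte line size and the padding amounts are assumptions for illustration, not values taken from the VM.

```cpp
#include <cstddef>
#include <cstdio>

// Sketch: warn when two hot fields of a monitor-like struct share a cache line.
struct MonitorSketch {
  volatile long header;     // analogous to ObjectMonitor::_header (must stay at offset 0)
  char pad1[56];            // pad so 'owner' starts on the next assumed 64-byte line
  volatile void* owner;     // analogous to ObjectMonitor::_owner
  char pad2[56];            // pad so sizeof is a multiple of the assumed line size
};

int main() {
  const size_t assumed_line_size = 64;
  size_t off_header = offsetof(MonitorSketch, header);
  size_t off_owner  = offsetof(MonitorSketch, owner);
  std::printf("offset(header)=%zu offset(owner)=%zu size=%zu\n",
              off_header, off_owner, sizeof(MonitorSketch));
  if (off_header != 0) {
    std::printf("ERROR: header must be at offset 0\n");
  }
  if (off_owner - off_header < assumed_line_size) {
    std::printf("WARNING: header and owner share a cache line (false sharing)\n");
  }
  if (sizeof(MonitorSketch) % assumed_line_size != 0) {
    std::printf("WARNING: struct size is not a multiple of the cache line size\n");
  }
  return 0;
}
```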
......@@ -189,6 +189,8 @@ public:
bool check(TRAPS); // true if the thread owns the monitor.
void check_slow(TRAPS);
void clear();
static void sanity_checks(); // public for -XX:+ExecuteInternalVMTests
// in PRODUCT for -XX:SyncKnobs=Verbose=1
#ifndef PRODUCT
void verify();
void print();
......@@ -234,8 +236,6 @@ public:
// WARNING: this must be the very first word of ObjectMonitor
// This means this class can't use any virtual member functions.
// TODO-FIXME: assert that offsetof(_header) is 0 or get rid of the
// implicit 0 offset in emitted code.
volatile markOop _header; // displaced object header word - mark
void* volatile _object; // backward object pointer - strong root
......
......@@ -319,6 +319,7 @@ void NMethodSweeper::possibly_sweep() {
}
void NMethodSweeper::sweep_code_cache() {
ResourceMark rm;
Ticks sweep_start_counter = Ticks::now();
_flushed_count = 0;
......@@ -626,6 +627,7 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
if (PrintMethodFlushing) {
ResourceMark rm;
stringStream s;
// Dump code cache state into a buffer before locking the tty,
// because log_state() will use locks causing lock conflicts.
......@@ -643,6 +645,7 @@ void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
}
if (LogCompilation && (xtty != NULL)) {
ResourceMark rm;
stringStream s;
// Dump code cache state into a buffer before locking the tty,
// because log_state() will use locks causing lock conflicts.
......
......@@ -437,19 +437,22 @@ void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which STs 0 into the global volatile
// OrderAccess::Dummy variable. This store is unnecessary for correctness.
// Many threads STing into a common location causes considerable cache migration
// or "sloshing" on large SMP system. As such, I avoid using OrderAccess::storestore()
// until it's repaired. In some cases OrderAccess::fence() -- which incurs local
// latency on the executing processor -- is a better choice as it scales on SMP
// systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
// discussion of coherency costs. Note that all our current reference platforms
// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering performed
// by the CPU(s) or platform.
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.
struct SharedGlobals {
// These are highly shared mostly-read variables.
......@@ -1636,7 +1639,55 @@ void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
}
//------------------------------------------------------------------------------
// Non-product code
// Debugging code
void ObjectSynchronizer::sanity_checks(const bool verbose,
const uint cache_line_size,
int *error_cnt_ptr,
int *warning_cnt_ptr) {
u_char *addr_begin = (u_char*)&GVars;
u_char *addr_stwRandom = (u_char*)&GVars.stwRandom;
u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;
if (verbose) {
tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
sizeof(SharedGlobals));
}
uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);
uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
if (verbose) {
tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
}
if (cache_line_size != 0) {
// We were able to determine the L1 data cache line size so
// do some cache line specific sanity checks
if (offset_stwRandom < cache_line_size) {
tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
"to the struct beginning than a cache line which permits "
"false sharing.");
(*warning_cnt_ptr)++;
}
if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
"SharedGlobals.hcSequence fields are closer than a cache "
"line which permits false sharing.");
(*warning_cnt_ptr)++;
}
if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
"to the struct end than a cache line which permits false "
"sharing.");
(*warning_cnt_ptr)++;
}
}
}
#ifndef PRODUCT
......
......@@ -121,6 +121,9 @@ class ObjectSynchronizer : AllStatic {
static void oops_do(OopClosure* f);
// debugging
static void sanity_checks(const bool verbose,
const unsigned int cache_line_size,
int *error_cnt_ptr, int *warning_cnt_ptr);
static void verify() PRODUCT_RETURN;
static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
......
......@@ -50,6 +50,7 @@ bool Abstract_VM_Version::_supports_atomic_getset8 = false;
bool Abstract_VM_Version::_supports_atomic_getadd4 = false;
bool Abstract_VM_Version::_supports_atomic_getadd8 = false;
unsigned int Abstract_VM_Version::_logical_processors_per_package = 1U;
unsigned int Abstract_VM_Version::_L1_data_cache_line_size = 0;
int Abstract_VM_Version::_reserve_for_allocation_prefetch = 0;
#ifndef HOTSPOT_RELEASE_VERSION
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -42,6 +42,7 @@ class Abstract_VM_Version: AllStatic {
static bool _supports_atomic_getadd4;
static bool _supports_atomic_getadd8;
static unsigned int _logical_processors_per_package;
static unsigned int _L1_data_cache_line_size;
static int _vm_major_version;
static int _vm_minor_version;
static int _vm_build_number;
......@@ -114,6 +115,10 @@ class Abstract_VM_Version: AllStatic {
return _logical_processors_per_package;
}
static unsigned int L1_data_cache_line_size() {
return _L1_data_cache_line_size;
}
// Need a space at the end of TLAB for prefetch instructions
// which may fault when accessing memory outside of heap.
static int reserve_for_allocation_prefetch() {
......