Commit e0052d8a, authored by robm

Merge

......@@ -236,7 +236,7 @@ void VM_Version::initialize() {
assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
char buf[512];
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
(has_hardware_popc() ? ", popc" : ""),
(has_vis1() ? ", vis1" : ""),
......@@ -249,6 +249,7 @@ void VM_Version::initialize() {
(has_sha256() ? ", sha256" : ""),
(has_sha512() ? ", sha512" : ""),
(is_ultra3() ? ", ultra3" : ""),
(has_sparc5_instr() ? ", sparc5" : ""),
(is_sun4v() ? ", sun4v" : ""),
(is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
(is_sparc64() ? ", sparc64" : ""),
......@@ -364,6 +365,7 @@ void VM_Version::initialize() {
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
tty->print_cr("L2 data cache line size: %u", L2_data_cache_line_size());
tty->print("Allocation");
if (AllocatePrefetchStyle <= 0) {
......@@ -447,9 +449,10 @@ void VM_Version::revert() {
unsigned int VM_Version::calc_parallel_worker_threads() {
unsigned int result;
if (is_M_series()) {
// for now, use same gc thread calculation for M-series as for niagara-plus
// in future, we may want to tweak parameters for nof_parallel_worker_thread
if (is_M_series() || is_S_series()) {
// for now, use same gc thread calculation for M-series and S-series as for
// niagara-plus. In future, we may want to tweak parameters for
// nof_parallel_worker_thread
result = nof_parallel_worker_threads(5, 16, 8);
} else if (is_niagara_plus()) {
result = nof_parallel_worker_threads(5, 16, 8);
......@@ -458,3 +461,37 @@ unsigned int VM_Version::calc_parallel_worker_threads() {
}
return result;
}
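The helper nof_parallel_worker_threads(num, den, switch_pt) is not shown in this diff. A minimal sketch of the scaling it implies, assuming a linear num/den taper above the switch point (the signature and formula here are assumptions for illustration, not the committed helper):

// Hedged sketch (not the committed helper): scale the GC worker count
// with the CPU count, tapering by num/den above switch_pt CPUs.
unsigned int nof_parallel_worker_threads_sketch(unsigned int num,
                                                unsigned int den,
                                                unsigned int switch_pt,
                                                unsigned int ncpus) {
  // Assumed behavior: one worker per CPU up to switch_pt; beyond that,
  // only num/den additional workers per additional CPU.
  return (ncpus <= switch_pt)
      ? ncpus
      : switch_pt + ((ncpus - switch_pt) * num) / den;
}
// With (5, 16, 8) as passed above: 16 CPUs -> 8 + (8 * 5) / 16 = 10 workers.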
int VM_Version::parse_features(const char* implementation) {
int features = unknown_m;
// Convert to UPPER case before compare.
char* impl = os::strdup(implementation);
for (int i = 0; impl[i] != 0; i++)
impl[i] = (char)toupper((uint)impl[i]);
if (strstr(impl, "SPARC64") != NULL) {
features |= sparc64_family_m;
} else if (strstr(impl, "SPARC-M") != NULL) {
// M-series SPARC is based on T-series.
features |= (M_family_m | T_family_m);
} else if (strstr(impl, "SPARC-S") != NULL) {
// S-series SPARC is based on T-series.
features |= (S_family_m | T_family_m);
} else if (strstr(impl, "SPARC-T") != NULL) {
features |= T_family_m;
if (strstr(impl, "SPARC-T1") != NULL) {
features |= T1_model_m;
}
} else if (strstr(impl, "SUN4V-CPU") != NULL) {
// Generic or migration class LDOM
features |= T_family_m;
} else {
#ifndef PRODUCT
warning("Failed to parse CPU implementation = '%s'", impl);
#endif
}
os::free((void*)impl);
return features;
}
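To make the branch order above concrete, here are illustrative inputs and the masks parse_features() would return for them (hypothetical brand strings, traced directly from the code above):

// "SPARC64-VII"  -> sparc64_family_m
// "SPARC-M7"     -> M_family_m | T_family_m    (M-series implies T-series)
// "SPARC-S7"     -> S_family_m | T_family_m    (S-series implies T-series)
// "SPARC-T1"     -> T_family_m | T1_model_m
// "SPARC-T4"     -> T_family_m
// "sun4v-cpu"    -> T_family_m                 (input is upper-cased before matching)
// "UltraSPARC"   -> unknown_m, plus a warning in non-PRODUCT builds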
......@@ -47,13 +47,14 @@ protected:
cbcond_instructions = 13,
sparc64_family = 14,
M_family = 15,
T_family = 16,
T1_model = 17,
sparc5_instructions = 18,
aes_instructions = 19,
sha1_instruction = 20,
sha256_instruction = 21,
sha512_instruction = 22
S_family = 16,
T_family = 17,
T1_model = 18,
sparc5_instructions = 19,
aes_instructions = 20,
sha1_instruction = 21,
sha256_instruction = 22,
sha512_instruction = 23
};
enum Feature_Flag_Set {
......@@ -76,6 +77,7 @@ protected:
cbcond_instructions_m = 1 << cbcond_instructions,
sparc64_family_m = 1 << sparc64_family,
M_family_m = 1 << M_family,
S_family_m = 1 << S_family,
T_family_m = 1 << T_family,
T1_model_m = 1 << T1_model,
sparc5_instructions_m = 1 << sparc5_instructions,
......@@ -105,6 +107,7 @@ protected:
// Returns true if the platform is in the niagara line (T series)
static bool is_M_family(int features) { return (features & M_family_m) != 0; }
static bool is_S_family(int features) { return (features & S_family_m) != 0; }
static bool is_T_family(int features) { return (features & T_family_m) != 0; }
static bool is_niagara() { return is_T_family(_features); }
#ifdef ASSERT
......@@ -119,7 +122,7 @@ protected:
static bool is_T1_model(int features) { return is_T_family(features) && ((features & T1_model_m) != 0); }
static int maximum_niagara1_processor_count() { return 32; }
static int parse_features(const char* implementation);
public:
// Initialization
static void initialize();
......@@ -152,6 +155,7 @@ public:
static bool is_niagara_plus() { return is_T_family(_features) && !is_T1_model(_features); }
static bool is_M_series() { return is_M_family(_features); }
static bool is_S_series() { return is_S_family(_features); }
static bool is_T4() { return is_T_family(_features) && has_cbcond(); }
static bool is_T7() { return is_T_family(_features) && has_sparc5_instr(); }
......
......@@ -406,6 +406,8 @@ void VM_Version::get_processor_features() {
_stepping = 0;
_cpuFeatures = 0;
_logical_processors_per_package = 1;
// i486 internal cache is both I&D and has a 16-byte line size
_L1_data_cache_line_size = 16;
if (!Use486InstrsOnly) {
// Get raw processor info
......@@ -424,6 +426,7 @@ void VM_Version::get_processor_features() {
// Logical processors are only available on P4s and above,
// and only if hyperthreading is available.
_logical_processors_per_package = logical_processor_count();
_L1_data_cache_line_size = L1_line_size();
}
}
......@@ -1034,6 +1037,7 @@ void VM_Version::get_processor_features() {
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Logical CPUs per core: %u",
logical_processors_per_package());
tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
tty->print("UseSSE=%d", (int) UseSSE);
if (UseAVX > 0) {
tty->print(" UseAVX=%d", (int) UseAVX);
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -595,7 +595,7 @@ public:
return (result == 0 ? 1 : result);
}
static intx prefetch_data_size() {
static intx L1_line_size() {
intx result = 0;
if (is_intel()) {
result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
......@@ -607,6 +607,10 @@ public:
return result;
}
static intx prefetch_data_size() {
return L1_line_size();
}
//
// Feature identification
//
......
......@@ -262,6 +262,7 @@ void PICL::close_library() {
// We need to keep these here as long as we have to build on Solaris
// versions before 10.
#ifndef SI_ARCHITECTURE_32
#define SI_ARCHITECTURE_32 516 /* basic 32-bit SI_ARCHITECTURE */
#endif
......@@ -270,231 +271,233 @@ void PICL::close_library() {
#define SI_ARCHITECTURE_64 517 /* basic 64-bit SI_ARCHITECTURE */
#endif
static void do_sysinfo(int si, const char* string, int* features, int mask) {
  char   tmp;
  size_t bufsize = sysinfo(si, &tmp, 1);

  // All SI defines used below must be supported.
  guarantee(bufsize != -1, "must be supported");

  char* buf = (char*) malloc(bufsize);
  if (buf == NULL)
    return;

  if (sysinfo(si, buf, bufsize) == bufsize) {
    // Compare the string.
    if (strcmp(buf, string) == 0) {
      *features |= mask;
    }
  }
  free(buf);
}

#ifndef SI_CPUBRAND
#define SI_CPUBRAND 523  /* return cpu brand string */
#endif

class Sysinfo {
  char* _string;
public:
  Sysinfo(int si) : _string(NULL) {
    char   tmp;
    size_t bufsize = sysinfo(si, &tmp, 1);
    if (bufsize != -1) {
      char* buf = (char*) os::malloc(bufsize, mtInternal);
      if (buf != NULL) {
        if (sysinfo(si, buf, bufsize) == bufsize) {
          _string = buf;
        } else {
          os::free(buf);
        }
      }
    }
  }

  ~Sysinfo() {
    if (_string != NULL) {
      os::free(_string);
    }
  }

  const char* value() const {
    return _string;
  }

  bool valid() const {
    return _string != NULL;
  }

  bool match(const char* s) const {
    return valid() ? strcmp(_string, s) == 0 : false;
  }

  bool match_substring(const char* s) const {
    return valid() ? strstr(_string, s) != NULL : false;
  }
};
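A hypothetical standalone use of the wrapper, showing the point of the RAII design: the buffer allocated for sysinfo(2) is released in the destructor, even on early return (the helper name is illustrative; the real call sites appear below):

static bool running_on_sun4v() {
  Sysinfo machine(SI_MACHINE);   // probes and owns the sysinfo buffer
  return machine.match("sun4v"); // buffer freed by ~Sysinfo on return
}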
class Sysconf {
int _value;
public:
Sysconf(int sc) : _value(-1) {
_value = sysconf(sc);
}
bool valid() const {
return _value != -1;
}
int value() const {
return _value;
}
};
#ifndef _SC_DCACHE_LINESZ
#define _SC_DCACHE_LINESZ 508 /* Data cache line size */
#endif
#ifndef _SC_L2CACHE_LINESZ
#define _SC_L2CACHE_LINESZ 527 /* Size of L2 cache line */
#endif
int VM_Version::platform_features(int features) {
// getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are
// supported on Solaris 10 and later.
if (os::Solaris::supports_getisax()) {
assert(os::Solaris::supports_getisax(), "getisax() must be available");
// Check 32-bit architecture.
do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
// Check 32-bit architecture.
if (Sysinfo(SI_ARCHITECTURE_32).match("sparc")) {
features |= v8_instructions_m;
}
// Check 64-bit architecture.
do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
// Check 64-bit architecture.
if (Sysinfo(SI_ARCHITECTURE_64).match("sparcv9")) {
features |= generic_v9_m;
}
// Extract valid instruction set extensions.
uint_t avs[2];
uint_t avn = os::Solaris::getisax(avs, 2);
assert(avn <= 2, "should return two or less av's");
uint_t av = avs[0];
// Extract valid instruction set extensions.
uint_t avs[2];
uint_t avn = os::Solaris::getisax(avs, 2);
assert(avn <= 2, "should return two or less av's");
uint_t av = avs[0];
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print("getisax(2) returned: " PTR32_FORMAT, av);
if (avn > 1) {
tty->print(", " PTR32_FORMAT, avs[1]);
}
tty->cr();
if (PrintMiscellaneous && Verbose) {
tty->print("getisax(2) returned: " PTR32_FORMAT, av);
if (avn > 1) {
tty->print(", " PTR32_FORMAT, avs[1]);
}
tty->cr();
}
#endif
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
if (avn > 1) {
uint_t av2 = avs[1];
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
if (avn > 1) {
uint_t av2 = avs[1];
#ifndef AV2_SPARC_SPARC5
#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
#endif
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
}
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
}
// Next values are not defined before Solaris 10
// but Solaris 8 is used for jdk6 update builds.
// We only build on Solaris 10 and up, but some of the values below
// are not defined on all versions of Solaris 10, so we define them,
// if necessary.
#ifndef AV_SPARC_ASI_BLK_INIT
#define AV_SPARC_ASI_BLK_INIT 0x0080 /* ASI_BLK_INIT_xxx ASI */
#endif
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
#ifndef AV_SPARC_FMAF
#define AV_SPARC_FMAF 0x0100 /* Fused Multiply-Add */
#endif
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
#ifndef AV_SPARC_FMAU
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#endif
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
#ifndef AV_SPARC_VIS3
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#endif
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
#ifndef AV_SPARC_CBCOND
#define AV_SPARC_CBCOND 0x10000000 /* compare and branch instrs supported */
#endif
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
#ifndef AV_SPARC_AES
#define AV_SPARC_AES 0x00020000 /* aes instrs supported */
#endif
if (av & AV_SPARC_AES) features |= aes_instructions_m;
if (av & AV_SPARC_AES) features |= aes_instructions_m;
#ifndef AV_SPARC_SHA1
#define AV_SPARC_SHA1 0x00400000 /* sha1 instruction supported */
#endif
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
#ifndef AV_SPARC_SHA256
#define AV_SPARC_SHA256 0x00800000 /* sha256 instruction supported */
#endif
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
#ifndef AV_SPARC_SHA512
#define AV_SPARC_SHA512 0x01000000 /* sha512 instruction supported */
#endif
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) is not supported.");
#endif
char tmp;
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
char* buf = (char*) malloc(bufsize);
if (buf != NULL) {
if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
// Figure out what kind of sparc we have
char *sparc_string = strstr(buf, "sparc");
if (sparc_string != NULL) { features |= v8_instructions_m;
if (sparc_string[5] == 'v') {
if (sparc_string[6] == '8') {
if (sparc_string[7] == '-') { features |= hardware_mul32_m;
features |= hardware_div32_m;
} else if (sparc_string[7] == 'p') features |= generic_v9_m;
else features |= generic_v8_m;
} else if (sparc_string[6] == '9') features |= generic_v9_m;
}
}
// Check for visualization instructions
char *vis = strstr(buf, "vis");
if (vis != NULL) { features |= vis1_instructions_m;
if (vis[3] == '2') features |= vis2_instructions_m;
}
}
free(buf);
}
}
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
// Determine the machine type.
do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);
if (Sysinfo(SI_MACHINE).match("sun4v")) {
features |= sun4v_m;
}
{
    // Using kstat to determine the machine type.
    kstat_ctl_t* kc = kstat_open();
    kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL);
    const char* implementation = "UNKNOWN";
    if (ksp != NULL) {
      if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) {
        kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
        for (int i = 0; i < ksp->ks_ndata; i++) {
          if (strcmp((const char*)&(knm[i].name), "implementation") == 0) {
#ifndef KSTAT_DATA_STRING
#define KSTAT_DATA_STRING 9
#endif
            if (knm[i].data_type == KSTAT_DATA_CHAR) {
              // VM is running on Solaris 8 which does not have value.str.
              implementation = &(knm[i].value.c[0]);
            } else if (knm[i].data_type == KSTAT_DATA_STRING) {
              // VM is running on Solaris 10.
#ifndef KSTAT_NAMED_STR_PTR
              // Solaris 8 was used to build VM, define the structure it misses.
              struct str_t {
                union {
                  char *ptr;     /* NULL-term string */
                  char __pad[8]; /* 64-bit padding */
                } addr;
                uint32_t len;    /* # bytes for strlen + '\0' */
              };
#define KSTAT_NAMED_STR_PTR(knptr) (( (str_t*)&((knptr)->value) )->addr.ptr)
#endif
              implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
            }
#ifndef PRODUCT
            if (PrintMiscellaneous && Verbose) {
              tty->print_cr("cpu_info.implementation: %s", implementation);
            }
#endif
            // Convert to UPPER case before compare.
            char* impl = strdup(implementation);
            for (int i = 0; impl[i] != 0; i++)
              impl[i] = (char)toupper((uint)impl[i]);
            if (strstr(impl, "SPARC64") != NULL) {
              features |= sparc64_family_m;
            } else if (strstr(impl, "SPARC-M") != NULL) {
              // M-series SPARC is based on T-series.
              features |= (M_family_m | T_family_m);
            } else if (strstr(impl, "SPARC-T") != NULL) {
              features |= T_family_m;
              if (strstr(impl, "SPARC-T1") != NULL) {
                features |= T1_model_m;
              }
            } else {
              if (strstr(impl, "SPARC") == NULL) {
#ifndef PRODUCT
                // kstat on Solaris 8 virtual machines (branded zones)
                // returns "(unsupported)" implementation.
                warning("kstat cpu_info implementation = '%s', should contain SPARC", impl);
#endif
                implementation = "SPARC";
              }
            }
            free((void*)impl);
            break;
          }
        } // for(
      }
    }
    assert(strcmp(implementation, "UNKNOWN") != 0,
           "unknown cpu info (changed kstat interface?)");
    kstat_close(kc);
  }

  // Figure out cache line sizes using PICL
  PICL picl((features & sparc64_family_m) != 0, (features & sun4v_m) != 0);
  _L2_data_cache_line_size = picl.L2_data_cache_line_size();

  // If SI_CPUBRAND works, that means Solaris 12 API to get the cache line sizes
  // is available to us as well
  Sysinfo cpu_info(SI_CPUBRAND);
  bool use_solaris_12_api = cpu_info.valid();
  const char* impl;
  int impl_m = 0;
  if (use_solaris_12_api) {
    impl = cpu_info.value();
#ifndef PRODUCT
    if (PrintMiscellaneous && Verbose) {
      tty->print_cr("Parsing CPU implementation from %s", impl);
    }
#endif
    impl_m = parse_features(impl);
  } else {
    // Otherwise use kstat to determine the machine type.
    kstat_ctl_t* kc = kstat_open();
    if (kc != NULL) {
      kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL);
      if (ksp != NULL) {
        if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) {
          kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
          for (int i = 0; i < ksp->ks_ndata; i++) {
            if (strcmp((const char*)&(knm[i].name), "implementation") == 0) {
              impl = KSTAT_NAMED_STR_PTR(&knm[i]);
#ifndef PRODUCT
              if (PrintMiscellaneous && Verbose) {
                tty->print_cr("Parsing CPU implementation from %s", impl);
              }
#endif
              impl_m = parse_features(impl);
              break;
            }
          }
        }
      }
      kstat_close(kc);
    }
  }
  assert(impl_m != 0, err_msg("Unknown CPU implementation %s", impl));
  features |= impl_m;

  bool is_sun4v = (features & sun4v_m) != 0;
  if (use_solaris_12_api && is_sun4v) {
    // If Solaris 12 API is supported and it's sun4v use sysconf() to get the cache line sizes
    Sysconf l1_dcache_line_size(_SC_DCACHE_LINESZ);
    if (l1_dcache_line_size.valid()) {
      _L1_data_cache_line_size = l1_dcache_line_size.value();
    }
    Sysconf l2_dcache_line_size(_SC_L2CACHE_LINESZ);
    if (l2_dcache_line_size.valid()) {
      _L2_data_cache_line_size = l2_dcache_line_size.value();
    }
  } else {
    // Otherwise figure out the cache line sizes using PICL
    bool is_fujitsu = (features & sparc64_family_m) != 0;
    PICL picl(is_fujitsu, is_sun4v);
    _L1_data_cache_line_size = picl.L1_data_cache_line_size();
    _L2_data_cache_line_size = picl.L2_data_cache_line_size();
  }
  return features;
}
......@@ -1516,6 +1516,17 @@ void LIR_List::store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr
append(c);
}
void LIR_List::null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_null) {
if (deoptimize_on_null) {
// Emit an explicit null check and deoptimize if opr is null
CodeStub* deopt = new DeoptimizeStub(info);
cmp(lir_cond_equal, opr, LIR_OprFact::oopConst(NULL));
branch(lir_cond_equal, T_OBJECT, deopt);
} else {
// Emit an implicit null check
append(new LIR_Op1(lir_null_check, opr, info));
}
}
void LIR_List::cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
LIR_Opr t1, LIR_Opr t2, LIR_Opr result) {
......
......@@ -2153,7 +2153,7 @@ class LIR_List: public CompilationResourceObj {
void pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64, src, dst, T_LONG, lir_patch_none, NULL)); }
void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }
void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); }
void null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_null = false);
void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
}
......
......@@ -1700,8 +1700,10 @@ void LIRGenerator::do_StoreField(StoreField* x) {
if (x->needs_null_check() &&
(needs_patching ||
MacroAssembler::needs_explicit_null_check(x->offset()))) {
// emit an explicit null check because the offset is too large
__ null_check(object.result(), new CodeEmitInfo(info));
// Emit an explicit null check because the offset is too large.
// If the class is not loaded and the object is NULL, we need to deoptimize to throw a
// NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
__ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
}
LIR_Address* address;
......@@ -1785,8 +1787,10 @@ void LIRGenerator::do_LoadField(LoadField* x) {
obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
}
// emit an explicit null check because the offset is too large
__ null_check(obj, new CodeEmitInfo(info));
// Emit an explicit null check because the offset is too large.
// If the class is not loaded and the object is NULL, we need to deoptimize to throw a
// NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
__ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
}
LIR_Opr reg = rlock_result(x, field_type);
......
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -254,7 +254,8 @@ bool ExceptionCache::match_exception_with_space(Handle exception) {
address ExceptionCache::test_address(address addr) {
for (int i=0; i<count(); i++) {
int limit = count();
for (int i = 0; i < limit; i++) {
if (pc_at(i) == addr) {
return handler_at(i);
}
......@@ -265,9 +266,11 @@ address ExceptionCache::test_address(address addr) {
bool ExceptionCache::add_address_and_handler(address addr, address handler) {
if (test_address(addr) == handler) return true;
if (count() < cache_size) {
set_pc_at(count(),addr);
set_handler_at(count(), handler);
int index = count();
if (index < cache_size) {
set_pc_at(index, addr);
set_handler_at(index, handler);
increment_count();
return true;
}
......@@ -380,10 +383,11 @@ void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
assert(new_entry != NULL,"Must be non null");
assert(new_entry->next() == NULL, "Must be null");
if (exception_cache() != NULL) {
new_entry->set_next(exception_cache());
ExceptionCache *ec = exception_cache();
if (ec != NULL) {
new_entry->set_next(ec);
}
set_exception_cache(new_entry);
release_set_exception_cache(new_entry);
}
void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -39,15 +39,16 @@ class ExceptionCache : public CHeapObj<mtCode> {
Klass* _exception_type;
address _pc[cache_size];
address _handler[cache_size];
int _count;
volatile int _count;
ExceptionCache* _next;
address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
int count() { return _count; }
void increment_count() { _count++; }
int count() { return OrderAccess::load_acquire(&_count); }
// increment_count is only called under lock, but there may be concurrent readers.
void increment_count() { OrderAccess::release_store(&_count, _count + 1); }
public:
......@@ -237,7 +238,7 @@ class nmethod : public CodeBlob {
// counter is decreased (by 1) while sweeping.
int _hotness_counter;
ExceptionCache *_exception_cache;
ExceptionCache * volatile _exception_cache;
PcDescCache _pc_desc_cache;
// These are used for compiled synchronized native methods to
......@@ -433,7 +434,7 @@ class nmethod : public CodeBlob {
// flag accessing and manipulation
bool is_in_use() const { return _state == in_use; }
bool is_alive() const { return _state == in_use || _state == not_entrant; }
bool is_alive() const { unsigned char s = _state; return s == in_use || s == not_entrant; }
bool is_not_entrant() const { return _state == not_entrant; }
bool is_zombie() const { return _state == zombie; }
bool is_unloaded() const { return _state == unloaded; }
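The is_alive() rewrite above replaces two separate loads of _state with a single load into a local. A sketch of the interleaving the old form allowed, assuming the usual monotonic transitions in_use -> not_entrant -> zombie:

// Old form: return _state == in_use || _state == not_entrant;
//   reader: (_state == in_use)      loads not_entrant -> false
//   writer: _state = zombie;
//   reader: (_state == not_entrant) loads zombie      -> false
// Result: is_alive() returns false although the nmethod was still alive
// (not_entrant) at the first load. Evaluating both comparisons against
// the one-time snapshot 's' removes that window.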
......@@ -555,8 +556,10 @@ public:
void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
// Exception cache support
// Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
ExceptionCache* exception_cache() const { return _exception_cache; }
void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache(BoolObjectClosure* is_alive);
......
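Taken together, the nmethod.cpp and nmethod.hpp changes form a single-writer publication pattern: initialize the entry fully, publish it with a release store, read it back with an acquire load. A minimal standalone sketch of the same protocol, using C++11 atomics as a stand-in for OrderAccess (types simplified, not HotSpot code):

#include <atomic>

struct Entry {
  int    payload;  // stands in for the _pc/_handler slots
  Entry* next;     // immutable once the entry is published
};

std::atomic<Entry*> head{nullptr};  // stands in for nmethod::_exception_cache

// Writer (serialized by a lock, as in add_exception_cache_entry):
// fill in the entry, then publish with a release store so any reader
// that sees the pointer also sees the initialized fields.
void publish(Entry* e, int payload) {
  e->payload = payload;
  e->next = head.load(std::memory_order_relaxed);
  head.store(e, std::memory_order_release);  // ~ release_set_exception_cache
}

// Reader (lock-free): the acquire load pairs with the release store.
int first_payload_or(int none) {
  Entry* e = head.load(std::memory_order_acquire);  // ~ load_acquire
  return e != nullptr ? e->payload : none;
}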
......@@ -999,8 +999,7 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
#ifdef _LP64
// Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y)) or
// ConvI2L(CastII(AddI(x, y))) to AddL(ConvI2L(CastII(x)), ConvI2L(CastII(y))),
// Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y))
// but only if x and y have subranges that cannot cause 32-bit overflow,
// under the assumption that x+y is in my own subrange this->type().
......@@ -1024,13 +1023,6 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* z = in(1);
int op = z->Opcode();
Node* ctrl = NULL;
if (op == Op_CastII && z->as_CastII()->has_range_check()) {
// Skip CastII node but save control dependency
ctrl = z->in(0);
z = z->in(1);
op = z->Opcode();
}
if (op == Op_AddI || op == Op_SubI) {
Node* x = z->in(1);
Node* y = z->in(2);
......@@ -1090,8 +1082,8 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
assert(rxlo == (int)rxlo && rxhi == (int)rxhi, "x should not overflow");
assert(rylo == (int)rylo && ryhi == (int)ryhi, "y should not overflow");
Node* cx = phase->C->constrained_convI2L(phase, x, TypeInt::make(rxlo, rxhi, widen), ctrl);
Node* cy = phase->C->constrained_convI2L(phase, y, TypeInt::make(rylo, ryhi, widen), ctrl);
Node* cx = phase->C->constrained_convI2L(phase, x, TypeInt::make(rxlo, rxhi, widen), NULL);
Node* cy = phase->C->constrained_convI2L(phase, y, TypeInt::make(rylo, ryhi, widen), NULL);
switch (op) {
case Op_AddI: return new (phase->C) AddLNode(cx, cy);
case Op_SubI: return new (phase->C) SubLNode(cx, cy);
......
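The overflow precondition in the comment above ("only if x and y have subranges that cannot cause 32-bit overflow") is easy to see with concrete values. A small self-contained check; the unsigned casts make the 32-bit wrap well-defined in C++, mirroring Java int semantics on two's-complement targets:

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = INT32_MAX;  // 2147483647
  int32_t y = 1;
  // ConvI2L(AddI(x, y)): add in 32 bits (wraps), then widen to 64 bits.
  int64_t narrow_add = (int64_t)(int32_t)((uint32_t)x + (uint32_t)y);  // -2147483648
  // AddL(ConvI2L(x), ConvI2L(y)): widen each operand first, then add.
  int64_t wide_add = (int64_t)x + (int64_t)y;                          //  2147483648
  // The rewrite is only sound when the 32-bit add cannot overflow.
  assert(narrow_add != wide_add);
  return 0;
}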
......@@ -5129,6 +5129,7 @@ void execute_internal_vm_tests() {
run_unit_test(TestKlass_test());
run_unit_test(Test_linked_list());
run_unit_test(TestChunkedList_test());
run_unit_test(ObjectMonitor::sanity_checks());
#if INCLUDE_VM_STRUCTS
run_unit_test(VMStructs::test());
#endif
......
......@@ -2529,6 +2529,10 @@ void ObjectMonitor::DeferredInitialize () {
SETKNOB(FastHSSEC) ;
#undef SETKNOB
if (Knob_Verbose) {
sanity_checks();
}
if (os::is_MP()) {
BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
......@@ -2549,6 +2553,66 @@ void ObjectMonitor::DeferredInitialize () {
InitDone = 1 ;
}
void ObjectMonitor::sanity_checks() {
int error_cnt = 0;
int warning_cnt = 0;
bool verbose = Knob_Verbose != 0 NOT_PRODUCT(|| VerboseInternalVMTests);
if (verbose) {
tty->print_cr("INFO: sizeof(ObjectMonitor)=" SIZE_FORMAT,
sizeof(ObjectMonitor));
}
uint cache_line_size = VM_Version::L1_data_cache_line_size();
if (verbose) {
tty->print_cr("INFO: L1_data_cache_line_size=%u", cache_line_size);
}
ObjectMonitor dummy;
u_char *addr_begin = (u_char*)&dummy;
u_char *addr_header = (u_char*)&dummy._header;
u_char *addr_owner = (u_char*)&dummy._owner;
uint offset_header = (uint)(addr_header - addr_begin);
if (verbose) tty->print_cr("INFO: offset(_header)=%u", offset_header);
uint offset_owner = (uint)(addr_owner - addr_begin);
if (verbose) tty->print_cr("INFO: offset(_owner)=%u", offset_owner);
if ((uint)(addr_header - addr_begin) != 0) {
tty->print_cr("ERROR: offset(_header) must be zero (0).");
error_cnt++;
}
if (cache_line_size != 0) {
// We were able to determine the L1 data cache line size so
// do some cache line specific sanity checks
if ((offset_owner - offset_header) < cache_line_size) {
tty->print_cr("WARNING: the _header and _owner fields are closer "
"than a cache line which permits false sharing.");
warning_cnt++;
}
if ((sizeof(ObjectMonitor) % cache_line_size) != 0) {
tty->print_cr("WARNING: ObjectMonitor size is not a multiple of "
"a cache line which permits false sharing.");
warning_cnt++;
}
}
ObjectSynchronizer::sanity_checks(verbose, cache_line_size, &error_cnt,
&warning_cnt);
if (verbose || error_cnt != 0 || warning_cnt != 0) {
tty->print_cr("INFO: error_cnt=%d", error_cnt);
tty->print_cr("INFO: warning_cnt=%d", warning_cnt);
}
guarantee(error_cnt == 0,
"Fatal error(s) found in ObjectMonitor::sanity_checks()");
}
#ifndef PRODUCT
void ObjectMonitor::verify() {
}
......
......@@ -189,6 +189,8 @@ public:
bool check(TRAPS); // true if the thread owns the monitor.
void check_slow(TRAPS);
void clear();
static void sanity_checks(); // public for -XX:+ExecuteInternalVMTests
// in PRODUCT for -XX:SyncKnobs=Verbose=1
#ifndef PRODUCT
void verify();
void print();
......@@ -234,8 +236,6 @@ public:
// WARNING: this must be the very first word of ObjectMonitor
// This means this class can't use any virtual member functions.
// TODO-FIXME: assert that offsetof(_header) is 0 or get rid of the
// implicit 0 offset in emitted code.
volatile markOop _header; // displaced object header word - mark
void* volatile _object; // backward object pointer - strong root
......
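The "very first word" requirement noted above (and checked at runtime by sanity_checks()) can also be expressed at compile time. A hedged sketch with a simplified stand-in layout, not the real ObjectMonitor declaration:

#include <cstddef>
#include <cstdint>

struct MonitorStandIn {       // simplified; the real class has many more fields
  volatile intptr_t _header;  // displaced mark word, must stay at offset 0
  void* volatile    _object;  // backward object pointer
};

static_assert(offsetof(MonitorStandIn, _header) == 0,
              "emitted code relies on an implicit zero offset for _header");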
......@@ -437,19 +437,22 @@ void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which STs 0 into the global volatile
// OrderAccess::Dummy variable. This store is unnecessary for correctness.
// Many threads STing into a common location causes considerable cache migration
// or "sloshing" on large SMP system. As such, I avoid using OrderAccess::storestore()
// until it's repaired. In some cases OrderAccess::fence() -- which incurs local
// latency on the executing processor -- is a better choice as it scales on SMP
// systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
// discussion of coherency costs. Note that all our current reference platforms
// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering performed
// by the CPU(s) or platform.
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.
struct SharedGlobals {
// These are highly shared mostly-read variables.
......@@ -1636,7 +1639,55 @@ void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
}
//------------------------------------------------------------------------------
// Non-product code
// Debugging code
void ObjectSynchronizer::sanity_checks(const bool verbose,
const uint cache_line_size,
int *error_cnt_ptr,
int *warning_cnt_ptr) {
u_char *addr_begin = (u_char*)&GVars;
u_char *addr_stwRandom = (u_char*)&GVars.stwRandom;
u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;
if (verbose) {
tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
sizeof(SharedGlobals));
}
uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);
uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
if (verbose) {
tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
}
if (cache_line_size != 0) {
// We were able to determine the L1 data cache line size so
// do some cache line specific sanity checks
if (offset_stwRandom < cache_line_size) {
tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
"to the struct beginning than a cache line which permits "
"false sharing.");
(*warning_cnt_ptr)++;
}
if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
"SharedGlobals.hcSequence fields are closer than a cache "
"line which permits false sharing.");
(*warning_cnt_ptr)++;
}
if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
"to the struct end than a cache line which permits false "
"sharing.");
(*warning_cnt_ptr)++;
}
}
}
#ifndef PRODUCT
......
......@@ -121,6 +121,9 @@ class ObjectSynchronizer : AllStatic {
static void oops_do(OopClosure* f);
// debugging
static void sanity_checks(const bool verbose,
const unsigned int cache_line_size,
int *error_cnt_ptr, int *warning_cnt_ptr);
static void verify() PRODUCT_RETURN;
static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
......
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -879,7 +879,7 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
nonstatic_field(nmethod, _stack_traversal_mark, long) \
nonstatic_field(nmethod, _compile_id, int) \
nonstatic_field(nmethod, _comp_level, int) \
nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \
volatile_nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \
nonstatic_field(nmethod, _marked_for_deoptimization, bool) \
\
unchecked_c2_static_field(Deoptimization, _trap_reason_name, void*) \
......
......@@ -50,6 +50,7 @@ bool Abstract_VM_Version::_supports_atomic_getset8 = false;
bool Abstract_VM_Version::_supports_atomic_getadd4 = false;
bool Abstract_VM_Version::_supports_atomic_getadd8 = false;
unsigned int Abstract_VM_Version::_logical_processors_per_package = 1U;
unsigned int Abstract_VM_Version::_L1_data_cache_line_size = 0;
int Abstract_VM_Version::_reserve_for_allocation_prefetch = 0;
#ifndef HOTSPOT_RELEASE_VERSION
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -42,6 +42,7 @@ class Abstract_VM_Version: AllStatic {
static bool _supports_atomic_getadd4;
static bool _supports_atomic_getadd8;
static unsigned int _logical_processors_per_package;
static unsigned int _L1_data_cache_line_size;
static int _vm_major_version;
static int _vm_minor_version;
static int _vm_build_number;
......@@ -114,6 +115,10 @@ class Abstract_VM_Version: AllStatic {
return _logical_processors_per_package;
}
static unsigned int L1_data_cache_line_size() {
return _L1_data_cache_line_size;
}
// Need a space at the end of TLAB for prefetch instructions
// which may fault when accessing memory outside of heap.
static int reserve_for_allocation_prefetch() {
......
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
public class compiler/c1/TestUnresolvedField version 52:0 {
public static Method testGetField:"()V" stack 1 locals 1 {
aconst_null;
getfield Field T.f:I; // T does not exist
return;
}
public static Method testPutField:"()V" stack 2 locals 1 {
aconst_null;
iconst_0;
putfield Field T.f:I; // T does not exist
return;
}
}
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8173373
* @compile TestUnresolvedField.jasm
* @run main/othervm -XX:TieredStopAtLevel=1 -Xcomp
* -XX:CompileCommand=compileonly,compiler.c1.TestUnresolvedField::test*
* compiler.c1.TestUnresolvedFieldMain
*/
package compiler.c1;
public class TestUnresolvedFieldMain {
public static void main(String[] args) {
try {
TestUnresolvedField.testGetField();
} catch (java.lang.NoClassDefFoundError error) {
// Expected
}
try {
TestUnresolvedField.testPutField();
} catch (java.lang.NoClassDefFoundError error) {
// Expected
}
}
}
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -23,10 +23,16 @@
/*
* @test
* @bug 8078262
* @bug 8078262 8177095
* @summary Tests correct dominator information after loop peeling.
* @run main/othervm -Xcomp -XX:CompileCommand=compileonly,TestLoopPeeling::test* TestLoopPeeling
*
* @run main/othervm -Xcomp
* -XX:CompileCommand=compileonly,compiler.loopopts.TestLoopPeeling::test*
* compiler.loopopts.TestLoopPeeling
*/
package compiler.loopopts;
public class TestLoopPeeling {
public int[] array = new int[100];
......@@ -34,14 +40,16 @@ public class TestLoopPeeling {
public static void main(String args[]) {
TestLoopPeeling test = new TestLoopPeeling();
try {
test.testArrayAccess(0, 1);
test.testArrayAccess1(0, 1);
test.testArrayAccess2(0);
test.testArrayAccess3(0, false);
test.testArrayAllocation(0, 1);
} catch (Exception e) {
// Ignore exceptions
}
}
public void testArrayAccess(int index, int inc) {
public void testArrayAccess1(int index, int inc) {
int storeIndex = -1;
for (; index < 10; index += inc) {
......@@ -57,7 +65,7 @@ public class TestLoopPeeling {
if (index == 42) {
// This store and the corresponding range check are moved out of the
// loop and both used after old loop and the peeled iteration exit.
// loop and both used after main loop and the peeled iteration exit.
// For the peeled iteration, storeIndex is always -1 and the ConvI2L
// is replaced by TOP. However, the range check is not folded because
// we don't do the split if optimization in PhaseIdealLoop2.
......@@ -71,6 +79,44 @@ public class TestLoopPeeling {
}
}
public int testArrayAccess2(int index) {
// Load1 and the corresponding range check are moved out of the loop
// and both are used after the main loop and the peeled iteration exit.
// For the peeled iteration, storeIndex is always Integer.MIN_VALUE and
// for the main loop it is 0. Hence, the merging phi has type int:<=0.
// Load1 reads the array at index ConvI2L(CastII(AddI(storeIndex, -1)))
// where the CastII is range check dependent and has type int:>=0.
// The CastII gets pushed through the AddI and its type is changed to int:>=1
// which does not overlap with the input type of storeIndex (int:<=0).
// The CastII is replaced by TOP causing a cascade of other eliminations.
// Since the control path through the range check CmpU(AddI(storeIndex, -1))
// is not eliminated, the graph is in a corrupted state. We fail once we merge
// with the result of Load2 because we get data from a non-dominating region.
int storeIndex = Integer.MIN_VALUE;
for (; index < 10; ++index) {
if (index == 42) {
return array[storeIndex-1]; // Load1
}
storeIndex = 0;
}
return array[42]; // Load2
}
public int testArrayAccess3(int index, boolean b) {
// Same as testArrayAccess2 but manifests as crash in register allocator.
int storeIndex = Integer.MIN_VALUE;
for (; index < 10; ++index) {
if (b) {
return 0;
}
if (index == 42) {
return array[storeIndex-1]; // Load1
}
storeIndex = 0;
}
return array[42]; // Load2
}
public byte[] testArrayAllocation(int index, int inc) {
int allocationCount = -1;
byte[] result;
......@@ -82,7 +128,7 @@ public class TestLoopPeeling {
if (index == 42) {
// This allocation and the corresponding size check are moved out of the
// loop and both used after old loop and the peeled iteration exit.
// loop and both used after main loop and the peeled iteration exit.
// For the peeled iteration, allocationCount is always -1 and the ConvI2L
// is replaced by TOP. However, the size check is not folded because
// we don't do the split if optimization in PhaseIdealLoop2.
......