Commit c5234d8d authored by ysr

Merge

@@ -2,3 +2,4 @@ a61af66fc99eb5ec9d50c05b0c599757b1289ceb jdk7-b24
7836be3e92d0a4f9ee7566f602c91f5609534e66 jdk7-b25
ad0b851458ff9d1d490ed2d79bb84f75a9fdb753 jdk7-b26
e3d2692f8442e2d951166dc9bd9a330684754438 jdk7-b27
+c14dab40ed9bf45ad21150bd70c9c80cdf655415 jdk7-b28
@@ -261,3 +261,21 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmti.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jni.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
+# A list of object files built without the platform specific PIC flags, e.g.
+# -fPIC on linux. Performance measurements show that by compiling GC related
+# code, we could significantly reduce the GC pause time on 32 bit Linux/Unix
+# platforms. See 6454213 for more details.
+include $(GAMMADIR)/make/scm.make
+
+ifneq ($(OSNAME), windows)
+  ifndef LP64
+    NONPIC_DIRS  = memory oops gc_implementation gc_interface
+    NONPIC_DIRS  := $(foreach dir,$(NONPIC_DIRS), $(GAMMADIR)/src/share/vm/$(dir))
+    # Look for source files under NONPIC_DIRS
+    NONPIC_FILES := $(foreach dir,$(NONPIC_DIRS),\
+      $(shell find $(dir) \( $(SCM_DIRS) \) -prune -o \
+      -name '*.cpp' -print))
+    NONPIC_OBJ_FILES := $(notdir $(subst .cpp,.o,$(NONPIC_FILES)))
+  endif
+endif
@@ -33,9 +33,9 @@
# Don't put quotes (fail windows build).
HOTSPOT_VM_COPYRIGHT=Copyright 2008
-HS_MAJOR_VER=13
+HS_MAJOR_VER=14
HS_MINOR_VER=0
-HS_BUILD_NUMBER=02
+HS_BUILD_NUMBER=01
JDK_MAJOR_VER=1
JDK_MINOR_VER=7
@@ -50,14 +50,7 @@ PICFLAG = -fPIC
VM_PICFLAG/LIBJVM = $(PICFLAG)
VM_PICFLAG/AOUT =
-ifneq ($(BUILDARCH), i486)
VM_PICFLAG = $(VM_PICFLAG/$(LINK_INTO))
-else
-# PIC has significant overhead on x86, build nonpic VM for now.
-# Link JVM at a "good" base location to avoid unnecessary .text patching.
-JVM_BASE_ADDR = 0x06000000
-endif
CFLAGS += $(VM_PICFLAG)
CFLAGS += -fno-rtti
@@ -133,10 +133,22 @@ ifeq ($(findstring j,$(MFLAGS)),j)
COMPILE_DONE = && { echo Done with $<; }
endif
include $(GAMMADIR)/make/defs.make
+# The non-PIC object files are only generated for 32 bit platforms.
+ifdef LP64
%.o: %.cpp
	@echo Compiling $<
	$(QUIETLY) $(REMOVE_TARGET)
	$(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE)
+else
+%.o: %.cpp
+	@echo Compiling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
+	   $(subst $(VM_PICFLAG), ,$(COMPILE.CC)) -o $@ $< $(COMPILE_DONE), \
+	   $(COMPILE.CC) -o $@ $< $(COMPILE_DONE))
+endif
%.o: %.s
@echo Assembling $<
@@ -30,7 +30,7 @@ DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)
ifeq ("${Platform_compiler}", "sparcWorks")
-ifeq ($(COMPILER_REV),5.8))
+ifeq ($(COMPILER_REV),5.8)
# SS11 SEGV when compiling with -g and -xarch=v8, using different backend
DEBUG_CFLAGS/compileBroker.o = $(DEBUG_CFLAGS) -xO0
DEBUG_CFLAGS/jvmtiTagMap.o = $(DEBUG_CFLAGS) -xO0
@@ -133,19 +133,7 @@ ifeq ($(findstring j,$(MFLAGS)),j)
COMPILE_DONE = && { echo Done with $<; }
endif
-# A list of directories under which all source code are built without -KPIC/-Kpic
-# flag. Performance measurements show that compiling GC related code will
-# dramatically reduce the gc pause time. See bug 6454213 for more details.
-include $(GAMMADIR)/make/scm.make
-
-NONPIC_DIRS  = memory oops gc_implementation gc_interface
-NONPIC_DIRS  := $(foreach dir,$(NONPIC_DIRS), $(GAMMADIR)/src/share/vm/$(dir))
-# Look for source code under NONPIC_DIRS
-NONPIC_FILES := $(foreach dir,$(NONPIC_DIRS),\
-  $(shell find $(dir) \( $(SCM_DIRS) \) -prune -o \
-  -name '*.cpp' -print))
-NONPIC_OBJ_FILES := $(notdir $(subst .cpp,.o,$(NONPIC_FILES)))
include $(GAMMADIR)/make/defs.make
# Sun compiler for 64 bit Solaris does not support building non-PIC object files.
ifdef LP64
@@ -132,7 +132,7 @@ JVM_END
*/
struct siglabel {
-  char *name;
+  const char *name;
int number;
};
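
The point of this one-line change: a C++ string literal is an array of const char, so storing its address through a plain char* relies on a conversion that was already deprecated and allows accidental writes to read-only memory. A minimal standalone sketch of the const-correct table (illustrative only, not VM code):

    // Standalone sketch: why the member must be 'const char *'.
    #include <cstdio>

    struct siglabel {
      const char *name;   // string literals may only be referred to as const
      int         number;
    };

    static const siglabel sigs[] = {
      {"HUP", 1}, {"INT", 2}, {"TERM", 15}
    };

    int main() {
      for (const siglabel &s : sigs)
        std::printf("SIG%s = %d\n", s.name, s.number);
      return 0;
    }
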
@@ -75,8 +75,8 @@ int os::Linux::_page_size = -1;
bool os::Linux::_is_floating_stack = false;
bool os::Linux::_is_NPTL = false;
bool os::Linux::_supports_fast_thread_cpu_time = false;
-char * os::Linux::_glibc_version = NULL;
-char * os::Linux::_libpthread_version = NULL;
+const char * os::Linux::_glibc_version = NULL;
+const char * os::Linux::_libpthread_version = NULL;
static jlong initial_time_count=0;
@@ -213,9 +213,9 @@ pid_t os::Linux::gettid() {
// the system call returns 1. This causes the VM to act as if it is
// a single processor and elide locking (see is_MP() call).
static bool unsafe_chroot_detected = false;
-static char *unstable_chroot_error = "/proc file system not found.\n"
-              "Java may be unstable running multithreaded in a chroot "
-              "environment on Linux when /proc filesystem is not mounted.";
+static const char *unstable_chroot_error = "/proc file system not found.\n"
+              "Java may be unstable running multithreaded in a chroot "
+              "environment on Linux when /proc filesystem is not mounted.";
void os::Linux::initialize_system_info() {
_processor_count = sysconf(_SC_NPROCESSORS_CONF);
@@ -544,26 +544,23 @@ void os::Linux::libpthread_init() {
if (n > 0) {
char *str = (char *)malloc(n);
confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
// Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
// us "NPTL-0.29" even we are running with LinuxThreads. Check if this
-    // is the case:
+    // is the case. LinuxThreads has a hard limit on max number of threads.
+    // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
+    // On the other hand, NPTL does not have such a limit, sysconf()
+    // will return -1 and errno is not changed. Check if it is really NPTL.
    if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
-        strstr(str, "NPTL")) {
-      // LinuxThreads has a hard limit on max number of threads. So
-      // sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
-      // On the other hand, NPTL does not have such a limit, sysconf()
-      // will return -1 and errno is not changed. Check if it is really
-      // NPTL:
-      if (sysconf(_SC_THREAD_THREADS_MAX) > 0) {
-        free(str);
-        str = "linuxthreads";
-      }
-    }
-    os::Linux::set_libpthread_version(str);
+        strstr(str, "NPTL") &&
+        sysconf(_SC_THREAD_THREADS_MAX) > 0) {
+      free(str);
+      os::Linux::set_libpthread_version("linuxthreads");
+    } else {
+      os::Linux::set_libpthread_version(str);
+    }
  } else {
-    // glibc before 2.3.2 only has LinuxThreads.
-    os::Linux::set_libpthread_version("linuxthreads");
+    // glibc before 2.3.2 only has LinuxThreads.
+    os::Linux::set_libpthread_version("linuxthreads");
}
if (strstr(libpthread_version(), "NPTL")) {
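
The detection logic above can be exercised outside the VM. A minimal probe, assuming glibc (both _CS_GNU_LIBPTHREAD_VERSION and _SC_THREAD_THREADS_MAX behave as the comment above describes only on glibc), might look like this hypothetical standalone program:

    // Minimal glibc thread-library probe (sketch, mirrors the logic above).
    #include <cstdio>
    #include <cstring>
    #include <cstdlib>
    #include <unistd.h>

    int main() {
      size_t n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
      if (n == 0) {                 // very old glibc: LinuxThreads only
        std::puts("linuxthreads");
        return 0;
      }
      char *str = (char *)std::malloc(n);
      confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
      // LinuxThreads has a hard thread limit; NPTL reports -1 here.
      if (std::strstr(str, "NPTL") && sysconf(_SC_THREAD_THREADS_MAX) > 0)
        std::puts("linuxthreads (mislabeled as NPTL)");
      else
        std::puts(str);
      std::free(str);
      return 0;
    }
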
@@ -4643,11 +4640,7 @@ extern char** environ;
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
-  char * argv[4];
-  argv[0] = "sh";
-  argv[1] = "-c";
-  argv[2] = cmd;
-  argv[3] = NULL;
+  const char * argv[4] = {"sh", "-c", cmd, NULL};
// fork() in LinuxThreads/NPTL is not async-safe. It needs to run
// pthread_atfork handlers and reset pthread library. All we need is a
@@ -4672,7 +4665,7 @@ int os::fork_and_exec(char* cmd) {
// IA64 should use normal execve() from glibc to match the glibc fork()
// above.
NOT_IA64(syscall(__NR_execve, "/bin/sh", argv, environ);)
-    IA64_ONLY(execve("/bin/sh", argv, environ);)
+    IA64_ONLY(execve("/bin/sh", (char* const*)argv, environ);)
// execve failed
_exit(-1);
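
The cast added above exists because execve() is declared, for historical reasons, as taking 'char *const argv[]', while a const-correct argument table has type 'const char *[]'. A sketch of the same pattern under plain POSIX (it deliberately omits the VM's async-signal-safety tricks, such as calling the execve syscall directly):

    // POSIX sketch of the const-correct argv + cast-at-the-boundary pattern.
    #include <unistd.h>
    #include <sys/wait.h>
    extern char **environ;

    static int run_shell(const char *cmd) {
      const char *argv[4] = {"sh", "-c", cmd, NULL};
      pid_t pid = fork();
      if (pid < 0) return -1;
      if (pid == 0) {
        execve("/bin/sh", (char *const *)argv, environ);
        _exit(-1);                // reached only if execve failed
      }
      int status;
      waitpid(pid, &status, 0);
      return status;
    }
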
@@ -52,8 +52,8 @@ class Linux {
static address _initial_thread_stack_bottom;
static uintptr_t _initial_thread_stack_size;
-  static char *_glibc_version;
-  static char *_libpthread_version;
+  static const char *_glibc_version;
+  static const char *_libpthread_version;
static bool _is_floating_stack;
static bool _is_NPTL;
@@ -72,8 +72,8 @@ class Linux {
static julong physical_memory() { return _physical_memory; }
static void initialize_system_info();
-  static void set_glibc_version(char *s) { _glibc_version = s; }
-  static void set_libpthread_version(char *s) { _libpthread_version = s; }
+  static void set_glibc_version(const char *s) { _glibc_version = s; }
+  static void set_libpthread_version(const char *s) { _libpthread_version = s; }
static bool supports_variable_stack_size();
@@ -134,8 +134,8 @@ class Linux {
static bool chained_handler(int sig, siginfo_t* siginfo, void* context);
// GNU libc and libpthread version strings
-  static char *glibc_version() { return _glibc_version; }
-  static char *libpthread_version() { return _libpthread_version; }
+  static const char *glibc_version() { return _glibc_version; }
+  static const char *libpthread_version() { return _libpthread_version; }
// NPTL or LinuxThreads?
static bool is_LinuxThreads() { return !_is_NPTL; }
@@ -283,6 +283,7 @@
template(cache_field_name, "cache") \
template(value_name, "value") \
template(frontCacheEnabled_name, "frontCacheEnabled") \
+  template(stringCacheEnabled_name, "stringCacheEnabled") \
\
/* non-intrinsic name/signature pairs: */ \
template(register_method_name, "register") \
@@ -1004,6 +1004,9 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
DEBUG_ONLY(mark_bitmap()->verify_clear();)
DEBUG_ONLY(summary_data().verify_clear();)
+  // Have worker threads release resources the next time they run a task.
+  gc_task_manager()->release_all_resources();
}
void PSParallelCompact::post_compact()
@@ -1949,12 +1952,6 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
TimeStamp compaction_start;
TimeStamp collection_exit;
-  // "serial_CM" is needed until the parallel implementation
-  // of the move and update is done.
-  ParCompactionManager* serial_CM = new ParCompactionManager();
-  // Don't initialize more than once.
-  // serial_CM->initialize(&summary_data(), mark_bitmap());
ParallelScavengeHeap* heap = gc_heap();
GCCause::Cause gc_cause = heap->gc_cause();
PSYoungGen* young_gen = heap->young_gen();
@@ -1969,6 +1966,10 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
PreGCValues pre_gc_values;
pre_compact(&pre_gc_values);
+  // Get the compaction manager reserved for the VM thread.
+  ParCompactionManager* const vmthread_cm =
+    ParCompactionManager::manager_array(gc_task_manager()->workers());
// Place after pre_compact() where the number of invocations is incremented.
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
@@ -2008,7 +2009,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
bool marked_for_unloading = false;
marking_start.update();
-    marking_phase(serial_CM, maximum_heap_compaction);
+    marking_phase(vmthread_cm, maximum_heap_compaction);
#ifndef PRODUCT
if (TraceParallelOldGCMarkingPhase) {
@@ -2039,7 +2040,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
#endif
bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
-    summary_phase(serial_CM, maximum_heap_compaction || max_on_system_gc);
+    summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
#ifdef ASSERT
if (VerifyParallelOldWithMarkSweep &&
@@ -2067,13 +2068,13 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
// code can use the forwarding pointers to
// check the new pointer calculation. The restore_marks()
// has to be done before the real compact.
-      serial_CM->set_action(ParCompactionManager::VerifyUpdate);
-      compact_perm(serial_CM);
-      compact_serial(serial_CM);
-      serial_CM->set_action(ParCompactionManager::ResetObjects);
-      compact_perm(serial_CM);
-      compact_serial(serial_CM);
-      serial_CM->set_action(ParCompactionManager::UpdateAndCopy);
+      vmthread_cm->set_action(ParCompactionManager::VerifyUpdate);
+      compact_perm(vmthread_cm);
+      compact_serial(vmthread_cm);
+      vmthread_cm->set_action(ParCompactionManager::ResetObjects);
+      compact_perm(vmthread_cm);
+      compact_serial(vmthread_cm);
+      vmthread_cm->set_action(ParCompactionManager::UpdateAndCopy);
// For debugging only
PSMarkSweep::restore_marks();
@@ -2084,16 +2085,14 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
compaction_start.update();
// Does the perm gen always have to be done serially because
// klasses are used in the update of an object?
-    compact_perm(serial_CM);
+    compact_perm(vmthread_cm);
if (UseParallelOldGCCompacting) {
compact();
} else {
-      compact_serial(serial_CM);
+      compact_serial(vmthread_cm);
}
-  delete serial_CM;
// Reset the mark bitmap, summary data, and do other bookkeeping. Must be
// done before resizing.
post_compact();
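
What the hunks above replace: instead of new/delete of a throwaway ParCompactionManager per collection, the VM thread borrows a manager from a pre-sized slot at index workers() in the manager array. A toy sketch of that reservation pattern, with hypothetical type and method names (not HotSpot code):

    // Toy sketch of "one extra slot reserved for the VM thread".
    #include <vector>

    struct CompactionManager { /* per-thread marking/compaction state */ };

    class ManagerArray {
      std::vector<CompactionManager> _managers;
      unsigned _workers;
    public:
      explicit ManagerArray(unsigned workers)
        : _managers(workers + 1),   // slots 0..workers-1 for GC workers,
          _workers(workers) {}      // slot 'workers' reserved for VM thread
      CompactionManager *worker_manager(unsigned i) { return &_managers[i]; }
      CompactionManager *vmthread_manager()         { return &_managers[_workers]; }
    };

    int main() {
      ManagerArray arr(4);                                // 4 GC worker threads
      CompactionManager *vm_cm = arr.vmthread_manager();  // no new/delete per GC
      (void)vm_cm;
      return 0;
    }
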
@@ -253,11 +253,17 @@ bool MemNode::all_controls_dominate(Node* dom, Node* sub) {
if (dom == NULL || dom->is_top() || sub == NULL || sub->is_top())
return false; // Conservative answer for dead code
-  // Check 'dom'.
+  // Check 'dom'. Skip Proj and CatchProj nodes.
dom = dom->find_exact_control(dom);
if (dom == NULL || dom->is_top())
return false; // Conservative answer for dead code
+  if (dom == sub) {
+    // For the case when, for example, 'sub' is Initialize and the original
+    // 'dom' is Proj node of the 'sub'.
+    return false;
+  }
if (dom->is_Con() || dom->is_Start() || dom->is_Root() || dom == sub)
return true;
@@ -271,6 +277,7 @@ bool MemNode::all_controls_dominate(Node* dom, Node* sub) {
sub->is_Region(), "expecting only these nodes");
// Get control edge of 'sub'.
+  Node* orig_sub = sub;
sub = sub->find_exact_control(sub->in(0));
if (sub == NULL || sub->is_top())
return false; // Conservative answer for dead code
@@ -296,14 +303,16 @@ bool MemNode::all_controls_dominate(Node* dom, Node* sub) {
for (uint next = 0; next < dom_list.size(); next++) {
Node* n = dom_list.at(next);
+      if (n == orig_sub)
+        return false; // One of dom's inputs dominated by sub.
if (!n->is_CFG() && n->pinned()) {
// Check only own control edge for pinned non-control nodes.
n = n->find_exact_control(n->in(0));
if (n == NULL || n->is_top())
return false; // Conservative answer for dead code
assert(n->is_CFG(), "expecting control");
-      }
-      if (n->is_Con() || n->is_Start() || n->is_Root()) {
+        dom_list.push(n);
+      } else if (n->is_Con() || n->is_Start() || n->is_Root()) {
only_dominating_controls = true;
} else if (n->is_CFG()) {
if (n->dominates(sub, nlist))
@@ -1039,6 +1039,9 @@ Node* Node::find_exact_control(Node* ctrl) {
//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or is equal to 'sub' control node.
+// We already know that if any path back to Root or Start reaches 'this',
+// then all paths do so, so this is a simple search for one example,
+// not an exhaustive search for a counterexample.
bool Node::dominates(Node* sub, Node_List &nlist) {
assert(this->is_CFG(), "expecting control");
assert(sub != NULL && sub->is_CFG(), "expecting control");
@@ -1047,110 +1050,115 @@ bool Node::dominates(Node* sub, Node_List &nlist) {
int iterations_without_region_limit = DominatorSearchLimit;
Node* orig_sub = sub;
+  Node* dom = this;
+  bool met_dom = false;
  nlist.clear();
-  bool this_dominates = false;
-  bool result = false; // Conservative answer
-  while (sub != NULL) { // walk 'sub' up the chain to 'this'
-    if (sub == this) {
+  // Walk 'sub' backward up the chain to 'dom', watching for regions.
+  // After seeing 'dom', continue up to Root or Start.
+  // If we hit a region (backward split point), it may be a loop head.
+  // Keep going through one of the region's inputs. If we reach the
+  // same region again, go through a different input. Eventually we
+  // will either exit through the loop head, or give up.
+  // (If we get confused, break out and return a conservative 'false'.)
+  while (sub != NULL) {
+    if (sub->is_top()) break; // Conservative answer for dead code.
+    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes except loops were visited before and the EntryControl
        // path was taken for loops: it did not walk in a cycle.
-        result = true;
-        break;
-      } else if (this_dominates) {
-        result = false; // already met before: walk in a cycle
-        break;
+        return true;
+      } else if (met_dom) {
+        break; // already met before: walk in a cycle
      } else {
        // Region nodes were visited. Continue walk up to Start or Root
        // to make sure that it did not walk in a cycle.
-        this_dominates = true; // first time meet
+        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
-      result = this_dominates;
-      break;
+      // Success if we met 'dom' along a path to Start or Root.
+      // We assume there are no alternative paths that avoid 'dom'.
+      // (This assumption is up to the caller to ensure!)
+      return met_dom;
    }
-    Node* up = sub->find_exact_control(sub->in(0));
-    if (up == NULL || up->is_top()) {
-      result = false; // Conservative answer for dead code
-      break;
-    }
-    if (sub == up && (sub->is_Loop() || sub->is_Region() && sub->req() != 3)) {
-      // Take first valid path on the way up to 'this'.
+    Node* up = sub->in(0);
+    // Normalize simple pass-through regions and projections:
+    up = sub->find_exact_control(up);
+    // If sub == up, we found a self-loop. Try to push past it.
+    if (sub == up && sub->is_Loop()) {
+      // Take loop entry path on the way up to 'dom'.
+      up = sub->in(1); // in(LoopNode::EntryControl);
+    } else if (sub == up && sub->is_Region() && sub->req() != 3) {
+      // Always take in(1) path on the way up to 'dom' for clone regions
+      // (with only one input) or regions which merge > 2 paths
+      // (usually used to merge fast/slow paths).
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
-      assert(sub->req() == 3, "sanity");
+      // Try both paths for Regions with 2 input paths (it may be a loop head).
+      // It could give conservative 'false' answer without information
+      // which region's input is the entry path.
      iterations_without_region_limit = DominatorSearchLimit; // Reset
-      // Try both paths for such Regions.
-      // It is not accurate without regions dominating information.
-      // With such information the other path should be checked for
-      // the most dominating Region which was visited before.
      bool region_was_visited_before = false;
-      uint i = 1;
-      uint size = nlist.size();
-      if (size == 0) {
-        // No such Region nodes were visited before.
-        // Take first valid path on the way up to 'this'.
-      } else {
-        // Was this Region node visited before?
-        intptr_t ni;
-        int j = size - 1;
-        for (; j >= 0; j--) {
-          ni = (intptr_t)nlist.at(j);
-          if ((Node*)(ni & ~1) == sub) {
-            if ((ni & 1) != 0) {
-              break; // Visited 2 paths. Give up.
-            } else {
-              // The Region node was visited before only once.
-              nlist.remove(j);
-              region_was_visited_before = true;
-            }
-            i++; // Take other path.
-            break;
-          }
-        }
-        if (j >= 0 && (ni & 1) != 0) {
-          result = false; // Visited 2 paths. Give up.
-          break;
-        }
-        // The Region node was not visited before.
-      }
-      for (; i < sub->req(); i++) {
-        Node* in = sub->in(i);
-        if (in != NULL && !in->is_top() && in != sub) {
-          break;
-        }
-      }
-      if (i < sub->req()) {
-        up = sub->in(i);
-        if (region_was_visited_before && sub != up) {
-          // Set 0 bit to indicate that both paths were taken.
-          nlist.push((Node*)((intptr_t)sub + 1));
-        } else {
-          nlist.push(sub);
-        }
-      }
+      // Was this Region node visited before?
+      // If so, we have reached it because we accidentally took a
+      // loop-back edge from 'sub' back into the body of the loop,
+      // and worked our way up again to the loop header 'sub'.
+      // So, take the first unexplored path on the way up to 'dom'.
+      for (int j = nlist.size() - 1; j >= 0; j--) {
+        intptr_t ni = (intptr_t)nlist.at(j);
+        Node* visited = (Node*)(ni & ~1);
+        bool visited_twice_already = ((ni & 1) != 0);
+        if (visited == sub) {
+          if (visited_twice_already) {
+            // Visited 2 paths, but still stuck in loop body. Give up.
+            return false;
+          }
+          // The Region node was visited before only once.
+          // (We will repush with the low bit set, below.)
+          nlist.remove(j);
+          // We will find a new edge and re-insert.
+          region_was_visited_before = true;
+          break;
+        }
+      }
+      // Find an incoming edge which has not been seen yet; walk through it.
+      assert(up == sub, "");
+      uint skip = region_was_visited_before ? 1 : 0;
+      for (uint i = 1; i < sub->req(); i++) {
+        Node* in = sub->in(i);
+        if (in != NULL && !in->is_top() && in != sub) {
+          if (skip == 0) {
+            up = in;
+            break;
+          }
+          --skip; // skip this nontrivial input
+        }
+      }
+      // Set 0 bit to indicate that both paths were taken.
+      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }
-    if (sub == up) {
-      result = false; // some kind of tight cycle
-      break;
+    if (up == sub) {
+      break; // some kind of tight cycle
+    }
+    if (up == orig_sub && met_dom) {
+      // returned back after visiting 'dom'
+      break; // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
-      result = false; // dead cycle
-      break;
+      break; // dead cycle
    }
    sub = up;
  }
-  return result;
+  // Did not meet Root or Start node in pred. chain.
+  // Conservative answer for dead code.
+  return false;
}
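
One detail worth isolating from the hunk above: the nlist bookkeeping packs a "visited twice" flag into the low bit of the pushed node pointer, which works because node pointers are at least 2-byte aligned. A self-contained sketch of that tagging trick (illustrative, not HotSpot code):

    // Low-bit pointer tagging as used by 'nlist' above: aligned pointers
    // always have a zero low bit, so that bit can carry a flag for free.
    #include <cassert>
    #include <cstdint>

    struct Node { int dummy; };  // assumes alignment >= 2, true for int

    static intptr_t tag(Node *n, bool second_visit) {
      assert(((intptr_t)n & 1) == 0 && "pointer must be 2-byte aligned");
      return (intptr_t)n + (second_visit ? 1 : 0);
    }

    int main() {
      Node region;
      intptr_t ni = tag(&region, true);
      Node *visited               = (Node *)(ni & ~1); // strip the flag
      bool  visited_twice_already = ((ni & 1) != 0);   // read the flag
      assert(visited == &region && visited_twice_already);
      return 0;
    }
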
//------------------------------remove_dead_region-----------------------------
@@ -2261,6 +2261,9 @@ class CommandLineFlags {
product(bool, AggressiveOpts, false, \
"Enable aggressive optimizations - see arguments.cpp") \
\
+  product(bool, UseStringCache, false, \
+          "Enable String cache capabilities on String.java") \
+  \
/* statistics */ \
develop(bool, UseVTune, false, \
"enable support for Intel's VTune profiler") \
@@ -2947,21 +2947,42 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
if (AggressiveOpts) {
-    // Forcibly initialize java/util/HashMap and mutate the private
-    // static final "frontCacheEnabled" field before we start creating instances
+    {
+      // Forcibly initialize java/util/HashMap and mutate the private
+      // static final "frontCacheEnabled" field before we start creating instances
#ifdef ASSERT
-    klassOop tmp_k = SystemDictionary::find(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0);
-    assert(tmp_k == NULL, "java/util/HashMap should not be loaded yet");
+      klassOop tmp_k = SystemDictionary::find(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0);
+      assert(tmp_k == NULL, "java/util/HashMap should not be loaded yet");
#endif
-    klassOop k_o = SystemDictionary::resolve_or_null(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0);
-    KlassHandle k = KlassHandle(THREAD, k_o);
-    guarantee(k.not_null(), "Must find java/util/HashMap");
-    instanceKlassHandle ik = instanceKlassHandle(THREAD, k());
-    ik->initialize(CHECK_0);
-    fieldDescriptor fd;
-    // Possible we might not find this field; if so, don't break
-    if (ik->find_local_field(vmSymbols::frontCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) {
-      k()->bool_field_put(fd.offset(), true);
+      klassOop k_o = SystemDictionary::resolve_or_null(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0);
+      KlassHandle k = KlassHandle(THREAD, k_o);
+      guarantee(k.not_null(), "Must find java/util/HashMap");
+      instanceKlassHandle ik = instanceKlassHandle(THREAD, k());
+      ik->initialize(CHECK_0);
+      fieldDescriptor fd;
+      // Possible we might not find this field; if so, don't break
+      if (ik->find_local_field(vmSymbols::frontCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) {
+        k()->bool_field_put(fd.offset(), true);
+      }
    }
+    if (UseStringCache) {
+      // Forcibly initialize java/lang/String and mutate the private
+      // static final "stringCacheEnabled" field before we start creating instances
+#ifdef ASSERT
+      klassOop tmp_k = SystemDictionary::find(vmSymbolHandles::java_lang_String(), Handle(), Handle(), CHECK_0);
+      assert(tmp_k == NULL, "java/lang/String should not be loaded yet");
+#endif
+      klassOop k_o = SystemDictionary::resolve_or_null(vmSymbolHandles::java_lang_String(), Handle(), Handle(), CHECK_0);
+      KlassHandle k = KlassHandle(THREAD, k_o);
+      guarantee(k.not_null(), "Must find java/lang/String");
+      instanceKlassHandle ik = instanceKlassHandle(THREAD, k());
+      ik->initialize(CHECK_0);
+      fieldDescriptor fd;
+      // Possible we might not find this field; if so, don't break
+      if (ik->find_local_field(vmSymbols::stringCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) {
+        k()->bool_field_put(fd.offset(), true);
+      }
+    }
}
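
What the block above does through SystemDictionary and fieldDescriptor is, in effect, "force the class to initialize, then flip a private static boolean". The closest analogue from outside the VM is JNI, as in this hedged sketch (illustrative only; it assumes a JDK's jni.h on the include path, the field name comes from the patch above, and writing a static final through JNI is implementation-defined, which is exactly why the VM does it internally before any instances exist):

    // Illustrative JNI analogue of the VM-internal code above.
    #include <jni.h>

    static void enable_string_cache(JNIEnv *env) {
      jclass str = env->FindClass("java/lang/String");  // triggers <clinit>
      if (str == NULL) return;                          // pending exception
      jfieldID fid = env->GetStaticFieldID(str, "stringCacheEnabled", "Z");
      if (fid == NULL) {           // field absent in this class library
        env->ExceptionClear();     // mirror "if so, don't break" above
        return;
      }
      env->SetStaticBooleanField(str, fid, JNI_TRUE);
    }
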