Commit 913b86e8 authored by stefank

8056240: Investigate increased GC remark time after class unloading changes in CRM Fuse

Reviewed-by: mgerdin, coleenp, bdelsart
Parent 3d967eae
@@ -747,7 +747,7 @@ bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
// Move class loader data from main list to the unloaded list for unloading
// and deallocation later.
bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, bool clean_alive) {
ClassLoaderData* data = _head;
ClassLoaderData* prev = NULL;
bool seen_dead_loader = false;
@@ -756,16 +756,8 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
// purging and we don't want to rewalk the previously unloaded class loader data.
_saved_unloading = _unloading;
// mark metadata seen on the stack and code cache so we can delete
// unneeded entries.
bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
MetadataOnStackMark md_on_stack;
while (data != NULL) {
if (data->is_alive(is_alive_closure)) {
if (has_redefined_a_class) {
data->classes_do(InstanceKlass::purge_previous_versions);
}
data->free_deallocate_list();
prev = data;
data = data->next();
continue;
@@ -787,6 +779,11 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
_unloading = dead;
}
if (clean_alive) {
// Clean previous versions and the deallocate list.
ClassLoaderDataGraph::clean_metaspaces();
}
if (seen_dead_loader) {
post_class_unload_events();
}
@@ -794,6 +791,26 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
return seen_dead_loader;
}
void ClassLoaderDataGraph::clean_metaspaces() {
// mark metadata seen on the stack and code cache so we can delete unneeded entries.
bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
MetadataOnStackMark md_on_stack(has_redefined_a_class);
if (has_redefined_a_class) {
// purge_previous_versions also cleans weak method links. Because
// one method's MDO can reference another method from another
// class loader, we need to first clean weak method links for all
// class loaders here. Below, we can then free redefined methods
// for all class loaders.
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
data->classes_do(InstanceKlass::purge_previous_versions);
}
}
// Need to purge the previous version before deallocating.
free_deallocate_lists();
}
void ClassLoaderDataGraph::purge() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
ClassLoaderData* list = _unloading;
@@ -821,6 +838,14 @@ void ClassLoaderDataGraph::post_class_unload_events(void) {
#endif
}
void ClassLoaderDataGraph::free_deallocate_lists() {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
// We need to keep this data until InstanceKlass::purge_previous_version has been
// called on all alive classes. See the comment in ClassLoaderDataGraph::clean_metaspaces.
cld->free_deallocate_list();
}
}
// CDS support
// Global metaspaces for writing information to the shared archive. When
......
@@ -71,6 +71,7 @@ class ClassLoaderDataGraph : public AllStatic {
static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
static void post_class_unload_events(void);
static void clean_metaspaces();
public:
static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
static void purge();
@@ -89,7 +90,7 @@ class ClassLoaderDataGraph : public AllStatic {
static void classes_do(void f(Klass* const));
static void loaded_classes_do(KlassClosure* klass_closure);
static void classes_unloading_do(void f(Klass* const));
static bool do_unloading(BoolObjectClosure* is_alive);
static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive);
// CMS support.
static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
@@ -105,6 +106,8 @@ class ClassLoaderDataGraph : public AllStatic {
}
}
static void free_deallocate_lists();
static void dump_on(outputStream * const out) PRODUCT_RETURN;
static void dump() { dump_on(tty); }
static void verify();
......
@@ -31,25 +31,23 @@
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "services/threadService.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/chunkedList.hpp"
volatile MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL;
volatile MetadataOnStackBuffer* MetadataOnStackMark::_free_buffers = NULL;
// Keep track of marked on-stack metadata so it can be cleared.
GrowableArray<Metadata*>* _marked_objects = NULL;
NOT_PRODUCT(bool MetadataOnStackMark::_is_active = false;)
// Walk metadata on the stack and mark it so that redefinition doesn't delete
// it. Class unloading also walks the previous versions and might try to
// delete it, so this class is used by class unloading also.
MetadataOnStackMark::MetadataOnStackMark() {
MetadataOnStackMark::MetadataOnStackMark(bool visit_code_cache) {
assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
assert(_used_buffers == NULL, "sanity check");
NOT_PRODUCT(_is_active = true;)
if (_marked_objects == NULL) {
_marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
}
Threads::metadata_do(Metadata::mark_on_stack);
if (JvmtiExport::has_redefined_a_class()) {
if (visit_code_cache) {
CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
}
CompileBroker::mark_on_stack();
@@ -62,15 +60,93 @@ MetadataOnStackMark::~MetadataOnStackMark() {
// Unmark everything that was marked. Can't do the same walk because
// redefine classes messes up the code cache so the set of methods
// might not be the same.
for (int i = 0; i< _marked_objects->length(); i++) {
_marked_objects->at(i)->set_on_stack(false);
retire_buffer_for_thread(Thread::current());
MetadataOnStackBuffer* buffer = const_cast<MetadataOnStackBuffer* >(_used_buffers);
while (buffer != NULL) {
// Clear on stack state for all metadata.
size_t size = buffer->size();
for (size_t i = 0; i < size; i++) {
Metadata* md = buffer->at(i);
md->set_on_stack(false);
}
MetadataOnStackBuffer* next = buffer->next_used();
// Move the buffer to the free list.
buffer->clear();
buffer->set_next_used(NULL);
buffer->set_next_free(const_cast<MetadataOnStackBuffer*>(_free_buffers));
_free_buffers = buffer;
// Step to next used buffer.
buffer = next;
}
_marked_objects->clear(); // reuse growable array for next time.
_used_buffers = NULL;
NOT_PRODUCT(_is_active = false;)
}
void MetadataOnStackMark::retire_buffer(MetadataOnStackBuffer* buffer) {
if (buffer == NULL) {
return;
}
MetadataOnStackBuffer* old_head;
do {
old_head = const_cast<MetadataOnStackBuffer*>(_used_buffers);
buffer->set_next_used(old_head);
} while (Atomic::cmpxchg_ptr(buffer, &_used_buffers, old_head) != old_head);
}
void MetadataOnStackMark::retire_buffer_for_thread(Thread* thread) {
retire_buffer(thread->metadata_on_stack_buffer());
thread->set_metadata_on_stack_buffer(NULL);
}
bool MetadataOnStackMark::has_buffer_for_thread(Thread* thread) {
return thread->metadata_on_stack_buffer() != NULL;
}
MetadataOnStackBuffer* MetadataOnStackMark::allocate_buffer() {
MetadataOnStackBuffer* allocated;
MetadataOnStackBuffer* new_head;
do {
allocated = const_cast<MetadataOnStackBuffer*>(_free_buffers);
if (allocated == NULL) {
break;
}
new_head = allocated->next_free();
} while (Atomic::cmpxchg_ptr(new_head, &_free_buffers, allocated) != allocated);
if (allocated == NULL) {
allocated = new MetadataOnStackBuffer();
}
assert(!allocated->is_full(), err_msg("Should not be full: " PTR_FORMAT, p2i(allocated)));
return allocated;
}
// Record which objects are marked so we can unmark the same objects.
void MetadataOnStackMark::record(Metadata* m) {
void MetadataOnStackMark::record(Metadata* m, Thread* thread) {
assert(_is_active, "metadata on stack marking is active");
_marked_objects->push(m);
MetadataOnStackBuffer* buffer = thread->metadata_on_stack_buffer();
if (buffer != NULL && buffer->is_full()) {
retire_buffer(buffer);
buffer = NULL;
}
if (buffer == NULL) {
buffer = allocate_buffer();
thread->set_metadata_on_stack_buffer(buffer);
}
buffer->push(m);
}
@@ -26,9 +26,12 @@
#define SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP
#include "memory/allocation.hpp"
#include "utilities/chunkedList.hpp"
class Metadata;
typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
// Helper class to mark and unmark metadata used on the stack as either handles
// or executing methods, so that it can't be deleted during class redefinition
// and class unloading.
@@ -36,10 +39,20 @@ class Metadata;
// metadata during parsing, relocated methods, and methods in backtraces.
class MetadataOnStackMark : public StackObj {
NOT_PRODUCT(static bool _is_active;)
static volatile MetadataOnStackBuffer* _used_buffers;
static volatile MetadataOnStackBuffer* _free_buffers;
static MetadataOnStackBuffer* allocate_buffer();
static void retire_buffer(MetadataOnStackBuffer* buffer);
public:
MetadataOnStackMark();
~MetadataOnStackMark();
static void record(Metadata* m);
MetadataOnStackMark(bool visit_code_cache);
~MetadataOnStackMark();
static void record(Metadata* m, Thread* thread);
static void retire_buffer_for_thread(Thread* thread);
static bool has_buffer_for_thread(Thread* thread);
};
#endif // SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP
@@ -1691,9 +1691,9 @@ public:
// Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive, bool clean_alive) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive);
bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive, clean_alive);
if (unloading_occurred) {
dictionary()->do_unloading();
constraints()->purge_loader_constraints();
......
@@ -341,7 +341,7 @@ public:
// Unload (that is, break root links to) all unmarked classes and
// loaders. Returns "true" iff something was unloaded.
static bool do_unloading(BoolObjectClosure* is_alive);
static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive = true);
// Used by DumpSharedSpaces only to remove classes that failed verification
static void remove_classes_in_error_state();
......
@@ -1720,11 +1720,17 @@ void nmethod::post_compiled_method_unload() {
set_unload_reported();
}
void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
CompiledICHolder* cichk_oop = ic->cached_icholder();
if (mark_on_stack) {
Metadata::mark_on_stack(cichk_oop->holder_method());
Metadata::mark_on_stack(cichk_oop->holder_klass());
}
if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
return;
@@ -1732,6 +1738,10 @@ void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_a
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
if (mark_on_stack) {
Metadata::mark_on_stack(ic_oop);
}
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
return;
@@ -1792,7 +1802,7 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(&iter);
clean_ic_if_metadata_is_dead(ic, is_alive);
clean_ic_if_metadata_is_dead(ic, is_alive, false);
}
}
}
@@ -1860,6 +1870,53 @@ static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClos
return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}
bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
oop_Relocation* r = iter_at_oop->oop_reloc();
// Traverse those oops directly embedded in the code.
// Other oops (oop_index>0) are seen as part of scopes_oops.
assert(1 == (r->oop_is_immediate()) +
(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
"oop must be found in exactly one place");
if (r->oop_is_immediate() && r->oop_value() != NULL) {
// Unload this nmethod if the oop is dead.
if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
return true;
}
}
return false;
}
void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");
metadata_Relocation* r = iter_at_metadata->metadata_reloc();
// In this metadata, we must only follow those metadatas directly embedded in
// the code. Other metadatas (oop_index>0) are seen as part of
// the metadata section below.
assert(1 == (r->metadata_is_immediate()) +
(r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
"metadata must be found in exactly one place");
if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
Metadata* md = r->metadata_value();
if (md != _method) Metadata::mark_on_stack(md);
}
}
void nmethod::mark_metadata_on_stack_non_relocs() {
// Visit the metadata section
for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops
Metadata* md = *p;
Metadata::mark_on_stack(md);
}
// Visit metadata not embedded in the other places.
if (_method != NULL) Metadata::mark_on_stack(_method);
}
bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
ResourceMark rm;
@@ -1889,6 +1946,11 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
unloading_occurred = true;
}
// When class redefinition is used all metadata in the CodeCache has to be recorded,
// so that unused "previous versions" can be purged. Since walking the CodeCache can
// be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
bool mark_metadata_on_stack = a_class_was_redefined;
// Exception cache
clean_exception_cache(is_alive);
@@ -1904,7 +1966,7 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
if (unloading_occurred) {
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
}
postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
@@ -1920,24 +1982,21 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
case relocInfo::oop_type:
if (!is_unloaded) {
// Unload check
oop_Relocation* r = iter.oop_reloc();
// Traverse those oops directly embedded in the code.
// Other oops (oop_index>0) are seen as part of scopes_oops.
assert(1 == (r->oop_is_immediate()) +
(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
"oop must be found in exactly one place");
if (r->oop_is_immediate() && r->oop_value() != NULL) {
if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
is_unloaded = true;
}
}
is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
}
break;
case relocInfo::metadata_type:
if (mark_metadata_on_stack) {
mark_metadata_on_stack_at(&iter);
}
}
}
if (mark_metadata_on_stack) {
mark_metadata_on_stack_non_relocs();
}
if (is_unloaded) {
return postponed;
}
@@ -2085,7 +2144,7 @@ void nmethod::metadata_do(void f(Metadata*)) {
while (iter.next()) {
if (iter.type() == relocInfo::metadata_type ) {
metadata_Relocation* r = iter.metadata_reloc();
// In this lmetadata, we must only follow those metadatas directly embedded in
// In this metadata, we must only follow those metadatas directly embedded in
// the code. Other metadatas (oop_index>0) are seen as part of
// the metadata section below.
assert(1 == (r->metadata_is_immediate()) +
@@ -2119,7 +2178,7 @@ void nmethod::metadata_do(void f(Metadata*)) {
f(md);
}
// Call function Method*, not embedded in these other places.
// Visit metadata not embedded in the other places.
if (_method != NULL) f(_method);
}
......
@@ -614,9 +614,16 @@ public:
// The parallel versions are used by G1.
bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
private:
// Unload a nmethod if the *root object is dead.
bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
void mark_metadata_on_stack_non_relocs();
public:
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);
void oops_do(OopClosure* f) { oops_do(f, false); }
......
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
@@ -2602,17 +2603,27 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
if (ClassUnloadingWithConcurrentMark) {
// Cleaning of klasses depends on correct information from MetadataOnStackMark. The CodeCache::mark_on_stack
// part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase.
// Defer the cleaning until we have complete on_stack data.
MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
bool purged_classes;
{
G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
}
{
G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
{
G1RemarkGCTraceTime trace("Deallocate Metadata", G1Log::finest());
ClassLoaderDataGraph::free_deallocate_lists();
}
}
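
Taken together, the remark-time ordering under ClassUnloadingWithConcurrentMark is: construct the mark without the CodeCache walk, unload with clean_alive deferred, let the parallel phase both clean nmethods and mark metadata on stack, and only then free the deallocate lists. In outline (a non-compilable summary of the calls above, error handling and tracing omitted):

    MetadataOnStackMark md_on_stack(false);                  // defer the serial CodeCache walk
    bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive,
                                                         false /* clean_alive */);
    weakRefsWorkParallelPart(&g1_is_alive, purged_classes);  // parallel nmethod cleaning
                                                             // piggy-backs mark-on-stack
    ClassLoaderDataGraph::free_deallocate_lists();           // safe: on-stack info now complete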
if (G1StringDedup::is_enabled()) {
......
@@ -27,6 +27,7 @@
#endif
#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
@@ -5133,6 +5134,10 @@ private:
clean_nmethod(claimed_nmethods[i]);
}
}
// The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
// Need to retire the buffers now that this thread has stopped cleaning nmethods.
MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
}
void work_second_pass(uint worker_id) {
@@ -5185,6 +5190,9 @@ public:
// G1 specific cleanup work that has
// been moved here to be done in parallel.
ik->clean_dependent_nmethods();
if (JvmtiExport::has_redefined_a_class()) {
InstanceKlass::purge_previous_versions(ik);
}
}
void work() {
@@ -5219,8 +5227,18 @@ public:
_klass_cleaning_task(is_alive) {
}
void pre_work_verification() {
assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
}
void post_work_verification() {
assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
}
// The parallel work done by all worker threads.
void work(uint worker_id) {
pre_work_verification();
// Do first pass of code cache cleaning.
_code_cache_task.work_first_pass(worker_id);
@@ -5239,6 +5257,8 @@ public:
// Clean all klasses that were not unloaded.
_klass_cleaning_task.work();
post_work_verification();
}
};
......
@@ -1817,11 +1817,22 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,
void ConstantPool::set_on_stack(const bool value) {
if (value) {
_flags |= _on_stack;
int old_flags = *const_cast<volatile int *>(&_flags);
while ((old_flags & _on_stack) == 0) {
int new_flags = old_flags | _on_stack;
int result = Atomic::cmpxchg(new_flags, &_flags, old_flags);
if (result == old_flags) {
// Succeeded.
MetadataOnStackMark::record(this, Thread::current());
return;
}
old_flags = result;
}
} else {
// Clearing is done single-threadedly.
_flags &= ~_on_stack;
}
if (value) MetadataOnStackMark::record(this);
}
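
The CAS loop above guarantees that when several GC worker threads race to mark the same constant pool, exactly one of them transitions the _on_stack bit and therefore calls MetadataOnStackMark::record() once, keeping the per-thread buffers free of duplicates. A standalone model of the pattern (illustrative; std::atomic stands in for HotSpot's Atomic::cmpxchg, and the bit value is made up):

    #include <atomic>

    const int ON_STACK = 1 << 0;  // illustrative bit value, not HotSpot's

    // Returns true only for the thread whose CAS flipped the bit; that
    // thread alone should record the metadata for later unmarking.
    bool try_set_on_stack(std::atomic<int>& flags) {
      int old_flags = flags.load();
      while ((old_flags & ON_STACK) == 0) {
        if (flags.compare_exchange_weak(old_flags, old_flags | ON_STACK)) {
          return true;  // this thread set the bit
        }
        // CAS failure reloaded old_flags; re-test the bit and retry.
      }
      return false;  // already set by another thread
    }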
// JSR 292 support for patching constant pool oops after the class is linked and
......
@@ -1863,9 +1863,12 @@ Method* Method::checked_resolve_jmethod_id(jmethodID mid) {
void Method::set_on_stack(const bool value) {
// Set both the method itself and its constant pool. The constant pool
// on stack means some method referring to it is also on the stack.
_access_flags.set_on_stack(value);
constants()->set_on_stack(value);
if (value) MetadataOnStackMark::record(this);
bool succeeded = _access_flags.set_on_stack(value);
if (value && succeeded) {
MetadataOnStackMark::record(this, Thread::current());
}
}
// Called when the class loader is unloaded to make all methods weak.
......
@@ -5080,6 +5080,7 @@ void TestVirtualSpaceNode_test();
void TestNewSize_test();
void TestKlass_test();
void Test_linked_list();
void TestChunkedList_test();
#if INCLUDE_ALL_GCS
void TestOldFreeSpaceCalculation_test();
void TestG1BiasedArray_test();
@@ -5108,6 +5109,7 @@ void execute_internal_vm_tests() {
run_unit_test(TestNewSize_test());
run_unit_test(TestKlass_test());
run_unit_test(Test_linked_list());
run_unit_test(TestChunkedList_test());
#if INCLUDE_VM_STRUCTS
run_unit_test(VMStructs::test());
#endif
......
@@ -135,7 +135,7 @@ void VM_RedefineClasses::doit() {
// Mark methods seen on stack and everywhere else so old methods are not
// cleaned up if they're on the stack.
MetadataOnStackMark md_on_stack;
MetadataOnStackMark md_on_stack(true);
HandleMark hm(thread); // make sure any handles created are deleted
// before the stack walk again.
......
@@ -234,6 +234,8 @@ Thread::Thread() {
// This initial value ==> never claimed.
_oops_do_parity = 0;
_metadata_on_stack_buffer = NULL;
// the handle mark links itself to last_handle_mark
new HandleMark(this);
......
@@ -42,11 +42,10 @@
#include "runtime/threadLocalStorage.hpp"
#include "runtime/thread_ext.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/macros.hpp"
#include "trace/traceBackend.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/top.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/dirtyCardQueue.hpp"
@@ -83,6 +82,10 @@ class GCTaskQueue;
class ThreadClosure;
class IdealGraphPrinter;
class Metadata;
template <class T, MEMFLAGS F> class ChunkedList;
typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
DEBUG_ONLY(class ResourceMark;)
class WorkerThread;
@@ -256,6 +259,9 @@ class Thread: public ThreadShadow {
jlong _allocated_bytes; // Cumulative number of bytes allocated on
// the Java heap
// Thread-local buffer used by MetadataOnStackMark.
MetadataOnStackBuffer* _metadata_on_stack_buffer;
TRACE_DATA _trace_data; // Thread-local data for tracing
ThreadExt _ext;
@@ -517,7 +523,10 @@ public:
// creation fails due to lack of memory, too many threads etc.
bool set_as_starting_thread();
protected:
void set_metadata_on_stack_buffer(MetadataOnStackBuffer* buffer) { _metadata_on_stack_buffer = buffer; }
MetadataOnStackBuffer* metadata_on_stack_buffer() const { return _metadata_on_stack_buffer; }
protected:
// OS data associated with the thread
OSThread* _osthread; // Platform-specific thread information
......
@@ -62,6 +62,21 @@ void AccessFlags::atomic_clear_bits(jint bits) {
} while(f != old_flags);
}
// Returns true iff this thread succeeded setting the bit.
bool AccessFlags::atomic_set_one_bit(jint bit) {
// Atomically update the flags with the bit given
jint old_flags, new_flags, f;
bool is_setting_bit = false;
do {
old_flags = _flags;
new_flags = old_flags | bit;
is_setting_bit = old_flags != new_flags;
f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
} while(f != old_flags);
return is_setting_bit;
}
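
atomic_set_one_bit differs from atomic_set_bits only in reporting whether the calling thread's update actually flipped the bit, which is what lets Method::set_on_stack below record each method at most once. The same contract in a short standalone sketch (illustrative, using std::atomic::fetch_or in place of the CAS loop):

    #include <atomic>

    // True iff this call changed the bit from 0 to 1.
    bool atomic_set_one_bit(std::atomic<int>& flags, int bit) {
      int old_flags = flags.fetch_or(bit);  // single atomic read-modify-write
      return (old_flags & bit) == 0;
    }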
#if !defined(PRODUCT) || INCLUDE_JVMTI
void AccessFlags::print_on(outputStream* st) const {
......
@@ -170,6 +170,7 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
// Atomic update of flags
void atomic_set_bits(jint bits);
bool atomic_set_one_bit(jint bit);
void atomic_clear_bits(jint bits);
private:
@@ -230,12 +231,13 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
atomic_set_bits(JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE);
}
void set_on_stack(const bool value)
bool set_on_stack(const bool value)
{
if (value) {
atomic_set_bits(JVM_ACC_ON_STACK);
return atomic_set_one_bit(JVM_ACC_ON_STACK);
} else {
atomic_clear_bits(JVM_ACC_ON_STACK);
return true; // Ignored
}
}
// Conversion
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "utilities/chunkedList.hpp"
#include "utilities/debug.hpp"
/////////////// Unit tests ///////////////
#ifndef PRODUCT
template <typename T>
class TestChunkedList {
typedef ChunkedList<T, mtOther> ChunkedListT;
public:
static void testEmpty() {
ChunkedListT buffer;
assert(buffer.size() == 0, "assert");
}
static void testFull() {
ChunkedListT buffer;
for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
buffer.push((T)i);
}
assert(buffer.size() == ChunkedListT::BufferSize, "assert");
assert(buffer.is_full(), "assert");
}
static void testSize() {
ChunkedListT buffer;
for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
assert(buffer.size() == i, "assert");
buffer.push((T)i);
assert(buffer.size() == i + 1, "assert");
}
}
static void testClear() {
ChunkedListT buffer;
buffer.clear();
assert(buffer.size() == 0, "assert");
for (uintptr_t i = 0; i < ChunkedListT::BufferSize / 2; i++) {
buffer.push((T)i);
}
buffer.clear();
assert(buffer.size() == 0, "assert");
for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
buffer.push((T)i);
}
buffer.clear();
assert(buffer.size() == 0, "assert");
}
static void testAt() {
ChunkedListT buffer;
for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
buffer.push((T)i);
assert(buffer.at(i) == (T)i, "assert");
}
for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
assert(buffer.at(i) == (T)i, "assert");
}
}
static void test() {
testEmpty();
testFull();
testSize();
testClear();
testAt();
}
};
class Metadata;
void TestChunkedList_test() {
TestChunkedList<Metadata*>::test();
TestChunkedList<size_t>::test();
}
#endif
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
#define SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
template <class T, MEMFLAGS F> class ChunkedList : public CHeapObj<F> {
template <class U> friend class TestChunkedList;
static const size_t BufferSize = 64;
T _values[BufferSize];
T* _top;
ChunkedList<T, F>* _next_used;
ChunkedList<T, F>* _next_free;
T const * end() const {
return &_values[BufferSize];
}
public:
ChunkedList<T, F>() : _top(_values), _next_used(NULL), _next_free(NULL) {}
bool is_full() const {
return _top == end();
}
void clear() {
_top = _values;
// Don't clear the next pointers since that would interfere
// with other threads trying to iterate through the lists.
}
void push(T m) {
assert(!is_full(), "Buffer is full");
*_top = m;
_top++;
}
void set_next_used(ChunkedList<T, F>* buffer) { _next_used = buffer; }
void set_next_free(ChunkedList<T, F>* buffer) { _next_free = buffer; }
ChunkedList<T, F>* next_used() const { return _next_used; }
ChunkedList<T, F>* next_free() const { return _next_free; }
size_t size() const {
return pointer_delta(_top, _values, sizeof(T));
}
T at(size_t i) {
assert(i < size(), err_msg("IOOBE i: " SIZE_FORMAT " size(): " SIZE_FORMAT, i, size()));
return _values[i];
}
};
#endif // SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
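
A brief usage sketch of the class (HotSpot environment assumed, mirroring the MetadataOnStackBuffer typedef above; the element type is size_t only to keep the example self-contained):

    ChunkedList<size_t, mtInternal> buffer;
    while (!buffer.is_full()) {
      buffer.push(buffer.size());  // fill all BufferSize (64) slots
    }
    for (size_t i = 0; i < buffer.size(); i++) {
      size_t v = buffer.at(i);     // bounds-checked via assert in debug builds
    }
    buffer.clear();                // resets _top only; next_used/next_free links are
                                   // deliberately left intact for concurrent readers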