Commit 29a8feb1 authored by ehelin

Merge

@@ -27,6 +27,7 @@
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1HotCardCache.hpp"
#include "runtime/java.hpp"
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
_threads(NULL), _n_threads(0),
@@ -62,6 +63,10 @@ ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
for (int i = _n_threads - 1; i >= 0; i--) {
ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
assert(t != NULL, "Conc refine should have been created");
if (t->osthread() == NULL) {
vm_shutdown_during_initialization("Could not create ConcurrentG1RefineThread");
}
assert(t->cg1r() == this, "Conc refine thread should refer to this");
_threads[i] = t;
next = t;
......
@@ -553,6 +553,9 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
_cmThread = new ConcurrentMarkThread(this);
assert(cmThread() != NULL, "CM Thread should have been created");
assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
if (_cmThread->osthread() == NULL) {
vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
}
assert(CGC_lock != NULL, "Where's the CGC_lock?");
assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
......
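The two hunks above add the same guard, and the `#include "runtime/java.hpp"` added in the first hunk is what declares `vm_shutdown_during_initialization`. Note that the existing `assert(t != NULL, ...)` cannot catch the interesting failure: allocating the C++ thread object either succeeds or aborts the VM outright, so it is OS-level thread creation that can quietly fail, leaving `osthread()` NULL. A condensed sketch of the pattern (names taken from the diff; the claim about `new` aborting on failure is an assumption about HotSpot's allocation strategy):

```cpp
// Constructing the thread object can succeed while creation of the
// underlying native thread fails (thread limits, address-space
// exhaustion, ...), in which case osthread() stays NULL.
ConcurrentMarkThread* t = new ConcurrentMarkThread(this);
if (t->osthread() == NULL) {
  // Fail fast during initialization instead of dereferencing a
  // NULL osthread later at runtime.
  vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
}
```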
@@ -2433,20 +2433,6 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
_gc_tracer.report_object_count_after_gc(is_alive_closure());
}
void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
ClassLoaderData* cld = klass->class_loader_data();
// The actual processing of the klass is done when we
// traverse the list of Klasses in the class loader data.
PSParallelCompact::follow_class_loader(cm, cld);
}
void PSParallelCompact::adjust_klass(ParCompactionManager* cm, Klass* klass) {
ClassLoaderData* cld = klass->class_loader_data();
// The actual processing of the klass is done when we
// traverse the list of Klasses in the class loader data.
PSParallelCompact::adjust_class_loader(cm, cld);
}
void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
ClassLoaderData* cld) {
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -2455,13 +2441,6 @@ void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
}
void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
ClassLoaderData* cld) {
cld->oops_do(PSParallelCompact::adjust_pointer_closure(),
PSParallelCompact::adjust_klass_closure(),
true);
}
// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
......
@@ -1200,13 +1200,10 @@ class PSParallelCompact : AllStatic {
T* p);
template <class T> static inline void adjust_pointer(T* p);
static void follow_klass(ParCompactionManager* cm, Klass* klass);
static void adjust_klass(ParCompactionManager* cm, Klass* klass);
static inline void follow_klass(ParCompactionManager* cm, Klass* klass);
static void follow_class_loader(ParCompactionManager* cm,
ClassLoaderData* klass);
static void adjust_class_loader(ParCompactionManager* cm,
ClassLoaderData* klass);
// Compaction support.
// Return true if p is in the range [beg_addr, end_addr).
@@ -1380,6 +1377,11 @@ inline void PSParallelCompact::adjust_pointer(T* p) {
}
}
inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
oop holder = klass->klass_holder();
PSParallelCompact::mark_and_push(cm, &holder);
}
template <class T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
mark_and_push(_compaction_manager, p);
......
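The new inline `follow_klass` is the heart of this changeset: instead of eagerly traversing the ClassLoaderData, it marks and pushes the klass's holder oop, and the CLD is claimed later when the holder itself is processed. For reference, `klass_holder()` resolves roughly as in the sketch below (an assumption about the exact shape of `Klass::klass_holder` in klass.hpp of this era, not part of this diff); the anonymous/non-anonymous split is exactly what the InstanceMirrorKlass comments further down rely on:

```cpp
// Sketch: the holder is the oop whose liveness keeps this Klass's
// metadata alive. An anonymous class is not reachable through its
// host loader's dictionary, so its java mirror acts as the holder;
// every other class is held by its class loader oop.
oop Klass::klass_holder() const {
  return is_anonymous() ? java_mirror() : class_loader();
}
```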
@@ -287,7 +287,7 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
// Returns true if "word_size" is available in the VirtualSpace
bool is_available(size_t word_size) { return _top + word_size <= end(); }
bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
MetaWord* top() const { return _top; }
void inc_top(size_t word_size) { _top += word_size; }
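The one-line fix above matters because the old check computes `_top + word_size` first: with a huge `word_size` (such as the one the overflow test below constructs), that addition wraps around the top of the address space and the `<= end()` comparison passes even though nothing fits. (For the `MetaWord*` pointer type the overflow is in fact undefined behavior.) The new form compares the request against the remaining room, a plain subtraction of two valid pointers that cannot wrap. A standalone sketch of the two checks using `uintptr_t`, so the wraparound is well defined and observable:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Pretend _top sits near the very top of the address space.
  uintptr_t top = UINTPTR_MAX - 64;
  uintptr_t end = UINTPTR_MAX - 16;  // 48 units of room remain

  uintptr_t huge_request = 1024;     // clearly more than fits

  // Broken form: top + huge_request wraps to a small value, so the
  // bounds check passes even though the request cannot fit.
  bool broken = (top + huge_request <= end);

  // Fixed form: compare the request against the remaining room;
  // end - top is well defined and cannot wrap.
  bool fixed = (huge_request <= end - top);

  printf("broken check says available: %d\n", broken); // 1 (wrong)
  printf("fixed check says available:  %d\n", fixed);  // 0 (right)
  return 0;
}
```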
@@ -3641,10 +3641,82 @@ class TestVirtualSpaceNodeTest {
}
}
#define assert_is_available_positive(word_size) \
assert(vsn.is_available(word_size), \
err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
"VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
(uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
#define assert_is_available_negative(word_size) \
assert(!vsn.is_available(word_size), \
err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
"VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
(uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
static void test_is_available_positive() {
// Reserve some memory.
VirtualSpaceNode vsn(os::vm_allocation_granularity());
assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
// Commit some memory.
size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
assert(expanded, "Failed to commit");
// Check that is_available accepts the committed size.
assert_is_available_positive(commit_word_size);
// Check that is_available accepts half the committed size.
size_t expand_word_size = commit_word_size / 2;
assert_is_available_positive(expand_word_size);
}
static void test_is_available_negative() {
// Reserve some memory.
VirtualSpaceNode vsn(os::vm_allocation_granularity());
assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
// Commit some memory.
size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
assert(expanded, "Failed to commit");
// Check that is_available doesn't accept a too large size.
size_t two_times_commit_word_size = commit_word_size * 2;
assert_is_available_negative(two_times_commit_word_size);
}
static void test_is_available_overflow() {
// Reserve some memory.
VirtualSpaceNode vsn(os::vm_allocation_granularity());
assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
// Commit some memory.
size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
assert(expanded, "Failed to commit");
// Calculate a size that will overflow the virtual space size.
void* virtual_space_max = (void*)(uintptr_t)-1;
size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
size_t overflow_size = bottom_to_max + BytesPerWord;
size_t overflow_word_size = overflow_size / BytesPerWord;
// Check that is_available can handle the overflow.
assert_is_available_negative(overflow_word_size);
}
static void test_is_available() {
TestVirtualSpaceNodeTest::test_is_available_positive();
TestVirtualSpaceNodeTest::test_is_available_negative();
TestVirtualSpaceNodeTest::test_is_available_overflow();
}
};
void TestVirtualSpaceNode_test() {
TestVirtualSpaceNodeTest::test();
TestVirtualSpaceNodeTest::test_is_available();
}
#endif
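For context on how `TestVirtualSpaceNode_test()` gets run: internal VM tests of this vintage are invoked during startup of non-product builds when `-XX:+ExecuteInternalVMTests` is set. The wiring below is a sketch from memory (the `run_unit_test` macro and its location in prims/jni.cpp are assumptions, not part of this diff):

```cpp
// Sketch: hooking the new test into execute_internal_vm_tests()
// (prims/jni.cpp, non-product builds only).
void execute_internal_vm_tests() {
  if (ExecuteInternalVMTests) {
    run_unit_test(TestVirtualSpaceNode_test());
    // ... other internal tests ...
    tty->print_cr("All internal VM tests passed");
  }
}
// Invoked on a debug build with: java -XX:+ExecuteInternalVMTests -version
```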
@@ -150,10 +150,6 @@ void InstanceClassLoaderKlass::oop_push_contents(PSPromotionManager* pm, oop obj
int InstanceClassLoaderKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
InstanceKlass::oop_update_pointers(cm, obj);
ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
if (loader_data != NULL) {
PSParallelCompact::adjust_class_loader(cm, loader_data);
}
return size_helper();
}
#endif // INCLUDE_ALL_GCS
......
@@ -2199,7 +2199,6 @@ int InstanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
obj, \
PSParallelCompact::adjust_pointer(p), \
assert_is_in)
obj->update_header(cm);
return size;
}
......
@@ -155,8 +155,13 @@ void InstanceMirrorKlass::oop_follow_contents(oop obj) {
// Follow the klass field in the mirror.
Klass* klass = java_lang_Class::as_Klass(obj);
if (klass != NULL) {
// For anonymous classes we need to handle the class loader data,
// otherwise it won't be claimed and can be unloaded.
// An anonymous class doesn't have its own class loader, so the call
// to follow_klass will mark and push its java mirror instead of the
// class loader. When handling the java mirror for an anonymous class
// we need to make sure its class loader data is claimed, this is done
// by calling follow_class_loader explicitly. For non-anonymous classes
// the call to follow_class_loader is made when the class loader itself
// is handled.
if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
MarkSweep::follow_class_loader(klass->class_loader_data());
} else {
@@ -183,7 +188,18 @@ void InstanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm,
// Follow the klass field in the mirror.
Klass* klass = java_lang_Class::as_Klass(obj);
if (klass != NULL) {
// An anonymous class doesn't have its own class loader, so the call
// to follow_klass will mark and push its java mirror instead of the
// class loader. When handling the java mirror for an anonymous class
// we need to make sure its class loader data is claimed, this is done
// by calling follow_class_loader explicitly. For non-anonymous classes
// the call to follow_class_loader is made when the class loader itself
// is handled.
if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
} else {
PSParallelCompact::follow_klass(cm, klass);
}
} else {
// If klass is NULL then this is a mirror for a primitive type.
// We don't have to follow them, since they are handled as strong
@@ -332,17 +348,6 @@ int InstanceMirrorKlass::oop_update_pointers(ParCompactionManager* cm, oop obj)
int size = oop_size(obj);
InstanceKlass::oop_update_pointers(cm, obj);
// Follow the klass field in the mirror.
Klass* klass = java_lang_Class::as_Klass(obj);
if (klass != NULL) {
PSParallelCompact::adjust_klass(cm, klass);
} else {
// If klass is NULL then this is a mirror for a primitive type.
// We don't have to follow them, since they are handled as strong
// roots in Universe::oops_do.
assert(java_lang_Class::is_primitive(obj), "Sanity check");
}
InstanceMirrorKlass_OOP_ITERATE( \
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\
PSParallelCompact::adjust_pointer(p), \
......
@@ -587,7 +587,6 @@ int ObjArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
assert (obj->is_objArray(), "obj must be obj array");
objArrayOop a = objArrayOop(obj);
int size = a->object_size();
a->update_header(cm);
ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p))
return size;
}
......
@@ -328,11 +328,6 @@ class oopDesc {
// return the size of this oop. This is used by the MarkSweep collector.
int adjust_pointers();
#if INCLUDE_ALL_GCS
// Parallel old
void update_header(ParCompactionManager* cm);
#endif // INCLUDE_ALL_GCS
// mark-sweep support
void follow_body(int begin, int end);
......
@@ -80,8 +80,4 @@ inline oop oopDesc::forward_to_atomic(oop p) {
return forwardee();
}
inline void oopDesc::update_header(ParCompactionManager* cm) {
PSParallelCompact::adjust_klass(cm, klass());
}
#endif // SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP