Commit c5e576c9 authored by hseigel

Merge

@@ -38,6 +38,9 @@
// --------------------------------------------------------------------------
// the number of buckets a thread claims
const int ClaimChunkSize = 32;
SymbolTable* SymbolTable::_the_table = NULL;
// Static arena for symbols that are not deallocated
Arena* SymbolTable::_arena = NULL;
@@ -83,16 +86,12 @@ void SymbolTable::symbols_do(SymbolClosure *cl) {
}
}
int SymbolTable::_symbols_removed = 0;
int SymbolTable::_symbols_counted = 0;
volatile int SymbolTable::_parallel_claimed_idx = 0;
void SymbolTable::buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total) {
for (int i = start_idx; i < end_idx; ++i) {
HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
while (entry != NULL) {
@@ -104,14 +103,14 @@ void SymbolTable::unlink() {
break;
}
Symbol* s = entry->literal();
(*memory_total) += s->size();
(*processed)++;
assert(s != NULL, "just checking");
// If reference count is zero, remove.
if (s->refcount() == 0) {
assert(!entry->is_shared(), "shared entries should be kept live");
delete s;
(*removed)++;
*p = entry->next();
the_table()->free_entry(entry);
} else {
@@ -121,12 +120,45 @@ void SymbolTable::unlink() {
entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p);
}
}
}
// Remove unreferenced symbols from the symbol table
// This is done late during GC.
void SymbolTable::unlink(int* processed, int* removed) {
size_t memory_total = 0;
buckets_unlink(0, the_table()->table_size(), processed, removed, &memory_total);
_symbols_removed += *removed;
_symbols_counted += *processed;
// Exclude printing for normal PrintGCDetails because people parse
// this output.
if (PrintGCDetails && Verbose && WizardMode) {
gclog_or_tty->print(" [Symbols=%d size=" SIZE_FORMAT "K] ", *processed,
(memory_total*HeapWordSize)/1024);
}
}
void SymbolTable::possibly_parallel_unlink(int* processed, int* removed) {
const int limit = the_table()->table_size();
size_t memory_total = 0;
for (;;) {
// Grab next set of buckets to scan
int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
if (start_idx >= limit) {
// End of table
break;
}
int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
buckets_unlink(start_idx, end_idx, processed, removed, &memory_total);
}
Atomic::add(*processed, &_symbols_counted);
Atomic::add(*removed, &_symbols_removed);
// Exclude printing for normal PrintGCDetails because people parse
// this output.
if (PrintGCDetails && Verbose && WizardMode) {
gclog_or_tty->print(" [Symbols: scanned=%d removed=%d size=" SIZE_FORMAT "K] ", *processed, *removed,
(memory_total*HeapWordSize)/1024);
}
}
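The serial unlink() and possibly_parallel_unlink() above (and the StringTable variants further down) all rely on the same claiming idiom: a shared claim index is advanced atomically by ClaimChunkSize, each worker processes the half-open bucket range it just claimed, and everyone stops once the index passes the table size. Below is a minimal standalone C++ sketch of that idiom, not HotSpot code: std::atomic::fetch_add stands in for Atomic::add (fetch_add returns the previous value, so the "- ClaimChunkSize" adjustment used above is not needed), the table is a plain vector, and the per-bucket work is just a sum.

// Sketch of the chunked claiming idiom; assumptions: std::atomic instead of
// HotSpot's Atomic class, a vector<int> instead of hashtable buckets.
#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static const int kClaimChunkSize = 32;             // mirrors ClaimChunkSize
static std::atomic<int> g_parallel_claimed_idx(0); // mirrors _parallel_claimed_idx

// Each worker claims [start_idx, end_idx) chunks until the table is exhausted.
static void worker_scan(const std::vector<int>& table, std::atomic<long>& sum) {
  const int limit = static_cast<int>(table.size());
  for (;;) {
    // fetch_add returns the old value, so start_idx is the start of the claimed chunk.
    int start_idx = g_parallel_claimed_idx.fetch_add(kClaimChunkSize);
    if (start_idx >= limit) {
      break; // end of table
    }
    int end_idx = std::min(limit, start_idx + kClaimChunkSize);
    for (int i = start_idx; i < end_idx; ++i) {
      sum += table[i]; // stand-in for the per-bucket unlink work
    }
  }
}

int main() {
  std::vector<int> table(1000, 1);
  std::atomic<long> sum(0);
  g_parallel_claimed_idx = 0; // analogous to clear_parallel_claimed_index()
  std::vector<std::thread> workers;
  for (int t = 0; t < 4; ++t) {
    workers.emplace_back(worker_scan, std::cref(table), std::ref(sum));
  }
  for (std::thread& w : workers) {
    w.join();
  }
  std::printf("claimed all chunks, sum=%ld\n", sum.load());
  return 0;
}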
@@ -494,11 +526,11 @@ void SymbolTable::print_histogram() {
tty->print_cr("Total number of symbols %5d", count);
tty->print_cr("Total size in memory %5dK",
(memory_total*HeapWordSize)/1024);
tty->print_cr("Total counted %5d", _symbols_counted);
tty->print_cr("Total removed %5d", _symbols_removed);
if (_symbols_counted > 0) {
tty->print_cr("Percent removed %3.2f",
((float)_symbols_removed/(float)_symbols_counted)* 100);
}
tty->print_cr("Reference counts %5d", Symbol::_total_count);
tty->print_cr("Symbol arena size %5d used %5d",
@@ -739,39 +771,38 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
return result;
}
void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), processed, removed);
}
void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
// Readers of the table are unlocked, so we should only be removing
// entries at a safepoint.
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
const int limit = the_table()->table_size();
for (;;) {
// Grab next set of buckets to scan
int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
if (start_idx >= limit) {
// End of table
break;
}
int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, processed, removed);
}
}
void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) {
const int limit = the_table()->table_size();
assert(0 <= start_idx && start_idx <= limit,
err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
assert(0 <= end_idx && end_idx <= limit,
err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
assert(start_idx <= end_idx,
err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
start_idx, end_idx));
for (int i = start_idx; i < end_idx; i += 1) {
@@ -786,12 +817,44 @@ void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) {
}
}
void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed) {
const int limit = the_table()->table_size();
assert(0 <= start_idx && start_idx <= limit,
err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
assert(0 <= end_idx && end_idx <= limit,
err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
assert(start_idx <= end_idx,
err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
start_idx, end_idx));
for (int i = start_idx; i < end_idx; ++i) {
HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
while (entry != NULL) {
assert(!entry->is_shared(), "CDS not used for the StringTable");
if (is_alive->do_object_b(entry->literal())) {
if (f != NULL) {
f->do_oop((oop*)entry->literal_addr());
}
p = entry->next_addr();
} else {
*p = entry->next();
the_table()->free_entry(entry);
(*removed)++;
}
(*processed)++;
entry = *p;
}
}
}
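The bucket walk in buckets_unlink_or_oops_do (and in SymbolTable::buckets_unlink above) uses the pointer-to-pointer unlink pattern: p always addresses the link that reaches the current entry, so a dead entry can be spliced out in place without tracking a separate previous node. A standalone C++ sketch of just that pattern follows, with a hypothetical Entry type and a plain liveness flag standing in for HashtableEntry and the is_alive closure.

// Sketch of the pointer-to-pointer unlink pattern; Entry and the 'alive' flag
// are illustrative stand-ins for HashtableEntry and is_alive->do_object_b().
#include <cstdio>

struct Entry {
  int value;
  bool alive;
  Entry* next;
};

// Removes dead entries from one bucket chain and returns how many were removed.
static int unlink_dead(Entry** bucket) {
  int removed = 0;
  Entry** p = bucket;      // like the_table()->bucket_addr(i)
  Entry* entry = *bucket;  // like the_table()->bucket(i)
  while (entry != nullptr) {
    if (entry->alive) {
      p = &entry->next;    // keep the entry; remember the link we may rewrite later
    } else {
      *p = entry->next;    // splice the dead entry out of the chain
      delete entry;        // stand-in for the_table()->free_entry(entry)
      removed++;
    }
    entry = *p;
  }
  return removed;
}

int main() {
  // Build the chain 0 -> 1 -> 2 -> 3 -> 4 where odd values are "dead".
  Entry* head = nullptr;
  for (int v = 4; v >= 0; --v) {
    head = new Entry{v, v % 2 == 0, head};
  }
  int removed = unlink_dead(&head);
  std::printf("removed=%d, remaining:", removed);
  for (Entry* e = head; e != nullptr; e = e->next) {
    std::printf(" %d", e->value);
  }
  std::printf("\n");
  while (head != nullptr) { Entry* n = head->next; delete head; head = n; }
  return 0;
}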
void StringTable::oops_do(OopClosure* f) {
buckets_oops_do(f, 0, the_table()->table_size());
}
void StringTable::possibly_parallel_oops_do(OopClosure* f) {
const int limit = the_table()->table_size();
for (;;) {
@@ -803,7 +866,7 @@ void StringTable::possibly_parallel_oops_do(OopClosure* f) {
}
int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
buckets_oops_do(f, start_idx, end_idx);
}
}
...
@@ -86,8 +86,8 @@ private:
static bool _needs_rehashing;
// For statistics
static int _symbols_removed;
static int _symbols_counted;
Symbol* allocate_symbol(const u1* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
@@ -121,6 +121,11 @@ private:
static Arena* arena() { return _arena; } // called for statistics
static void initialize_symbols(int arena_alloc_size = 0);
static volatile int _parallel_claimed_idx;
// Release any dead symbols
static void buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total);
public:
enum {
symbol_alloc_batch_size = 8,
@@ -177,7 +182,14 @@ public:
unsigned int* hashValues, TRAPS);
// Release any dead symbols
static void unlink() {
int processed = 0;
int removed = 0;
unlink(&processed, &removed);
}
static void unlink(int* processed, int* removed);
// Release any dead symbols, possibly parallel version
static void possibly_parallel_unlink(int* processed, int* removed);
// iterate over symbols
static void symbols_do(SymbolClosure *cl);
@@ -235,6 +247,9 @@
// Rehash the symbol table if it gets out of balance
static void rehash_table();
static bool needs_rehashing() { return _needs_rehashing; }
// Parallel chunked scanning
static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
static int parallel_claimed_index() { return _parallel_claimed_idx; }
};
class StringTable : public Hashtable<oop, mtSymbol> {
@@ -258,7 +273,10 @@ private:
// Apply the given oop closure to the entries in the buckets
// in the range [start_idx, end_idx).
static void buckets_oops_do(OopClosure* f, int start_idx, int end_idx);
// Unlink or apply the given oop closure to the entries in the buckets
// in the range [start_idx, end_idx).
static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed);
StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
sizeof (HashtableEntry<oop, mtSymbol>)) {}
@@ -280,15 +298,28 @@ public:
// GC support
// Delete pointers to otherwise-unreachable objects.
static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f) {
int processed = 0;
int removed = 0;
unlink_or_oops_do(cl, f, &processed, &removed);
}
static void unlink(BoolObjectClosure* cl) {
int processed = 0;
int removed = 0;
unlink_or_oops_do(cl, NULL, &processed, &removed);
}
static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed);
static void unlink(BoolObjectClosure* cl, int* processed, int* removed) {
unlink_or_oops_do(cl, NULL, processed, removed);
}
// Serially invoke "f->do_oop" on the locations of all oops in the table.
static void oops_do(OopClosure* f);
// Possibly parallel versions of the above
static void possibly_parallel_unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed);
static void possibly_parallel_unlink(BoolObjectClosure* cl, int* processed, int* removed) {
possibly_parallel_unlink_or_oops_do(cl, NULL, processed, removed);
}
static void possibly_parallel_oops_do(OopClosure* f);
// Hashing algorithm, used as the hash value used by the
@@ -349,5 +380,6 @@ public:
// Parallel chunked scanning
static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
static int parallel_claimed_index() { return _parallel_claimed_idx; }
};
#endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
@@ -98,116 +98,4 @@ public:
_closure_app_seconds(0.0) { }
};
class BufferingOopsInGenClosure: public OopsInGenClosure {
BufferingOopClosure _boc;
OopsInGenClosure* _oc;
protected:
template <class T> inline void do_oop_work(T* p) {
assert(generation()->is_in_reserved((void*)p), "Must be in!");
_boc.do_oop(p);
}
public:
BufferingOopsInGenClosure(OopsInGenClosure *oc) :
_boc(oc), _oc(oc) {}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
void done() {
_boc.done();
}
double closure_app_seconds () {
return _boc.closure_app_seconds();
}
void set_generation(Generation* gen) {
OopsInGenClosure::set_generation(gen);
_oc->set_generation(gen);
}
void reset_generation() {
// Make sure we finish the current work with the current generation.
_boc.done();
OopsInGenClosure::reset_generation();
_oc->reset_generation();
}
};
class BufferingOopsInHeapRegionClosure: public OopsInHeapRegionClosure {
private:
enum PrivateConstants {
BufferLength = 1024
};
StarTask _buffer[BufferLength];
StarTask* _buffer_top;
StarTask* _buffer_curr;
HeapRegion* _hr_buffer[BufferLength];
HeapRegion** _hr_curr;
OopsInHeapRegionClosure* _oc;
double _closure_app_seconds;
void process_buffer () {
assert((_hr_curr - _hr_buffer) == (_buffer_curr - _buffer),
"the two lengths should be the same");
double start = os::elapsedTime();
HeapRegion** hr_curr = _hr_buffer;
HeapRegion* hr_prev = NULL;
for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
HeapRegion* region = *hr_curr;
if (region != hr_prev) {
_oc->set_region(region);
hr_prev = region;
}
if (curr->is_narrow()) {
assert(UseCompressedOops, "Error");
_oc->do_oop((narrowOop*)(*curr));
} else {
_oc->do_oop((oop*)(*curr));
}
++hr_curr;
}
_buffer_curr = _buffer;
_hr_curr = _hr_buffer;
_closure_app_seconds += (os::elapsedTime() - start);
}
public:
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
if (_buffer_curr == _buffer_top) {
assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
process_buffer();
}
StarTask new_ref(p);
*_buffer_curr = new_ref;
++_buffer_curr;
*_hr_curr = _from;
++_hr_curr;
}
void done () {
if (_buffer_curr > _buffer) {
assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
process_buffer();
}
}
double closure_app_seconds () {
return _closure_app_seconds;
}
BufferingOopsInHeapRegionClosure (OopsInHeapRegionClosure *oc) :
_oc(oc),
_buffer_curr(_buffer), _buffer_top(_buffer + BufferLength),
_hr_curr(_hr_buffer),
_closure_app_seconds(0.0) { }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
@@ -1619,7 +1619,6 @@ public:
}
};
class G1ParVerifyFinalCountTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
@@ -2529,10 +2528,9 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
assert(!rp->discovery_enabled(), "Post condition");
}
g1h->unlink_string_and_symbol_table(&g1_is_alive,
/* process_strings */ false, // currently strings are always roots
/* process_symbols */ true);
}
void ConcurrentMark::swapMarkBitMaps() {
...
@@ -50,8 +50,8 @@
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
@@ -3096,11 +3096,7 @@ const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
return NULL; // keep some compilers happy
}
class VerifyRootsClosure: public OopClosure {
private:
G1CollectedHeap* _g1h;
VerifyOption _vo;
@@ -3136,7 +3132,7 @@ public:
void do_oop(narrowOop* p) { do_oop_nv(p); }
};
class G1VerifyCodeRootOopClosure: public OopClosure {
G1CollectedHeap* _g1h;
OopClosure* _root_cl;
nmethod* _nm;
@@ -4670,8 +4666,8 @@ G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
_during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
_mark_in_progress(_g1->mark_in_progress()) { }
template <G1Barrier barrier, bool do_mark_object>
void G1ParCopyClosure<barrier, do_mark_object>::mark_object(oop obj) {
#ifdef ASSERT
HeapRegion* hr = _g1->heap_region_containing(obj);
assert(hr != NULL, "sanity");
@@ -4682,8 +4678,8 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop
_cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
}
template <G1Barrier barrier, bool do_mark_object>
void G1ParCopyClosure<barrier, do_mark_object>
::mark_forwarded_object(oop from_obj, oop to_obj) {
#ifdef ASSERT
assert(from_obj->is_forwarded(), "from obj should be forwarded");
@@ -4706,8 +4702,8 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
_cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
}
template <G1Barrier barrier, bool do_mark_object>
oop G1ParCopyClosure<barrier, do_mark_object>
::copy_to_survivor_space(oop old) {
size_t word_sz = old->size();
HeapRegion* from_region = _g1->heap_region_containing_raw(old);
@@ -4803,13 +4799,11 @@ void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
}
}
template <G1Barrier barrier, bool do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>
::do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
@@ -4829,10 +4823,7 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
mark_forwarded_object(obj, forwardee);
}
if (barrier == G1BarrierKlass) {
do_klass_barrier(p, forwardee);
}
} else {
@@ -4847,14 +4838,10 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
if (barrier == G1BarrierEvac && obj != NULL) {
_par_scan_state->update_rs(_from, p, _worker_id);
}
}
template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
assert(has_partial_array_mask(p), "invariant");
@@ -5212,6 +5199,99 @@ G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
}
class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
private:
BoolObjectClosure* _is_alive;
int _initial_string_table_size;
int _initial_symbol_table_size;
bool _process_strings;
int _strings_processed;
int _strings_removed;
bool _process_symbols;
int _symbols_processed;
int _symbols_removed;
public:
G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
_process_strings(process_strings), _strings_processed(0), _strings_removed(0),
_process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
_initial_string_table_size = StringTable::the_table()->table_size();
_initial_symbol_table_size = SymbolTable::the_table()->table_size();
if (process_strings) {
StringTable::clear_parallel_claimed_index();
}
if (process_symbols) {
SymbolTable::clear_parallel_claimed_index();
}
}
~G1StringSymbolTableUnlinkTask() {
guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
StringTable::parallel_claimed_index(), _initial_string_table_size));
guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
}
void work(uint worker_id) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
int strings_processed = 0;
int strings_removed = 0;
int symbols_processed = 0;
int symbols_removed = 0;
if (_process_strings) {
StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
Atomic::add(strings_processed, &_strings_processed);
Atomic::add(strings_removed, &_strings_removed);
}
if (_process_symbols) {
SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
Atomic::add(symbols_processed, &_symbols_processed);
Atomic::add(symbols_removed, &_symbols_removed);
}
} else {
if (_process_strings) {
StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
}
if (_process_symbols) {
SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
}
}
}
size_t strings_processed() const { return (size_t)_strings_processed; }
size_t strings_removed() const { return (size_t)_strings_removed; }
size_t symbols_processed() const { return (size_t)_symbols_processed; }
size_t symbols_removed() const { return (size_t)_symbols_removed; }
};
void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
bool process_strings, bool process_symbols) {
uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
_g1h->workers()->active_workers() : 1);
G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
if (G1CollectedHeap::use_parallel_gc_threads()) {
set_par_threads(n_workers);
workers()->run_task(&g1_unlink_task);
set_par_threads(0);
} else {
g1_unlink_task.work(0);
}
if (G1TraceStringSymbolTableScrubbing) {
gclog_or_tty->print_cr("Cleaned string and symbol table, "
"strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
"symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
}
}
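In the parallel path above each worker accumulates processed/removed counts in locals and publishes them once with Atomic::add, so the per-entry loop never contends on the shared fields; the caller then reports a single summary line when G1TraceStringSymbolTableScrubbing is enabled. A standalone C++ sketch of that aggregation flow follows, with std::atomic and std::thread standing in for HotSpot's Atomic and the work gang, and the per-worker scrubbing simulated.

// Sketch of per-worker counting with one atomic publish at the end;
// the "every 10th entry is dead" rule is purely illustrative.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct UnlinkStats {
  std::atomic<int> processed{0};
  std::atomic<int> removed{0};
};

static void worker(UnlinkStats& stats, int entries) {
  int processed = 0;   // local, uncontended counters
  int removed = 0;
  for (int i = 0; i < entries; ++i) {
    processed++;
    if (i % 10 == 0) {
      removed++;       // pretend this entry was dead and got unlinked
    }
  }
  // One atomic publish per worker, analogous to Atomic::add(strings_processed, &_strings_processed).
  stats.processed.fetch_add(processed);
  stats.removed.fetch_add(removed);
}

int main() {
  UnlinkStats stats;
  std::vector<std::thread> workers;
  for (int t = 0; t < 4; ++t) {
    workers.emplace_back(worker, std::ref(stats), 250);
  }
  for (std::thread& w : workers) {
    w.join();
  }
  // Analogous to the G1TraceStringSymbolTableScrubbing summary line above.
  std::printf("Cleaned string and symbol table, strings: %d processed, %d removed\n",
              stats.processed.load(), stats.removed.load());
  return 0;
}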
// Weak Reference Processing support
// An always "is_alive" closure that is used to preserve referents.
...
@@ -209,7 +209,7 @@ class G1CollectedHeap : public SharedHeap {
friend class OldGCAllocRegion;
// Closures used in implementation.
template <G1Barrier barrier, bool do_mark_object>
friend class G1ParCopyClosure;
friend class G1IsAliveClosure;
friend class G1EvacuateFollowersClosure;
@@ -1677,6 +1677,10 @@ public:
// after a full GC
void rebuild_strong_code_roots();
// Delete entries for dead interned strings and clean up unreferenced symbols
// in symbol table, possibly in parallel.
void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
// Verification
// The following is just to alert the verification code
...
@@ -163,11 +163,8 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
// Delete entries for dead interned strings and clean up unreferenced symbols in symbol table.
G1CollectedHeap::heap()->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
if (VerifyDuringGC) {
HandleMark hm; // handle scope
...
@@ -38,7 +38,7 @@ class ReferenceProcessor;
// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
class OopsInHeapRegionClosure: public ExtendedOopClosure {
protected:
HeapRegion* _from;
public:
@@ -131,7 +131,7 @@ class G1ParCopyHelper : public G1ParClosureSuper {
template <class T> void do_klass_barrier(T* p, oop new_obj);
};
template <G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
G1ParScanClosure _scanner;
template <class T> void do_oop_work(T* p);
@@ -166,22 +166,16 @@ public:
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
// The following closure type is defined in g1_specialized_oop_closures.hpp:
//
// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
// We use a separate closure to handle references during evacuation
// failure processing.
@@ -189,7 +183,7 @@ typedef G1ParCopyClosure<false, G1BarrierKlass, true> G1ParScanAndMarkMetadataCl
// (since that closure no longer assumes that the references it
// handles point into the collection set).
typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
class FilterIntoCSClosure: public ExtendedOopClosure {
G1CollectedHeap* _g1;
...
@@ -71,6 +71,9 @@
diagnostic(bool, G1TraceConcRefinement, false, \
"Trace G1 concurrent refinement") \
\
experimental(bool, G1TraceStringSymbolTableScrubbing, false, \
"Trace information string and symbol table scrubbing.") \
\
product(double, G1ConcMarkStepDurationMillis, 10.0, \
"Target duration of individual concurrent marking steps " \
"in milliseconds.") \
...
@@ -33,18 +33,17 @@
// Forward declarations.
enum G1Barrier {
G1BarrierNone,
G1BarrierEvac,
G1BarrierKlass
};
template<G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure;
class G1ParScanClosure;
class G1ParPushHeapRSClosure;
typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
...
/*
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -466,10 +466,12 @@ bool PSScavenge::invoke_no_policy() {
}
}
{
GCTraceTime tm("StringTable", false, false, &_gc_timer);
// Unlink any dead interned Strings and process the remaining live ones.
PSScavengeRootsClosure root_closure(promotion_manager);
StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
}
// Finally, flush the promotion_manager's labs, and deallocate its stacks.
promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
...
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStringSymbolTableStats.java
* @bug 8027476 8027455
* @summary Ensure that the G1TraceStringSymbolTableScrubbing prints the expected message.
* @key gc
* @library /testlibrary
*/
import com.oracle.java.testlibrary.ProcessTools;
import com.oracle.java.testlibrary.OutputAnalyzer;
public class TestStringSymbolTableStats {
public static void main(String[] args) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-XX:+UnlockExperimentalVMOptions",
"-XX:+G1TraceStringSymbolTableScrubbing",
SystemGCTest.class.getName());
OutputAnalyzer output = new OutputAnalyzer(pb.start());
System.out.println("Output:\n" + output.getOutput());
output.shouldContain("Cleaned string and symbol table");
output.shouldHaveExitValue(0);
}
static class SystemGCTest {
public static void main(String [] args) {
System.out.println("Calling System.gc()");
System.gc();
}
}
}