Commit ca4cc345 authored by T tschatzl

8180048: Interned string and symbol table leak memory during parallel unlinking

Summary: Make appending found dead BasicHashtableEntrys to the free list atomic.
Reviewed-by: ehelin, shade
Parent 7ad6d459
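Before this change, each worker thread that found a dead entry pushed it onto the shared _free_list directly; two workers doing that concurrently can read the same list head and overwrite each other's link, so the losing batch of entries is dropped and leaked. The patch below instead collects dead entries per worker and splices the whole batch onto the free list with a compare-and-swap loop. As a minimal standalone sketch of that splice, using std::atomic rather than HotSpot's Atomic:: wrappers (the Entry type and all names here are illustrative only; the real code is BasicHashtable<F>::bulk_free_entries in the hashtable.cpp hunk further down):

#include <atomic>

// Illustrative stand-in for a hashtable entry on the free list.
struct Entry {
  Entry* next;
};

std::atomic<Entry*> free_list{nullptr};   // plays the role of _free_list

// Splice a privately collected list [head .. tail] onto the shared free list.
// A single CAS publishes the whole batch, so concurrent workers cannot lose
// each other's entries the way an unsynchronized "e->next = head; head = e;" can.
void bulk_free(Entry* head, Entry* tail) {
  if (head == nullptr) {
    return;
  }
  Entry* current = free_list.load(std::memory_order_relaxed);
  do {
    tail->next = current;   // retarget our tail at the currently published head
  } while (!free_list.compare_exchange_weak(current, head,
                                            std::memory_order_release,
                                            std::memory_order_relaxed));
}

The per-worker list keeps both a head and a tail pointer precisely so this splice is O(1), independent of how many entries the worker unlinked.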
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,7 +96,7 @@ int SymbolTable::_symbols_removed = 0;
 int SymbolTable::_symbols_counted = 0;
 volatile int SymbolTable::_parallel_claimed_idx = 0;
 
-void SymbolTable::buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total) {
+void SymbolTable::buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context, size_t* memory_total) {
   for (int i = start_idx; i < end_idx; ++i) {
     HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
     HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
@@ -110,15 +110,14 @@ void SymbolTable::buckets_unlink(int start_idx, int end_idx, int* processed, int
       }
       Symbol* s = entry->literal();
       (*memory_total) += s->size();
-      (*processed)++;
+      context->_num_processed++;
       assert(s != NULL, "just checking");
       // If reference count is zero, remove.
       if (s->refcount() == 0) {
         assert(!entry->is_shared(), "shared entries should be kept live");
         delete s;
-        (*removed)++;
         *p = entry->next();
-        the_table()->free_entry(entry);
+        context->free_entry(entry);
       } else {
         p = entry->next_addr();
       }
@@ -132,9 +131,14 @@ void SymbolTable::buckets_unlink(int start_idx, int end_idx, int* processed, int
 // This is done late during GC.
 void SymbolTable::unlink(int* processed, int* removed) {
   size_t memory_total = 0;
-  buckets_unlink(0, the_table()->table_size(), processed, removed, &memory_total);
-  _symbols_removed += *removed;
-  _symbols_counted += *processed;
+  BucketUnlinkContext context;
+  buckets_unlink(0, the_table()->table_size(), &context, &memory_total);
+  _the_table->bulk_free_entries(&context);
+  *processed = context._num_processed;
+  *removed = context._num_removed;
+
+  _symbols_removed = context._num_removed;
+  _symbols_counted = context._num_processed;
   // Exclude printing for normal PrintGCDetails because people parse
   // this output.
   if (PrintGCDetails && Verbose && WizardMode) {
@@ -148,6 +152,7 @@ void SymbolTable::possibly_parallel_unlink(int* processed, int* removed) {
   size_t memory_total = 0;
 
+  BucketUnlinkContext context;
   for (;;) {
     // Grab next set of buckets to scan
     int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
@@ -157,10 +162,15 @@ void SymbolTable::possibly_parallel_unlink(int* processed, int* removed) {
     }
     int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
-    buckets_unlink(start_idx, end_idx, processed, removed, &memory_total);
+    buckets_unlink(start_idx, end_idx, &context, &memory_total);
   }
-  Atomic::add(*processed, &_symbols_counted);
-  Atomic::add(*removed, &_symbols_removed);
+
+  _the_table->bulk_free_entries(&context);
+  *processed = context._num_processed;
+  *removed = context._num_removed;
+
+  Atomic::add(context._num_processed, &_symbols_counted);
+  Atomic::add(context._num_removed, &_symbols_removed);
   // Exclude printing for normal PrintGCDetails because people parse
   // this output.
   if (PrintGCDetails && Verbose && WizardMode) {
@@ -811,7 +821,11 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
 }
 
 void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
-  buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), processed, removed);
+  BucketUnlinkContext context;
+  buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), &context);
+  _the_table->bulk_free_entries(&context);
+  *processed = context._num_processed;
+  *removed = context._num_removed;
 }
 
 void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
@@ -820,6 +834,7 @@ void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_aliv
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   const int limit = the_table()->table_size();
 
+  BucketUnlinkContext context;
   for (;;) {
     // Grab next set of buckets to scan
     int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
@@ -829,8 +844,11 @@ void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_aliv
     }
     int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
-    buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, processed, removed);
+    buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, &context);
   }
+  _the_table->bulk_free_entries(&context);
+  *processed = context._num_processed;
+  *removed = context._num_removed;
 }
 
 void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) {
@@ -856,7 +874,7 @@ void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) {
   }
 }
 
-void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed) {
+void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, BucketUnlinkContext* context) {
   const int limit = the_table()->table_size();
 
   assert(0 <= start_idx && start_idx <= limit,
@@ -880,10 +898,9 @@ void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClos
         p = entry->next_addr();
       } else {
         *p = entry->next();
-        the_table()->free_entry(entry);
-        (*removed)++;
+        context->free_entry(entry);
       }
-      (*processed)++;
+      context->_num_processed++;
       entry = *p;
     }
   }
......
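The possibly_parallel_* variants above distribute work by letting each GC worker atomically claim chunks of ClaimChunkSize buckets from _parallel_claimed_idx; with this patch each worker also unlinks into its own BucketUnlinkContext and merges with the shared state only once at the end. A hedged sketch of that claiming pattern in isolation, using std::atomic in place of Atomic::add (the worker body, chunk size, and names are illustrative):

#include <algorithm>
#include <atomic>

const int ClaimChunkSize = 32;                // same chunking idea as the table code above
std::atomic<int> parallel_claimed_idx{0};     // plays the role of _parallel_claimed_idx

// Per-worker tallies; in the patch these live in a BucketUnlinkContext together
// with the linked list of unlinked entries.
struct LocalTally {
  int num_processed = 0;
  int num_removed = 0;
};

// Each worker claims disjoint ranges [start_idx, end_idx) until the table is
// exhausted, so no bucket is scanned twice and the claim itself needs no lock.
void worker_scan(int table_size, LocalTally* tally) {
  for (;;) {
    int start_idx = parallel_claimed_idx.fetch_add(ClaimChunkSize);  // returns the old value
    if (start_idx >= table_size) {
      break;                                  // nothing left to claim
    }
    int end_idx = std::min(table_size, start_idx + ClaimChunkSize);
    for (int i = start_idx; i < end_idx; ++i) {
      tally->num_processed++;                 // the real code unlinks dead entries here
    }
  }
}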
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,8 +124,11 @@ private:
   static volatile int _parallel_claimed_idx;
 
-  // Release any dead symbols
-  static void buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total);
+  typedef SymbolTable::BucketUnlinkContext BucketUnlinkContext;
+  // Release any dead symbols. Unlinked bucket entries are collected in the given
+  // context to be freed later.
+  // This allows multiple threads to work on the table at once.
+  static void buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context, size_t* memory_total);
 public:
   enum {
     symbol_alloc_batch_size = 8,
@@ -274,9 +277,13 @@ private:
   // Apply the give oop closure to the entries to the buckets
   // in the range [start_idx, end_idx).
   static void buckets_oops_do(OopClosure* f, int start_idx, int end_idx);
+
+  typedef StringTable::BucketUnlinkContext BucketUnlinkContext;
   // Unlink or apply the give oop closure to the entries to the buckets
-  // in the range [start_idx, end_idx).
-  static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed);
+  // in the range [start_idx, end_idx). Unlinked bucket entries are collected in the given
+  // context to be freed later.
+  // This allows multiple threads to work on the table at once.
+  static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, BucketUnlinkContext* context);
 
   StringTable() : RehashableHashtable<oop, mtSymbol>((int)StringTableSize,
                   sizeof (HashtableEntry<oop, mtSymbol>)) {}
......
@@ -704,7 +704,7 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
                                                                                        \
   nonstatic_field(BasicHashtable<mtInternal>, _table_size, int)                        \
   nonstatic_field(BasicHashtable<mtInternal>, _buckets, HashtableBucket<mtInternal>*)  \
-  nonstatic_field(BasicHashtable<mtInternal>, _free_list, BasicHashtableEntry<mtInternal>*) \
+  volatile_nonstatic_field(BasicHashtable<mtInternal>, _free_list, BasicHashtableEntry<mtInternal>*) \
   nonstatic_field(BasicHashtable<mtInternal>, _first_free_entry, char*)                \
   nonstatic_field(BasicHashtable<mtInternal>, _end_block, char*)                       \
   nonstatic_field(BasicHashtable<mtInternal>, _entry_size, int)                        \
......
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,6 +172,35 @@ template <MEMFLAGS F> void BasicHashtable<F>::reverse() {
   }
 }
 
+template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
+  entry->set_next(_removed_head);
+  _removed_head = entry;
+  if (_removed_tail == NULL) {
+    _removed_tail = entry;
+  }
+  _num_removed++;
+}
+
+template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
+  if (context->_num_removed == 0) {
+    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
+           err_msg("Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
+                   p2i(context->_removed_head), p2i(context->_removed_tail)));
+    return;
+  }
+
+  // MT-safe add of the list of BasicHashTableEntrys from the context to the free list.
+  BasicHashtableEntry<F>* current = _free_list;
+  while (true) {
+    context->_removed_tail->set_next(current);
+    BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current);
+    if (old == current) {
+      break;
+    }
+    current = old;
+  }
+  Atomic::add(-context->_num_removed, &_number_of_entries);
+}
+
 // Copy the table to the shared space.
......
@@ -164,11 +164,11 @@ private:
   // Instance variables
   int               _table_size;
   HashtableBucket<F>*     _buckets;
-  BasicHashtableEntry<F>* _free_list;
+  BasicHashtableEntry<F>* volatile _free_list;
   char*             _first_free_entry;
   char*             _end_block;
   int               _entry_size;
-  int               _number_of_entries;
+  volatile int      _number_of_entries;
 
 protected:
@@ -215,6 +215,24 @@ protected:
   // Free the buckets in this hashtable
   void free_buckets();
 
+  // Helper data structure containing context for the bucket entry unlink process,
+  // storing the unlinked buckets in a linked list.
+  // Also avoids the need to pass around these four members as parameters everywhere.
+  struct BucketUnlinkContext {
+    int _num_processed;
+    int _num_removed;
+    // Head and tail pointers for the linked list of removed entries.
+    BasicHashtableEntry<F>* _removed_head;
+    BasicHashtableEntry<F>* _removed_tail;
+
+    BucketUnlinkContext() : _num_processed(0), _num_removed(0), _removed_head(NULL), _removed_tail(NULL) {
+    }
+
+    void free_entry(BasicHashtableEntry<F>* entry);
+  };
+  // Add of bucket entries linked together in the given context to the global free list. This method
+  // is mt-safe wrt. to other calls of this method.
+  void bulk_free_entries(BucketUnlinkContext* context);
 public:
   int table_size() { return _table_size; }
   void set_entry(int index, BasicHashtableEntry<F>* entry);
......
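Taken together, a worker's life cycle under this scheme is: unlink dead entries into a private BucketUnlinkContext, then hand the whole batch to bulk_free_entries exactly once. A hedged end-to-end sketch of that flow in standard C++, with std::thread and std::atomic standing in for GC worker threads and the Atomic:: wrappers (all names and numbers are illustrative):

#include <atomic>
#include <thread>
#include <vector>

struct Node { Node* next = nullptr; };       // illustrative entry type

std::atomic<Node*> free_list{nullptr};       // shared free list (_free_list)
std::atomic<int> number_of_entries{1024};    // pretend the table starts with 1024 entries

// Per-thread analogue of BucketUnlinkContext: prepending is O(1), and keeping a
// tail pointer lets the whole batch be spliced later with a single CAS.
struct Context {
  Node* head = nullptr;
  Node* tail = nullptr;
  int num_removed = 0;

  void collect(Node* n) {
    n->next = head;
    head = n;
    if (tail == nullptr) {
      tail = n;
    }
    num_removed++;
  }
};

// Analogue of bulk_free_entries(): one CAS splice plus one atomic counter update.
void bulk_free(Context& c) {
  if (c.num_removed == 0) {
    return;
  }
  Node* current = free_list.load();
  do {
    c.tail->next = current;
  } while (!free_list.compare_exchange_weak(current, c.head));
  number_of_entries.fetch_sub(c.num_removed);
}

int main() {
  std::vector<std::thread> workers;
  for (int t = 0; t < 4; ++t) {
    workers.emplace_back([] {
      Context c;
      for (int i = 0; i < 256; ++i) {
        c.collect(new Node());               // pretend these are dead table entries
      }
      bulk_free(c);                          // merge with the shared state once, at the end
    });
  }
  for (std::thread& w : workers) {
    w.join();
  }
  return 0;
}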