Commit e5a1fbeb authored by T tschatzl

Merge

 #
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -89,7 +89,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
   g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \
   g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
   g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
-  ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
+  ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp g1CodeCacheRemSet.cpp \
   adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
   cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
   parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \
...
@@ -24,6 +24,14 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "memory/padded.inline.hpp"
+
+// Allocate a new array, generic version.
+address G1BiasedMappedArrayBase::create_new_base_array(size_t length, size_t elem_size) {
+  assert(length > 0, "just checking");
+  assert(elem_size > 0, "just checking");
+  return PaddedPrimitiveArray<u_char, mtGC>::create_unfreeable(length * elem_size);
+}
+
 #ifndef PRODUCT
 void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
...
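The hunk above replaces a plain NEW_C_HEAP_ARRAY allocation with PaddedPrimitiveArray<u_char, mtGC>::create_unfreeable, that is, a never-freed backing store rounded up and aligned so the array does not share cache lines with neighbouring allocations. A minimal standalone sketch of that rounding, not HotSpot code (alloc_padded and kCacheLineSize are illustrative names, and the cache-line size is an assumption):

#include <cstddef>
#include <cstdlib>

static const size_t kCacheLineSize = 64;  // assumed cache-line size

// Round the request up to a cache-line multiple and return cache-line
// aligned storage, so the array ends exactly on a line boundary.
void* alloc_padded(size_t bytes) {
  size_t padded = (bytes + kCacheLineSize - 1) & ~(kCacheLineSize - 1);
  return std::aligned_alloc(kCacheLineSize, padded);  // C++17
}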
@@ -25,8 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 
+#include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
-#include "memory/allocation.inline.hpp"
 
 // Implements the common base functionality for arrays that contain provisions
 // for accessing its elements using a biased index.
@@ -48,11 +48,7 @@ protected:
     _bias(0), _shift_by(0) { }
 
   // Allocate a new array, generic version.
-  static address create_new_base_array(size_t length, size_t elem_size) {
-    assert(length > 0, "just checking");
-    assert(elem_size > 0, "just checking");
-    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
-  }
+  static address create_new_base_array(size_t length, size_t elem_size);
 
   // Initialize the members of this class. The biased start address of this array
   // is the bias (in elements) multiplied by the element size.
...
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
#include "memory/iterator.hpp"
G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
  _top = bottom();
}

void G1CodeRootChunk::reset() {
  _next = _prev = NULL;
  _top = bottom();
}

void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
  nmethod** cur = bottom();
  while (cur != _top) {
    cl->do_code_blob(*cur);
    cur++;
  }
}

FreeList<G1CodeRootChunk> G1CodeRootSet::_free_list;
size_t G1CodeRootSet::_num_chunks_handed_out = 0;

G1CodeRootChunk* G1CodeRootSet::new_chunk() {
  G1CodeRootChunk* result = _free_list.get_chunk_at_head();
  if (result == NULL) {
    result = new G1CodeRootChunk();
  }
  G1CodeRootSet::_num_chunks_handed_out++;
  result->reset();
  return result;
}

void G1CodeRootSet::free_chunk(G1CodeRootChunk* chunk) {
  _free_list.return_chunk_at_head(chunk);
  G1CodeRootSet::_num_chunks_handed_out--;
}

void G1CodeRootSet::free_all_chunks(FreeList<G1CodeRootChunk>* list) {
  G1CodeRootSet::_num_chunks_handed_out -= list->count();
  _free_list.prepend(list);
}

void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
  size_t keep = G1CodeRootSet::_num_chunks_handed_out * keep_ratio / 100;
  if (keep >= (size_t)_free_list.count()) {
    return;
  }

  FreeList<G1CodeRootChunk> temp;
  temp.initialize();
  temp.set_size(G1CodeRootChunk::word_size());

  _free_list.getFirstNChunksFromList((size_t)_free_list.count() - keep, &temp);

  G1CodeRootChunk* cur = temp.get_chunk_at_head();
  while (cur != NULL) {
    delete cur;
    cur = temp.get_chunk_at_head();
  }
}

size_t G1CodeRootSet::static_mem_size() {
  return sizeof(_free_list) + sizeof(_num_chunks_handed_out);
}

size_t G1CodeRootSet::fl_mem_size() {
  return _free_list.count() * _free_list.size();
}

void G1CodeRootSet::initialize() {
  _free_list.initialize();
  _free_list.set_size(G1CodeRootChunk::word_size());
}

G1CodeRootSet::G1CodeRootSet() : _list(), _length(0) {
  _list.initialize();
  _list.set_size(G1CodeRootChunk::word_size());
}

G1CodeRootSet::~G1CodeRootSet() {
  clear();
}

void G1CodeRootSet::add(nmethod* method) {
  if (!contains(method)) {
    // Try to add the nmethod. If there is not enough space, get a new chunk.
    if (_list.head() == NULL || _list.head()->is_full()) {
      G1CodeRootChunk* cur = new_chunk();
      _list.return_chunk_at_head(cur);
    }
    bool result = _list.head()->add(method);
    guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
    _length++;
  }
}

void G1CodeRootSet::remove(nmethod* method) {
  G1CodeRootChunk* found = find(method);
  if (found != NULL) {
    bool result = found->remove(method);
    guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method));
    // eventually free completely emptied chunk
    if (found->is_empty()) {
      _list.remove_chunk(found);
      free(found);
    }
    _length--;
  }
  assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
}

nmethod* G1CodeRootSet::pop() {
  do {
    G1CodeRootChunk* cur = _list.head();
    if (cur == NULL) {
      assert(_length == 0, "when there are no chunks, there should be no elements");
      return NULL;
    }
    nmethod* result = cur->pop();
    if (result != NULL) {
      _length--;
      return result;
    } else {
      free(_list.get_chunk_at_head());
    }
  } while (true);
}

G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
  G1CodeRootChunk* cur = _list.head();
  while (cur != NULL) {
    if (cur->contains(method)) {
      return cur;
    }
    cur = (G1CodeRootChunk*)cur->next();
  }
  return NULL;
}

void G1CodeRootSet::free(G1CodeRootChunk* chunk) {
  free_chunk(chunk);
}

bool G1CodeRootSet::contains(nmethod* method) {
  return find(method) != NULL;
}

void G1CodeRootSet::clear() {
  free_all_chunks(&_list);
  _length = 0;
}

void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
  G1CodeRootChunk* cur = _list.head();
  while (cur != NULL) {
    cur->nmethods_do(blk);
    cur = (G1CodeRootChunk*)cur->next();
  }
}
size_t G1CodeRootSet::mem_size() {
  // sizeof(*this), not sizeof(this): we want the size of the set object,
  // not of a pointer to it.
  return sizeof(*this) + _list.count() * _list.size();
}
#ifndef PRODUCT

void G1CodeRootSet::test() {
  initialize();

  assert(_free_list.count() == 0, "Free List must be empty");
  assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet");

  // The number of chunks that we allocate for purge testing.
  size_t const num_chunks = 10;

  {
    G1CodeRootSet set1;
    assert(set1.is_empty(), "Code root set must be initially empty but is not.");

    set1.add((nmethod*)1);
    assert(_num_chunks_handed_out == 1,
           err_msg("Must have allocated and handed out one chunk, but handed out "
                   SIZE_FORMAT" chunks", _num_chunks_handed_out));
    assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
                                       SIZE_FORMAT" elements", set1.length()));

    // G1CodeRootChunk::word_size() is larger than G1CodeRootChunk::num_entries which
    // we cannot access.
    for (uint i = 0; i < G1CodeRootChunk::word_size() + 1; i++) {
      set1.add((nmethod*)1);
    }
    assert(_num_chunks_handed_out == 1,
           err_msg("Duplicate detection must have prevented allocation of further "
                   "chunks but contains "SIZE_FORMAT, _num_chunks_handed_out));
    assert(set1.length() == 1,
           err_msg("Duplicate detection should not have increased the set size but "
                   "is "SIZE_FORMAT, set1.length()));

    size_t num_total_after_add = G1CodeRootChunk::word_size() + 1;
    for (size_t i = 0; i < num_total_after_add - 1; i++) {
      set1.add((nmethod*)(2 + i));
    }
    assert(_num_chunks_handed_out > 1,
           "After adding more code roots, more than one chunk should have been handed out");
    assert(set1.length() == num_total_after_add,
           err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
                   "need to be in the set, but there are only "SIZE_FORMAT,
                   num_total_after_add, set1.length()));

    size_t num_popped = 0;
    while (set1.pop() != NULL) {
      num_popped++;
    }
    assert(num_popped == num_total_after_add,
           err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
                   "were added", num_popped, num_total_after_add));
    assert(_num_chunks_handed_out == 0,
           err_msg("After popping all elements, all chunks must have been returned "
                   "but are still "SIZE_FORMAT, _num_chunks_handed_out));

    purge_chunks(0);
    assert(_free_list.count() == 0,
           err_msg("After purging everything, the free list must be empty but still "
                   "contains "SIZE_FORMAT" chunks", _free_list.count()));

    // Add some more handed out chunks.
    size_t i = 0;
    while (_num_chunks_handed_out < num_chunks) {
      set1.add((nmethod*)i);
      i++;
    }

    {
      // Generate chunks on the free list.
      G1CodeRootSet set2;
      size_t i = 0;
      while (_num_chunks_handed_out < num_chunks * 2) {
        set2.add((nmethod*)i);
        i++;
      }
      // Exit of the scope of the set2 object will call the destructor that generates
      // num_chunks elements on the free list.
    }

    assert(_num_chunks_handed_out == num_chunks,
           err_msg("Deletion of the second set must have resulted in giving back "
                   "those, but there is still "SIZE_FORMAT" handed out, expecting "
                   SIZE_FORMAT, _num_chunks_handed_out, num_chunks));
    assert((size_t)_free_list.count() == num_chunks,
           err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
                   "but there are only "SIZE_FORMAT, num_chunks, _free_list.count()));

    size_t const test_percentage = 50;
    purge_chunks(test_percentage);
    assert(_num_chunks_handed_out == num_chunks,
           err_msg("Purging must not hand out chunks but there are "SIZE_FORMAT,
                   _num_chunks_handed_out));
    assert((size_t)_free_list.count() == (ssize_t)(num_chunks * test_percentage / 100),
           err_msg("Must have purged "SIZE_FORMAT" percent of "SIZE_FORMAT" chunks "
                   "but there are "SSIZE_FORMAT, test_percentage, num_chunks,
                   _free_list.count()));

    // Purge the remainder of the chunks on the free list.
    purge_chunks(0);
    assert(_free_list.count() == 0, "Free List must be empty");
    assert(_num_chunks_handed_out == num_chunks,
           err_msg("Expected to be "SIZE_FORMAT" chunks handed out from the first set "
                   "but there are "SIZE_FORMAT, num_chunks, _num_chunks_handed_out));

    // Exit of the scope of the set1 object will call the destructor that generates
    // num_chunks additional elements on the free list.
  }

  assert(_num_chunks_handed_out == 0,
         err_msg("Deletion of the only set must have resulted in no chunks handed "
                 "out, but there is still "SIZE_FORMAT" handed out", _num_chunks_handed_out));
  assert((size_t)_free_list.count() == num_chunks,
         err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
                 "but there are only "SSIZE_FORMAT, num_chunks, _free_list.count()));

  // Restore initial state.
  purge_chunks(0);
  assert(_free_list.count() == 0, "Free List must be empty");
  assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet");
}

void TestCodeCacheRemSet_test() {
  G1CodeRootSet::test();
}

#endif
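purge_chunks(keep_ratio) above retains at most keep_ratio percent of the handed-out chunk count on the free list and deletes the rest. A hedged, standalone sketch of just that arithmetic (chunks_to_delete is an illustrative helper, not part of HotSpot):

#include <cstddef>

// How many free-list chunks a purge_chunks-style trim would delete when
// handed_out * keep_ratio / 100 chunks may stay cached.
size_t chunks_to_delete(size_t handed_out, size_t free_count, size_t keep_ratio) {
  size_t keep = handed_out * keep_ratio / 100;
  return (free_count > keep) ? free_count - keep : 0;
}

// e.g. with 10 chunks handed out, 8 on the free list and keep_ratio == 50:
// keep = 10 * 50 / 100 = 5, so chunks_to_delete(10, 8, 50) == 3.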
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
#include "memory/allocation.hpp"
#include "memory/freeList.hpp"
#include "runtime/globals.hpp"
class CodeBlobClosure;
class G1CodeRootChunk : public CHeapObj<mtGC> {
 private:
  static const int NUM_ENTRIES = 32;
 public:
  G1CodeRootChunk* _next;
  G1CodeRootChunk* _prev;

  nmethod** _top;

  nmethod* _data[NUM_ENTRIES];

  nmethod** bottom() const {
    return (nmethod**) &(_data[0]);
  }

  nmethod** end() const {
    return (nmethod**) &(_data[NUM_ENTRIES]);
  }

 public:
  G1CodeRootChunk();
  ~G1CodeRootChunk() {}

  static size_t word_size() { return (size_t)(align_size_up_(sizeof(G1CodeRootChunk), HeapWordSize) / HeapWordSize); }

  // FreeList "interface" methods

  G1CodeRootChunk* next() const     { return _next; }
  G1CodeRootChunk* prev() const     { return _prev; }
  void set_next(G1CodeRootChunk* v) { _next = v; assert(v != this, "Boom");}
  void set_prev(G1CodeRootChunk* v) { _prev = v; assert(v != this, "Boom");}
  void clear_next()                 { set_next(NULL); }
  void clear_prev()                 { set_prev(NULL); }

  size_t size() const { return word_size(); }

  void link_next(G1CodeRootChunk* ptr) { set_next(ptr); }
  void link_prev(G1CodeRootChunk* ptr) { set_prev(ptr); }
  void link_after(G1CodeRootChunk* ptr) {
    link_next(ptr);
    if (ptr != NULL) ptr->link_prev((G1CodeRootChunk*)this);
  }

  bool is_free() { return true; }

  // New G1CodeRootChunk routines

  void reset();

  bool is_empty() const {
    return _top == bottom();
  }

  bool is_full() const {
    return _top == (nmethod**)end();
  }

  bool contains(nmethod* method) {
    nmethod** cur = bottom();
    while (cur != _top) {
      if (*cur == method) return true;
      cur++;
    }
    return false;
  }

  bool add(nmethod* method) {
    if (is_full()) return false;
    *_top = method;
    _top++;
    return true;
  }

  bool remove(nmethod* method) {
    nmethod** cur = bottom();
    while (cur != _top) {
      if (*cur == method) {
        memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**));
        _top--;
        return true;
      }
      cur++;
    }
    return false;
  }

  void nmethods_do(CodeBlobClosure* blk);

  nmethod* pop() {
    if (is_empty()) {
      return NULL;
    }
    _top--;
    return *_top;
  }
};
// Implements storage for a set of code roots.
// All methods that modify the set are not thread-safe except if otherwise noted.
class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
 private:
  // Global free chunk list management
  static FreeList<G1CodeRootChunk> _free_list;
  // Total number of chunks handed out
  static size_t _num_chunks_handed_out;

  static G1CodeRootChunk* new_chunk();
  static void free_chunk(G1CodeRootChunk* chunk);
  // Free all elements of the given list.
  static void free_all_chunks(FreeList<G1CodeRootChunk>* list);

  // Return the chunk that contains the given nmethod, NULL otherwise.
  // Scans the list of chunks backwards, as this method is used to add new
  // entries, which are typically added in bulk for a single nmethod.
  G1CodeRootChunk* find(nmethod* method);
  void free(G1CodeRootChunk* chunk);

  size_t _length;
  FreeList<G1CodeRootChunk> _list;

 public:
  G1CodeRootSet();
  ~G1CodeRootSet();

  static void initialize();
  static void purge_chunks(size_t keep_ratio);

  static size_t static_mem_size();
  static size_t fl_mem_size();

  // Search for the code blob from the recently allocated ones to find duplicates more quickly, as this
  // method is likely to be repeatedly called with the same nmethod.
  void add(nmethod* method);

  void remove(nmethod* method);
  nmethod* pop();

  bool contains(nmethod* method);

  void clear();

  void nmethods_do(CodeBlobClosure* blk) const;

  bool is_empty() { return length() == 0; }

  // Length in elements
  size_t length() const { return _length; }

  // Memory size in bytes taken by this set.
  size_t mem_size();

  static void test() PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
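A hedged usage sketch of the chunk interface above, assuming the G1CodeRootChunk declaration just shown; the nmethod* value is a dummy cast from an integer, purely for illustration:

void chunk_usage_example() {
  G1CodeRootChunk c;
  nmethod* m = (nmethod*)0x1000;          // fake value for illustration
  bool added = c.add(m);                  // succeeds while !is_full()
  assert(added && c.contains(m), "contains() scans [bottom, _top)");
  c.remove(m);                            // memmove closes the gap, _top--
  assert(c.pop() == NULL, "chunk is empty again");
}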
@@ -169,14 +169,6 @@ public:
   int calls() { return _calls; }
 };
 
-class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
- public:
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
-    *card_ptr = CardTableModRefBS::dirty_card_val();
-    return true;
-  }
-};
-
 YoungList::YoungList(G1CollectedHeap* g1h) :
     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
@@ -1962,7 +1954,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   int n_queues = MAX2((int)ParallelGCThreads, 1);
   _task_queues = new RefToScanQueueSet(n_queues);
 
-  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
+  uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
   assert(n_rem_sets > 0, "Invariant.");
 
   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
@@ -2368,8 +2360,12 @@ public:
 };
 
 size_t G1CollectedHeap::recalculate_used() const {
+  double recalculate_used_start = os::elapsedTime();
+
   SumUsedClosure blk;
   heap_region_iterate(&blk);
+
+  g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
   return blk.result();
 }
@@ -4402,6 +4398,8 @@ void G1CollectedHeap::finalize_for_evac_failure() {
 void G1CollectedHeap::remove_self_forwarding_pointers() {
   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
 
+  double remove_self_forwards_start = os::elapsedTime();
+
   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -4429,6 +4427,8 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
   }
   _objs_with_preserved_marks.clear(true);
   _preserved_marks_of_objs.clear(true);
+
+  g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
 }
 
 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
@@ -4650,9 +4650,7 @@ bool G1ParScanThreadState::verify_task(StarTask ref) const {
 #endif // ASSERT
 
 void G1ParScanThreadState::trim_queue() {
-  assert(_evac_cl != NULL, "not set");
   assert(_evac_failure_cl != NULL, "not set");
-  assert(_partial_scan_cl != NULL, "not set");
 
   StarTask ref;
   do {
@@ -4854,55 +4852,6 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
 
-template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
-  assert(has_partial_array_mask(p), "invariant");
-  oop from_obj = clear_partial_array_mask(p);
-
-  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-  assert(from_obj->is_objArray(), "must be obj array");
-  objArrayOop from_obj_array = objArrayOop(from_obj);
-  // The from-space object contains the real length.
-  int length = from_obj_array->length();
-
-  assert(from_obj->is_forwarded(), "must be forwarded");
-  oop to_obj = from_obj->forwardee();
-  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-  objArrayOop to_obj_array = objArrayOop(to_obj);
-  // We keep track of the next start index in the length field of the
-  // to-space object.
-  int next_index = to_obj_array->length();
-  assert(0 <= next_index && next_index < length,
-         err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-  int start = next_index;
-  int end = length;
-  int remainder = end - start;
-  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-  if (remainder > 2 * ParGCArrayScanChunk) {
-    end = start + ParGCArrayScanChunk;
-    to_obj_array->set_length(end);
-    // Push the remainder before we process the range in case another
-    // worker has run out of things to do and can steal it.
-    oop* from_obj_p = set_partial_array_mask(from_obj);
-    _par_scan_state->push_on_queue(from_obj_p);
-  } else {
-    assert(length == end, "sanity");
-    // We'll process the final range for this object. Restore the length
-    // so that the heap remains parsable in case of evacuation failure.
-    to_obj_array->set_length(end);
-  }
-  _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
-  // Process indexes [start,end). It will also process the header
-  // along with the first chunk (i.e., the chunk with start == 0).
-  // Note that at this point the length field of to_obj_array is not
-  // correct given that we are using it to keep track of the next
-  // start index. oop_iterate_range() (thankfully!) ignores the length
-  // field and only relies on the start / end parameters. It does
-  // however return the size of the object which will be incorrect. So
-  // we have to ignore it even if we wanted to use it.
-  to_obj_array->oop_iterate_range(&_scanner, start, end);
-}
-
 class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
   G1CollectedHeap* _g1h;
@@ -5044,13 +4993,9 @@ public:
     ReferenceProcessor* rp = _g1h->ref_processor_stw();
 
     G1ParScanThreadState pss(_g1h, worker_id, rp);
-    G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
-    G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp);
 
-    pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
-    pss.set_partial_scan_closure(&partial_scan_cl);
 
     G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
     G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
@@ -5306,6 +5251,29 @@ void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive
   }
 }
 
+class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
+ public:
+  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+    *card_ptr = CardTableModRefBS::dirty_card_val();
+    return true;
+  }
+};
+
+void G1CollectedHeap::redirty_logged_cards() {
+  guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
+  double redirty_logged_cards_start = os::elapsedTime();
+
+  RedirtyLoggedCardTableEntryFastClosure redirty;
+  dirty_card_queue_set().set_closure(&redirty);
+  dirty_card_queue_set().apply_closure_to_all_completed_buffers();
+
+  DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
+  dcq.merge_bufferlists(&dirty_card_queue_set());
+  assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
+
+  g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
+}
+
 // Weak Reference Processing support
 
 // An always "is_alive" closure that is used to preserve referents.
@@ -5487,14 +5455,9 @@ public:
     G1STWIsAliveClosure is_alive(_g1h);
 
     G1ParScanThreadState pss(_g1h, worker_id, NULL);
-    G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
-    G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
 
-    pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
-    pss.set_partial_scan_closure(&partial_scan_cl);
 
     G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
     G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
@@ -5599,13 +5562,9 @@ public:
     HandleMark hm;
 
     G1ParScanThreadState pss(_g1h, worker_id, NULL);
-    G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
-    G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
 
-    pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
-    pss.set_partial_scan_closure(&partial_scan_cl);
 
     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
@@ -5729,13 +5688,9 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
   // We do not embed a reference processor in the copying/scanning
   // closures while we're actually processing the discovered
   // reference objects.
-  G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL);
   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
-  G1ParScanPartialArrayClosure partial_scan_cl(this, &pss, NULL);
 
-  pss.set_evac_closure(&scan_evac_cl);
   pss.set_evac_failure_closure(&evac_failure_cl);
-  pss.set_partial_scan_closure(&partial_scan_cl);
 
   assert(pss.refs()->is_empty(), "pre-condition");
@@ -5934,6 +5889,8 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
   // strong code roots for a particular heap region.
   migrate_strong_code_roots();
 
+  purge_code_root_memory();
+
   if (g1_policy()->during_initial_mark_pause()) {
     // Reset the claim values set during marking the strong code roots
     reset_heap_region_claim_values();
@@ -5960,20 +5917,15 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
   enqueue_discovered_references(n_workers);
 
   if (G1DeferredRSUpdate) {
-    RedirtyLoggedCardTableEntryFastClosure redirty;
-    dirty_card_queue_set().set_closure(&redirty);
-    dirty_card_queue_set().apply_closure_to_all_completed_buffers();
-
-    DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
-    dcq.merge_bufferlists(&dirty_card_queue_set());
-    assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
+    redirty_logged_cards();
   }
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 }
 void G1CollectedHeap::free_region(HeapRegion* hr,
                                   FreeRegionList* free_list,
-                                  bool par) {
+                                  bool par,
+                                  bool locked) {
   assert(!hr->isHumongous(), "this is only for non-humongous regions");
   assert(!hr->is_empty(), "the region should not be empty");
   assert(free_list != NULL, "pre-condition");
@@ -5984,7 +5936,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
   if (!hr->is_young()) {
     _cg1r->hot_card_cache()->reset_card_counts(hr);
   }
-  hr->hr_clear(par, true /* clear_space */);
+  hr->hr_clear(par, true /* clear_space */, locked /* locked */);
   free_list->add_as_head(hr);
 }
@@ -6193,7 +6145,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& e
       }
     }
 
-    rs_lengths += cur->rem_set()->occupied();
+    rs_lengths += cur->rem_set()->occupied_locked();
 
     HeapRegion* next = cur->next_in_collection_set();
     assert(cur->in_collection_set(), "bad CS");
@@ -6227,7 +6179,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& e
       // And the region is empty.
       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
       pre_used += cur->used();
-      free_region(cur, &local_free_list, false /* par */);
+      free_region(cur, &local_free_list, false /* par */, true /* locked */);
     } else {
       cur->uninstall_surv_rate_group();
       if (cur->is_young()) {
@@ -6810,6 +6762,13 @@ void G1CollectedHeap::migrate_strong_code_roots() {
   g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
 }
 
+void G1CollectedHeap::purge_code_root_memory() {
+  double purge_start = os::elapsedTime();
+  G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
+  double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
+  g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
+}
+
 // Mark all the code roots that point into regions *not* in the
 // collection set.
 //
@@ -6880,7 +6839,7 @@ public:
     // Code roots should never be attached to a continuation of a humongous region
     assert(hrrs->strong_code_roots_list_length() == 0,
            err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
-                   " starting at "HR_FORMAT", but has "INT32_FORMAT,
+                   " starting at "HR_FORMAT", but has "SIZE_FORMAT,
                    HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
                    hrrs->strong_code_roots_list_length()));
     return false;
...
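The new redirty_logged_cards() above re-marks every card left in the per-pause dirty card queue as dirty and merges those buffers back into the JavaThread queue set, so concurrent refinement revisits them. A hedged, self-contained sketch of the re-dirtying step itself (the buffer layout and kDirtyCard value are assumptions for illustration; in HotSpot the dirty value comes from CardTableModRefBS::dirty_card_val()):

#include <cstddef>

typedef signed char jbyte;          // card table entry type
static const jbyte kDirtyCard = 0;  // assumed dirty value

// Walk one logged buffer of card pointers and mark each card dirty again,
// which is what the closure's do_card_ptr does per entry.
void redirty_buffer(jbyte** buf, size_t count) {
  for (size_t i = 0; i < count; i++) {
    *buf[i] = kDirtyCard;
  }
}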
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -763,9 +763,12 @@ public:
   // list later). The used bytes of freed regions are accumulated in
   // pre_used. If par is true, the region's RSet will not be freed
   // up. The assumption is that this will be done later.
+  // The locked parameter indicates if the caller has already taken
+  // care of proper synchronization. This may allow some optimizations.
   void free_region(HeapRegion* hr,
                    FreeRegionList* free_list,
-                   bool par);
+                   bool par,
+                   bool locked = false);
 
   // Frees a humongous region by collapsing it into individual regions
   // and calling free_region() for each of them. The freed regions
@@ -1647,6 +1650,9 @@ public:
   // that were not successfullly evacuated are not migrated.
   void migrate_strong_code_roots();
 
+  // Free up superfluous code root memory.
+  void purge_code_root_memory();
+
   // During an initial mark pause, mark all the code roots that
   // point into regions *not* in the collection set.
   void mark_strong_code_roots(uint worker_id);
@@ -1659,6 +1665,8 @@ public:
   // in symbol table, possibly in parallel.
   void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
 
+  // Redirty logged cards in the refinement queue.
+  void redirty_logged_cards();
+
   // Verification
 
   // The following is just to alert the verification code
@@ -1785,8 +1793,6 @@ protected:
   size_t _undo_waste;
 
   OopsInHeapRegionClosure* _evac_failure_cl;
-  G1ParScanHeapEvacClosure* _evac_cl;
-  G1ParScanPartialArrayClosure* _partial_scan_cl;
 
   int _hash_seed;
   uint _queue_num;
@@ -1914,14 +1920,6 @@ public:
     return _evac_failure_cl;
   }
 
-  void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
-    _evac_cl = evac_cl;
-  }
-
-  void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
-    _partial_scan_cl = partial_scan_cl;
-  }
-
   int* hash_seed() { return &_hash_seed; }
   uint queue_num() { return _queue_num; }
 
@@ -1969,19 +1967,121 @@ public:
                                    false /* retain */);
     }
   }
+ private:
+  #define G1_PARTIAL_ARRAY_MASK 0x2
+
+  inline bool has_partial_array_mask(oop* ref) const {
+    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
+  }
+
+  // We never encode partial array oops as narrowOop*, so return false immediately.
+  // This allows the compiler to create optimized code when popping references from
+  // the work queue.
+  inline bool has_partial_array_mask(narrowOop* ref) const {
+    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+    return false;
+  }
+
+  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+  // We always encode partial arrays as regular oop, to allow the
+  // specialization for has_partial_array_mask() for narrowOops above.
+  // This means that unintentional use of this method with narrowOops are caught
+  // by the compiler.
+  inline oop* set_partial_array_mask(oop obj) const {
+    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+  }
+
+  inline oop clear_partial_array_mask(oop* ref) const {
+    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
+  }
+
+  void do_oop_partial_array(oop* p) {
+    assert(has_partial_array_mask(p), "invariant");
+    oop from_obj = clear_partial_array_mask(p);
+
+    assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+    assert(from_obj->is_objArray(), "must be obj array");
+    objArrayOop from_obj_array = objArrayOop(from_obj);
+    // The from-space object contains the real length.
+    int length = from_obj_array->length();
+
+    assert(from_obj->is_forwarded(), "must be forwarded");
+    oop to_obj = from_obj->forwardee();
+    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
+    objArrayOop to_obj_array = objArrayOop(to_obj);
+    // We keep track of the next start index in the length field of the
+    // to-space object.
+    int next_index = to_obj_array->length();
+    assert(0 <= next_index && next_index < length,
+           err_msg("invariant, next index: %d, length: %d", next_index, length));
+
+    int start = next_index;
+    int end = length;
+    int remainder = end - start;
+    // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
+    if (remainder > 2 * ParGCArrayScanChunk) {
+      end = start + ParGCArrayScanChunk;
+      to_obj_array->set_length(end);
+      // Push the remainder before we process the range in case another
+      // worker has run out of things to do and can steal it.
+      oop* from_obj_p = set_partial_array_mask(from_obj);
+      push_on_queue(from_obj_p);
+    } else {
+      assert(length == end, "sanity");
+      // We'll process the final range for this object. Restore the length
+      // so that the heap remains parsable in case of evacuation failure.
+      to_obj_array->set_length(end);
+    }
+    _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+    // Process indexes [start,end). It will also process the header
+    // along with the first chunk (i.e., the chunk with start == 0).
+    // Note that at this point the length field of to_obj_array is not
+    // correct given that we are using it to keep track of the next
+    // start index. oop_iterate_range() (thankfully!) ignores the length
+    // field and only relies on the start / end parameters. It does
+    // however return the size of the object which will be incorrect. So
+    // we have to ignore it even if we wanted to use it.
+    to_obj_array->oop_iterate_range(&_scanner, start, end);
+  }
+
+  // This method is applied to the fields of the objects that have just been copied.
+  template <class T> void do_oop_evac(T* p, HeapRegion* from) {
+    assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
+           "Reference should not be NULL here as such are never pushed to the task queue.");
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+
+    // Although we never intentionally push references outside of the collection
+    // set, due to (benign) races in the claim mechanism during RSet scanning more
+    // than one thread might claim the same card. So the same card may be
+    // processed multiple times. So redo this check.
+    if (_g1h->in_cset_fast_test(obj)) {
+      oop forwardee;
+      if (obj->is_forwarded()) {
+        forwardee = obj->forwardee();
+      } else {
+        forwardee = copy_to_survivor_space(obj);
+      }
+      assert(forwardee != NULL, "forwardee should not be NULL");
+      oopDesc::encode_store_heap_oop(p, forwardee);
+    }
+
+    assert(obj != NULL, "Must be");
+    update_rs(from, p, queue_num());
+  }
+ public:
 
   oop copy_to_survivor_space(oop const obj);
 
   template <class T> void deal_with_reference(T* ref_to_scan) {
-    if (has_partial_array_mask(ref_to_scan)) {
-      _partial_scan_cl->do_oop_nv(ref_to_scan);
-    } else {
+    if (!has_partial_array_mask(ref_to_scan)) {
       // Note: we can use "raw" versions of "region_containing" because
       // "obj_to_scan" is definitely in the heap, and is not in a
       // humongous region.
       HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-      _evac_cl->set_region(r);
-      _evac_cl->do_oop_nv(ref_to_scan);
+      do_oop_evac(ref_to_scan, r);
+    } else {
+      do_oop_partial_array((oop*)ref_to_scan);
     }
   }
...
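The private helpers moved into G1ParScanThreadState above implement a classic pointer-tagging trick: a partial-array task is an oop pointer with bit 1 set, which a properly aligned reference can never have, so both task kinds share one work queue. A hedged standalone demonstration of the encoding in plain C++, with no HotSpot types (kPartialArrayMask mirrors G1_PARTIAL_ARRAY_MASK):

#include <cassert>
#include <cstdint>

static const uintptr_t kPartialArrayMask = 0x2;

// Tag an (aligned) pointer as a "partial array" task.
void* set_mask(void* p) {
  assert(((uintptr_t)p & kPartialArrayMask) == 0 && "information loss");
  return (void*)((uintptr_t)p | kPartialArrayMask);
}

bool has_mask(void* p) {
  return ((uintptr_t)p & kPartialArrayMask) == kPartialArrayMask;
}

void* clear_mask(void* p) {
  return (void*)((uintptr_t)p & ~kPartialArrayMask);
}

// Round-trip: clear_mask(set_mask(p)) == p, and has_mask() tells the two
// task kinds apart when both are popped from the same queue.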
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -250,6 +250,9 @@ double G1GCPhaseTimes::accounted_time_ms() {
     // Strong code root migration time
     misc_time_ms += _cur_strong_code_root_migration_time_ms;
 
+    // Strong code root purge time
+    misc_time_ms += _cur_strong_code_root_purge_time_ms;
+
     // Subtract the time taken to clean the card table from the
     // current value of "other time"
     misc_time_ms += _cur_clear_ct_time_ms;
@@ -299,20 +302,38 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
   }
   print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
   print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
+  print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
   print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
   double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
   print_stats(1, "Other", misc_time_ms);
   if (_cur_verify_before_time_ms > 0.0) {
     print_stats(2, "Verify Before", _cur_verify_before_time_ms);
   }
+  if (G1CollectedHeap::heap()->evacuation_failed()) {
+    double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +
+      _cur_evac_fail_restore_remsets;
+    print_stats(2, "Evacuation Failure", evac_fail_handling);
+    if (G1Log::finest()) {
+      print_stats(3, "Recalculate Used", _cur_evac_fail_recalc_used);
+      print_stats(3, "Remove Self Forwards", _cur_evac_fail_remove_self_forwards);
+      print_stats(3, "Restore RemSet", _cur_evac_fail_restore_remsets);
+    }
+  }
   print_stats(2, "Choose CSet",
     (_recorded_young_cset_choice_time_ms +
     _recorded_non_young_cset_choice_time_ms));
   print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
   print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
+  if (G1DeferredRSUpdate) {
+    print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
+  }
   print_stats(2, "Free CSet",
     (_recorded_young_free_cset_time_ms +
     _recorded_non_young_free_cset_time_ms));
+  if (G1Log::finest()) {
+    print_stats(3, "Young Free CSet", _recorded_young_free_cset_time_ms);
+    print_stats(3, "Non-Young Free CSet", _recorded_non_young_free_cset_time_ms);
+  }
   if (_cur_verify_after_time_ms > 0.0) {
     print_stats(2, "Verify After", _cur_verify_after_time_ms);
   }
...
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -131,6 +131,11 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
   double _cur_collection_par_time_ms;
   double _cur_collection_code_root_fixup_time_ms;
   double _cur_strong_code_root_migration_time_ms;
+  double _cur_strong_code_root_purge_time_ms;
+
+  double _cur_evac_fail_recalc_used;
+  double _cur_evac_fail_restore_remsets;
+  double _cur_evac_fail_remove_self_forwards;
 
   double _cur_clear_ct_time_ms;
   double _cur_ref_proc_time_ms;
@@ -142,6 +147,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
   double _recorded_young_cset_choice_time_ms;
   double _recorded_non_young_cset_choice_time_ms;
 
+  double _recorded_redirty_logged_cards_time_ms;
+
   double _recorded_young_free_cset_time_ms;
   double _recorded_non_young_free_cset_time_ms;
 
@@ -223,6 +230,22 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
     _cur_strong_code_root_migration_time_ms = ms;
   }
 
+  void record_strong_code_root_purge_time(double ms) {
+    _cur_strong_code_root_purge_time_ms = ms;
+  }
+
+  void record_evac_fail_recalc_used_time(double ms) {
+    _cur_evac_fail_recalc_used = ms;
+  }
+
+  void record_evac_fail_restore_remsets(double ms) {
+    _cur_evac_fail_restore_remsets = ms;
+  }
+
+  void record_evac_fail_remove_self_forwards(double ms) {
+    _cur_evac_fail_remove_self_forwards = ms;
+  }
+
   void record_ref_proc_time(double ms) {
     _cur_ref_proc_time_ms = ms;
   }
@@ -251,6 +274,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
     _recorded_non_young_cset_choice_time_ms = time_ms;
   }
 
+  void record_redirty_logged_cards_time_ms(double time_ms) {
+    _recorded_redirty_logged_cards_time_ms = time_ms;
+  }
+
   void record_cur_collection_start_sec(double time_ms) {
     _cur_collection_start_sec = time_ms;
   }
...
@@ -80,53 +80,6 @@ public:
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-#define G1_PARTIAL_ARRAY_MASK 0x2
-
-inline bool has_partial_array_mask(oop* ref) {
-  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
-}
-
-// We never encode partial array oops as narrowOop*, so return false immediately.
-// This allows the compiler to create optimized code when popping references from
-// the work queue.
-inline bool has_partial_array_mask(narrowOop* ref) {
-  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
-  return false;
-}
-
-// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
-// We always encode partial arrays as regular oop, to allow the
-// specialization for has_partial_array_mask() for narrowOops above.
-// This means that unintentional use of this method with narrowOops are caught
-// by the compiler.
-inline oop* set_partial_array_mask(oop obj) {
-  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
-}
-
-template <class T> inline oop clear_partial_array_mask(T* ref) {
-  return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
-}
-
-class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
-  G1ParScanClosure _scanner;
-
-public:
-  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
-    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
-  {
-    assert(_ref_processor == NULL, "sanity");
-  }
-
-  G1ParScanClosure* scanner() {
-    return &_scanner;
-  }
-
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p)       { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-};
-
 // Add back base class for metadata
 class G1ParCopyHelper : public G1ParClosureSuper {
 protected:
@@ -173,15 +126,8 @@ typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
 typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
 typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
 
-// The following closure type is defined in g1_specialized_oop_closures.hpp:
-//
-// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
-
 // We use a separate closure to handle references during evacuation
 // failure processing.
-// We could have used another instance of G1ParScanHeapEvacClosure
-// (since that closure no longer assumes that the references it
-// handles point into the collection set).
 
 typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
...
@@ -463,8 +463,9 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
 
   if (_g1->evacuation_failed()) {
+    double restore_remembered_set_start = os::elapsedTime();
     // Restore remembered sets for the regions pointing into the collection set.
     if (G1DeferredRSUpdate) {
       // If deferred RS updates are enabled then we just need to transfer
       // the completed buffers from (a) the DirtyCardQueueSet used to hold
@@ -483,6 +484,8 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
       }
       assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
     }
+
+    _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
   }
 
   // Free any completed buffers in the DirtyCardQueueSet used to hold cards
...
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -285,6 +285,10 @@
   product(uintx, G1MixedGCCountTarget, 8, \
           "The target number of mixed GCs after a marking cycle.") \
           \
+  experimental(uintx, G1CodeRootsChunkCacheKeepPercent, 10, \
+          "The amount of code root chunks that should be kept at most " \
+          "as percentage of already allocated.") \
+          \
   experimental(uintx, G1OldCSetRegionThresholdPercent, 10, \
           "An upper bound for the number of old CSet regions expressed " \
           "as a percentage of the heap size.") \
...
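The flag text leaves the arithmetic implicit: the retained free-chunk cache is bounded by a percentage of the chunks allocated so far. A hedged sketch of how such a bound might be computed; the function name and parameters here are illustrative, not the VM's implementation:

```cpp
#include <cstddef>

// Illustrative only: cap a free-chunk cache at keep_percent of all chunks
// ever allocated, returning how many cached chunks exceed that cap and
// should therefore be freed.
static size_t chunks_over_cap(size_t allocated_chunks,
                              size_t cached_chunks,
                              size_t keep_percent) { // e.g. 10, as the default above
  size_t cap = allocated_chunks * keep_percent / 100;
  return cached_chunks > cap ? cached_chunks - cap : 0;
}
```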
@@ -43,8 +43,6 @@ class G1ParCopyClosure;
 class G1ParScanClosure;
 class G1ParPushHeapRSClosure;

-typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
-
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
 class G1CMOopClosure;
@@ -61,7 +59,6 @@ class G1UpdateRSOrPushRefOopClosure;
 #endif

 #define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \
-      f(G1ParScanHeapEvacClosure,_nv) \
       f(G1ParScanClosure,_nv) \
       f(G1ParPushHeapRSClosure,_nv) \
       f(FilterIntoCSClosure,_nv) \
...
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -205,7 +205,7 @@ void HeapRegion::reset_after_compaction() {
   init_top_at_mark_start();
 }

-void HeapRegion::hr_clear(bool par, bool clear_space) {
+void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
   assert(_humongous_type == NotHumongous,
          "we should have already filtered out humongous regions");
   assert(_humongous_start_region == NULL,
@@ -223,7 +223,11 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
   if (!par) {
     // If this is parallel, this will be done later.
     HeapRegionRemSet* hrrs = rem_set();
-    hrrs->clear();
+    if (locked) {
+      hrrs->clear_locked();
+    } else {
+      hrrs->clear();
+    }
     _claimed = InitialClaimValue;
   }
   zero_marked_bytes();
@@ -710,14 +714,14 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
   }

   HeapRegionRemSet* hrrs = rem_set();
-  int strong_code_roots_length = hrrs->strong_code_roots_list_length();
+  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

   // if this region is empty then there should be no entries
   // on its strong code root list
   if (is_empty()) {
     if (strong_code_roots_length > 0) {
       gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
-                             "but has "INT32_FORMAT" code root entries",
+                             "but has "SIZE_FORMAT" code root entries",
                              bottom(), end(), strong_code_roots_length);
       *failures = true;
     }
@@ -727,7 +731,7 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
   if (continuesHumongous()) {
     if (strong_code_roots_length > 0) {
       gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
-                             "region but has "INT32_FORMAT" code root entries",
+                             "region but has "SIZE_FORMAT" code root entries",
                              HR_FORMAT_PARAMS(this), strong_code_roots_length);
       *failures = true;
     }
...
@@ -596,7 +596,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void save_marks();

   // Reset HR stuff to default values.
-  void hr_clear(bool par, bool clear_space);
+  void hr_clear(bool par, bool clear_space, bool locked = false);
   void par_clear();

   // Get the start of the unmarked area in this region.
...
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "memory/allocation.hpp"
+#include "memory/padded.inline.hpp"
 #include "memory/space.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
@@ -259,10 +260,9 @@ size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
 size_t OtherRegionsTable::_fine_eviction_stride = 0;
 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

-OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
+OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
   _g1h(G1CollectedHeap::heap()),
-  _m(Mutex::leaf, "An OtherRegionsTable lock", true),
-  _hr(hr),
+  _hr(hr), _m(m),
   _coarse_map(G1CollectedHeap::heap()->max_regions(),
               false /* in-resource-area */),
   _fine_grain_regions(NULL),
@@ -358,46 +358,66 @@ void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
          "just checking");
 }

-int**  OtherRegionsTable::_from_card_cache = NULL;
-size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
-size_t OtherRegionsTable::_from_card_cache_mem_size = 0;
+int**  FromCardCache::_cache = NULL;
+uint   FromCardCache::_max_regions = 0;
+size_t FromCardCache::_static_mem_size = 0;

-void OtherRegionsTable::init_from_card_cache(size_t max_regions) {
-  _from_card_cache_max_regions = max_regions;
-  int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
-  _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs, mtGC);
-  for (int i = 0; i < n_par_rs; i++) {
-    _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions, mtGC);
-    for (size_t j = 0; j < max_regions; j++) {
-      _from_card_cache[i][j] = -1;  // An invalid value.
+void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
+  guarantee(_cache == NULL, "Should not call this multiple times");
+
+  _max_regions = max_num_regions;
+  _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
+                                                       _max_regions,
+                                                       &_static_mem_size);
+
+  for (uint i = 0; i < n_par_rs; i++) {
+    for (uint j = 0; j < _max_regions; j++) {
+      set(i, j, InvalidCard);
     }
   }
-  _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int);
 }

-void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) {
-  for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
-    assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max.");
-    for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) {
-      _from_card_cache[i][j] = -1;  // An invalid value.
+void FromCardCache::shrink(uint new_num_regions) {
+  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
+    assert(new_num_regions <= _max_regions, "Must be within max.");
+    for (uint j = new_num_regions; j < _max_regions; j++) {
+      set(i, j, InvalidCard);
     }
   }
 }

 #ifndef PRODUCT
-void OtherRegionsTable::print_from_card_cache() {
-  for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
-    for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
-      gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.",
-                    i, j, _from_card_cache[i][j]);
+void FromCardCache::print(outputStream* out) {
+  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
+    for (uint j = 0; j < _max_regions; j++) {
+      out->print_cr("_from_card_cache["UINT32_FORMAT"]["UINT32_FORMAT"] = "INT32_FORMAT".",
+                    i, j, at(i, j));
     }
   }
 }
 #endif

+void FromCardCache::clear(uint region_idx) {
+  uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
+  for (uint i = 0; i < num_par_remsets; i++) {
+    set(i, region_idx, InvalidCard);
+  }
+}
+
+void OtherRegionsTable::init_from_card_cache(uint max_regions) {
+  FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
+}
+
+void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
+  FromCardCache::shrink(new_num_regions);
+}
+
+void OtherRegionsTable::print_from_card_cache() {
+  FromCardCache::print();
+}
+
 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
-  size_t cur_hrs_ind = (size_t) hr()->hrs_index();
+  uint cur_hrs_ind = hr()->hrs_index();

   if (G1TraceHeapRegionRememberedSet) {
     gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@@ -410,19 +430,17 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

   if (G1TraceHeapRegionRememberedSet) {
-    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
+    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
                   hr()->bottom(), from_card,
-                  _from_card_cache[tid][cur_hrs_ind]);
+                  FromCardCache::at((uint)tid, cur_hrs_ind));
   }

-  if (from_card == _from_card_cache[tid][cur_hrs_ind]) {
+  if (FromCardCache::contains_or_replace((uint)tid, cur_hrs_ind, from_card)) {
     if (G1TraceHeapRegionRememberedSet) {
       gclog_or_tty->print_cr("  from-card cache hit.");
     }
     assert(contains_reference(from), "We just added it!");
     return;
-  } else {
-    _from_card_cache[tid][cur_hrs_ind] = from_card;
   }

   // Note that this may be a continued H region.
@@ -442,7 +460,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
     size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
     PerRegionTable* prt = find_region_table(ind, from_hr);
     if (prt == NULL) {
-      MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
+      MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
       // Confirm that it's really not there...
       prt = find_region_table(ind, from_hr);
       if (prt == NULL) {
@@ -544,7 +562,7 @@ OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
 jint OtherRegionsTable::_n_coarsenings = 0;

 PerRegionTable* OtherRegionsTable::delete_region_table() {
-  assert(_m.owned_by_self(), "Precondition");
+  assert(_m->owned_by_self(), "Precondition");
   assert(_n_fine_entries == _max_fine_entries, "Precondition");
   PerRegionTable* max = NULL;
   jint max_occ = 0;
@@ -676,8 +694,6 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,

 size_t OtherRegionsTable::occupied() const {
-  // Cast away const in this case.
-  MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   size_t sum = occ_fine();
   sum += occ_sparse();
   sum += occ_coarse();
@@ -707,8 +723,6 @@ size_t OtherRegionsTable::occ_sparse() const {
 }

 size_t OtherRegionsTable::mem_size() const {
-  // Cast away const in this case.
-  MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   size_t sum = 0;
   // all PRTs are of the same size so it is sufficient to query only one of them.
   if (_first_all_fine_prts != NULL) {
@@ -724,7 +738,7 @@ size_t OtherRegionsTable::mem_size() const {
 }

 size_t OtherRegionsTable::static_mem_size() {
-  return _from_card_cache_mem_size;
+  return FromCardCache::static_mem_size();
 }

 size_t OtherRegionsTable::fl_mem_size() {
@@ -732,14 +746,10 @@ size_t OtherRegionsTable::fl_mem_size() {
 }

 void OtherRegionsTable::clear_fcc() {
-  size_t hrs_idx = hr()->hrs_index();
-  for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
-    _from_card_cache[i][hrs_idx] = -1;
-  }
+  FromCardCache::clear(hr()->hrs_index());
 }

 void OtherRegionsTable::clear() {
-  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   // if there are no entries, skip this step
   if (_first_all_fine_prts != NULL) {
     guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
@@ -759,7 +769,7 @@ void OtherRegionsTable::clear() {
 }

 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
-  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
+  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
   size_t hrs_ind = (size_t) from_hr->hrs_index();
   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   if (del_single_region_table(ind, from_hr)) {
@@ -768,15 +778,15 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
     _coarse_map.par_at_put(hrs_ind, 0);
   }
   // Check to see if any of the fcc entries come from here.
-  size_t hr_ind = (size_t) hr()->hrs_index();
-  for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
-    int fcc_ent = _from_card_cache[tid][hr_ind];
-    if (fcc_ent != -1) {
+  uint hr_ind = hr()->hrs_index();
+  for (uint tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
+    int fcc_ent = FromCardCache::at(tid, hr_ind);
+    if (fcc_ent != FromCardCache::InvalidCard) {
       HeapWord* card_addr = (HeapWord*)
         (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
       if (hr()->is_in_reserved(card_addr)) {
         // Clear the from card cache.
-        _from_card_cache[tid][hr_ind] = -1;
+        FromCardCache::set(tid, hr_ind, FromCardCache::InvalidCard);
       }
     }
   }
@@ -805,7 +815,7 @@ bool OtherRegionsTable::del_single_region_table(size_t ind,

 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
   // Cast away const in this case.
-  MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
+  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
   return contains_reference_locked(from);
 }

@@ -832,8 +842,6 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const
            "Must be in range.");
     return _sparse_table.contains_card(hr_ind, card_index);
   }
 }

 void
@@ -844,13 +852,15 @@ OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
 // Determines how many threads can add records to an rset in parallel.
 // This can be done by either mutator threads together with the
 // concurrent refinement threads or GC threads.
-int HeapRegionRemSet::num_par_rem_sets() {
-  return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
+uint HeapRegionRemSet::num_par_rem_sets() {
+  return (uint)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
 }
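The MAX2 expression sizes the remembered sets for whichever writer population is larger: mutator dirty-card queue ids plus concurrent refinement threads on one side, parallel GC threads on the other. A worked example with hypothetical counts (all three constants below are assumptions chosen for illustration, not VM defaults):

```cpp
#include <algorithm>
#include <cstdio>

int main() {
  unsigned num_par_ids = 8;          // hypothetical DirtyCardQueueSet::num_par_ids()
  unsigned refine_threads = 4;       // hypothetical ConcurrentG1Refine::thread_num()
  unsigned parallel_gc_threads = 13; // hypothetical ParallelGCThreads
  unsigned n = std::max(num_par_ids + refine_threads, parallel_gc_threads);
  std::printf("num_par_rem_sets = %u\n", n); // max(8 + 4, 13) = 13
  return 0;
}
```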
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
-  : _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) {
+  : _bosa(bosa),
+    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true),
+    _code_roots(), _other_regions(hr, &_m) {
   reset_for_par_iteration();
 }

@@ -883,7 +893,7 @@ bool HeapRegionRemSet::iter_is_complete() {
 }

 #ifndef PRODUCT
-void HeapRegionRemSet::print() const {
+void HeapRegionRemSet::print() {
   HeapRegionRemSetIterator iter(this);
   size_t card_index;
   while (iter.has_next(card_index)) {
@@ -909,14 +919,14 @@ void HeapRegionRemSet::cleanup() {
 }

 void HeapRegionRemSet::clear() {
-  if (_strong_code_roots_list != NULL) {
-    delete _strong_code_roots_list;
-  }
-  _strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC)
-                            GrowableArray<nmethod*>(10, 0, NULL, true);
+  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
+  clear_locked();
+}

+void HeapRegionRemSet::clear_locked() {
+  _code_roots.clear();
   _other_regions.clear();
-  assert(occupied() == 0, "Should be clear.");
+  assert(occupied_locked() == 0, "Should be clear.");
   reset_for_par_iteration();
 }
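The clear()/clear_locked() split above is the standard locked-variant idiom: the public entry point takes the lock and delegates, while the _locked variant serves callers that already hold it, such as hr_clear(par, clear_space, locked = true) earlier in this change. A self-contained sketch of the idiom using std::mutex (the class and field names are illustrative only):

```cpp
#include <mutex>

class RemSetLike {
  std::mutex _m;
  int _entries;
public:
  RemSetLike() : _entries(0) {}
  void clear() {                       // public entry point: takes the lock
    std::lock_guard<std::mutex> x(_m);
    clear_locked();
  }
  void clear_locked() {                // precondition: caller already holds _m
    _entries = 0;
  }
};
```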
@@ -932,27 +942,18 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
   _other_regions.scrub(ctbs, region_bm, card_bm);
 }

 // Code roots support

 void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
   assert(nm != NULL, "sanity");
-  // Search for the code blob from the RHS to avoid
-  // duplicate entries as much as possible
-  if (_strong_code_roots_list->find_from_end(nm) < 0) {
-    // Code blob isn't already in the list
-    _strong_code_roots_list->push(nm);
-  }
+  _code_roots.add(nm);
 }

 void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
   assert(nm != NULL, "sanity");
-  int idx = _strong_code_roots_list->find(nm);
-  if (idx >= 0) {
-    _strong_code_roots_list->remove_at(idx);
-  }
+  _code_roots.remove(nm);

   // Check that there were no duplicates
-  guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found");
+  guarantee(!_code_roots.contains(nm), "duplicate entry found");
 }

 class NMethodMigrationOopClosure : public OopClosure {
@@ -1014,8 +1015,8 @@ void HeapRegionRemSet::migrate_strong_code_roots() {
   GrowableArray<nmethod*> to_be_retained(10);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

-  while (_strong_code_roots_list->is_nonempty()) {
-    nmethod *nm = _strong_code_roots_list->pop();
+  while (!_code_roots.is_empty()) {
+    nmethod *nm = _code_roots.pop();
     if (nm != NULL) {
       NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
       nm->oops_do(&oop_cl);
@@ -1038,20 +1039,16 @@ void HeapRegionRemSet::migrate_strong_code_roots() {
 }

 void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
-  for (int i = 0; i < _strong_code_roots_list->length(); i += 1) {
-    nmethod* nm = _strong_code_roots_list->at(i);
-    blk->do_code_blob(nm);
-  }
+  _code_roots.nmethods_do(blk);
 }

 size_t HeapRegionRemSet::strong_code_roots_mem_size() {
-  return sizeof(GrowableArray<nmethod*>) +
-         _strong_code_roots_list->max_length() * sizeof(nmethod*);
+  return _code_roots.mem_size();
 }

 //-------------------- Iteration --------------------

-HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
+HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
   _hrrs(hrrs),
   _g1h(G1CollectedHeap::heap()),
   _coarse_map(&hrrs->_other_regions._coarse_map),
...
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP

+#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
 #include "gc_implementation/g1/sparsePRT.hpp"

 // Remembered set for a heap region.  Represent a set of "cards" that
@@ -44,6 +45,54 @@ class nmethod;
 class HRRSCleanupTask : public SparsePRTCleanupTask {
 };

+// The FromCardCache remembers the most recently processed card on the heap on
+// a per-region and per-thread basis.
+class FromCardCache : public AllStatic {
+ private:
+  // Array of card indices. Indexed by thread X and heap region to minimize
+  // thread contention.
+  static int** _cache;
+  static uint _max_regions;
+  static size_t _static_mem_size;
+
+ public:
+  enum {
+    InvalidCard = -1 // Card value of an invalid card, i.e. a card index not otherwise used.
+  };
+
+  static void clear(uint region_idx);
+
+  // Returns true if the given card is in the cache at the given location, or
+  // replaces the card at that location and returns false.
+  static bool contains_or_replace(uint worker_id, uint region_idx, int card) {
+    int card_in_cache = at(worker_id, region_idx);
+    if (card_in_cache == card) {
+      return true;
+    } else {
+      set(worker_id, region_idx, card);
+      return false;
+    }
+  }
+
+  static int at(uint worker_id, uint region_idx) {
+    return _cache[worker_id][region_idx];
+  }
+
+  static void set(uint worker_id, uint region_idx, int val) {
+    _cache[worker_id][region_idx] = val;
+  }
+
+  static void initialize(uint n_par_rs, uint max_num_regions);
+
+  static void shrink(uint new_num_regions);
+
+  static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
+
+  static size_t static_mem_size() {
+    return _static_mem_size;
+  }
+};
+
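FromCardCache is a last-card filter: each (worker, region) slot remembers the most recent card enqueued from that region, so a repeated reference from the same card can be dropped before it reaches the fine-grained tables. A standalone sketch of the same idea follows; nested std::vector is used purely for brevity, whereas the real class uses the padded, cache-line-aligned 2D array precisely to avoid the false sharing this sketch would suffer:

```cpp
#include <vector>

class MiniFromCardCache {
  std::vector<std::vector<int> > _cache;   // [worker][region]
public:
  static const int InvalidCard = -1;
  MiniFromCardCache(unsigned workers, unsigned regions)
    : _cache(workers, std::vector<int>(regions, InvalidCard)) {}

  // True if 'card' is already the cached card for (worker, region);
  // otherwise it becomes the cached card and false is returned,
  // mirroring FromCardCache::contains_or_replace() above.
  bool contains_or_replace(unsigned worker, unsigned region, int card) {
    if (_cache[worker][region] == card) {
      return true;   // duplicate: caller may skip the remembered set insert
    }
    _cache[worker][region] = card;
    return false;
  }
};
```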
// The "_coarse_map" is a bitmap with one bit for each region, where set // The "_coarse_map" is a bitmap with one bit for each region, where set
// bits indicate that the corresponding region may contain some pointer // bits indicate that the corresponding region may contain some pointer
// into the owning region. // into the owning region.
...@@ -72,7 +121,7 @@ class OtherRegionsTable VALUE_OBJ_CLASS_SPEC { ...@@ -72,7 +121,7 @@ class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
friend class HeapRegionRemSetIterator; friend class HeapRegionRemSetIterator;
G1CollectedHeap* _g1h; G1CollectedHeap* _g1h;
Mutex _m; Mutex* _m;
HeapRegion* _hr; HeapRegion* _hr;
// These are protected by "_m". // These are protected by "_m".
...@@ -118,18 +167,13 @@ class OtherRegionsTable VALUE_OBJ_CLASS_SPEC { ...@@ -118,18 +167,13 @@ class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
// false. // false.
bool del_single_region_table(size_t ind, HeapRegion* hr); bool del_single_region_table(size_t ind, HeapRegion* hr);
// Indexed by thread X heap region, to minimize thread contention.
static int** _from_card_cache;
static size_t _from_card_cache_max_regions;
static size_t _from_card_cache_mem_size;
// link/add the given fine grain remembered set into the "all" list // link/add the given fine grain remembered set into the "all" list
void link_to_all(PerRegionTable * prt); void link_to_all(PerRegionTable * prt);
// unlink/remove the given fine grain remembered set into the "all" list // unlink/remove the given fine grain remembered set into the "all" list
void unlink_from_all(PerRegionTable * prt); void unlink_from_all(PerRegionTable * prt);
public: public:
OtherRegionsTable(HeapRegion* hr); OtherRegionsTable(HeapRegion* hr, Mutex* m);
HeapRegion* hr() const { return _hr; } HeapRegion* hr() const { return _hr; }
...@@ -141,7 +185,6 @@ public: ...@@ -141,7 +185,6 @@ public:
// objects. // objects.
void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
// Not const because it takes a lock.
size_t occupied() const; size_t occupied() const;
size_t occ_fine() const; size_t occ_fine() const;
size_t occ_coarse() const; size_t occ_coarse() const;
...@@ -170,11 +213,11 @@ public: ...@@ -170,11 +213,11 @@ public:
// Declare the heap size (in # of regions) to the OtherRegionsTable. // Declare the heap size (in # of regions) to the OtherRegionsTable.
// (Uses it to initialize from_card_cache). // (Uses it to initialize from_card_cache).
static void init_from_card_cache(size_t max_regions); static void init_from_card_cache(uint max_regions);
// Declares that only regions i s.t. 0 <= i < new_n_regs are in use. // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
// Make sure any entries for higher regions are invalid. // Make sure any entries for higher regions are invalid.
static void shrink_from_card_cache(size_t new_n_regs); static void shrink_from_card_cache(uint new_num_regions);
static void print_from_card_cache(); static void print_from_card_cache();
}; };
...@@ -192,9 +235,11 @@ private: ...@@ -192,9 +235,11 @@ private:
G1BlockOffsetSharedArray* _bosa; G1BlockOffsetSharedArray* _bosa;
G1BlockOffsetSharedArray* bosa() const { return _bosa; } G1BlockOffsetSharedArray* bosa() const { return _bosa; }
// A list of code blobs (nmethods) whose code contains pointers into // A set of code blobs (nmethods) whose code contains pointers into
// the region that owns this RSet. // the region that owns this RSet.
GrowableArray<nmethod*>* _strong_code_roots_list; G1CodeRootSet _code_roots;
Mutex _m;
OtherRegionsTable _other_regions; OtherRegionsTable _other_regions;
...@@ -218,17 +263,20 @@ private: ...@@ -218,17 +263,20 @@ private:
static void print_event(outputStream* str, Event evnt); static void print_event(outputStream* str, Event evnt);
public: public:
HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr);
HeapRegion* hr);
static int num_par_rem_sets(); static uint num_par_rem_sets();
static void setup_remset_size(); static void setup_remset_size();
HeapRegion* hr() const { HeapRegion* hr() const {
return _other_regions.hr(); return _other_regions.hr();
} }
size_t occupied() const { size_t occupied() {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
return occupied_locked();
}
size_t occupied_locked() {
return _other_regions.occupied(); return _other_regions.occupied();
} }
size_t occ_fine() const { size_t occ_fine() const {
...@@ -260,6 +308,7 @@ public: ...@@ -260,6 +308,7 @@ public:
// The region is being reclaimed; clear its remset, and any mention of // The region is being reclaimed; clear its remset, and any mention of
// entries for this region in other remsets. // entries for this region in other remsets.
void clear(); void clear();
void clear_locked();
// Attempt to claim the region. Returns true iff this call caused an // Attempt to claim the region. Returns true iff this call caused an
// atomic transition from Unclaimed to Claimed. // atomic transition from Unclaimed to Claimed.
...@@ -289,6 +338,7 @@ public: ...@@ -289,6 +338,7 @@ public:
// The actual # of bytes this hr_remset takes up. // The actual # of bytes this hr_remset takes up.
// Note also includes the strong code root set. // Note also includes the strong code root set.
size_t mem_size() { size_t mem_size() {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
return _other_regions.mem_size() return _other_regions.mem_size()
// This correction is necessary because the above includes the second // This correction is necessary because the above includes the second
// part. // part.
...@@ -299,13 +349,13 @@ public: ...@@ -299,13 +349,13 @@ public:
// Returns the memory occupancy of all static data structures associated // Returns the memory occupancy of all static data structures associated
// with remembered sets. // with remembered sets.
static size_t static_mem_size() { static size_t static_mem_size() {
return OtherRegionsTable::static_mem_size(); return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size();
} }
// Returns the memory occupancy of all free_list data structures associated // Returns the memory occupancy of all free_list data structures associated
// with remembered sets. // with remembered sets.
static size_t fl_mem_size() { static size_t fl_mem_size() {
return OtherRegionsTable::fl_mem_size(); return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::fl_mem_size();
} }
bool contains_reference(OopOrNarrowOopStar from) const { bool contains_reference(OopOrNarrowOopStar from) const {
...@@ -328,21 +378,21 @@ public: ...@@ -328,21 +378,21 @@ public:
void strong_code_roots_do(CodeBlobClosure* blk) const; void strong_code_roots_do(CodeBlobClosure* blk) const;
// Returns the number of elements in the strong code roots list // Returns the number of elements in the strong code roots list
int strong_code_roots_list_length() { size_t strong_code_roots_list_length() {
return _strong_code_roots_list->length(); return _code_roots.length();
} }
// Returns true if the strong code roots contains the given // Returns true if the strong code roots contains the given
// nmethod. // nmethod.
bool strong_code_roots_list_contains(nmethod* nm) { bool strong_code_roots_list_contains(nmethod* nm) {
return _strong_code_roots_list->contains(nm); return _code_roots.contains(nm);
} }
// Returns the amount of memory, in bytes, currently // Returns the amount of memory, in bytes, currently
// consumed by the strong code roots. // consumed by the strong code roots.
size_t strong_code_roots_mem_size(); size_t strong_code_roots_mem_size();
void print() const; void print() PRODUCT_RETURN;
// Called during a stop-world phase to perform any deferred cleanups. // Called during a stop-world phase to perform any deferred cleanups.
static void cleanup(); static void cleanup();
...@@ -350,12 +400,13 @@ public: ...@@ -350,12 +400,13 @@ public:
// Declare the heap size (in # of regions) to the HeapRegionRemSet(s). // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
// (Uses it to initialize from_card_cache). // (Uses it to initialize from_card_cache).
static void init_heap(uint max_regions) { static void init_heap(uint max_regions) {
OtherRegionsTable::init_from_card_cache((size_t) max_regions); G1CodeRootSet::initialize();
OtherRegionsTable::init_from_card_cache(max_regions);
} }
// Declares that only regions i s.t. 0 <= i < new_n_regs are in use. // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
static void shrink_heap(uint new_n_regs) { static void shrink_heap(uint new_n_regs) {
OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs); OtherRegionsTable::shrink_from_card_cache(new_n_regs);
} }
#ifndef PRODUCT #ifndef PRODUCT
...@@ -384,7 +435,7 @@ public: ...@@ -384,7 +435,7 @@ public:
class HeapRegionRemSetIterator : public StackObj { class HeapRegionRemSetIterator : public StackObj {
// The region RSet over which we're iterating. // The region RSet over which we're iterating.
const HeapRegionRemSet* _hrrs; HeapRegionRemSet* _hrrs;
// Local caching of HRRS fields. // Local caching of HRRS fields.
const BitMap* _coarse_map; const BitMap* _coarse_map;
...@@ -441,7 +492,7 @@ class HeapRegionRemSetIterator : public StackObj { ...@@ -441,7 +492,7 @@ class HeapRegionRemSetIterator : public StackObj {
public: public:
// We require an iterator to be initialized before use, so the // We require an iterator to be initialized before use, so the
// constructor does little. // constructor does little.
HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs); HeapRegionRemSetIterator(HeapRegionRemSet* hrrs);
// If there remains one or more cards to be yielded, returns true and // If there remains one or more cards to be yielded, returns true and
// sets "card_index" to one of those cards (which is then considered // sets "card_index" to one of those cards (which is then considered
......
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
 #endif // INCLUDE_ALL_GCS

 // Free list. A FreeList is used to access a linked list of chunks
@@ -332,4 +333,5 @@ template class FreeList<Metablock>;
 template class FreeList<Metachunk>;
 #if INCLUDE_ALL_GCS
 template class FreeList<FreeChunk>;
+template class FreeList<G1CodeRootChunk>;
 #endif // INCLUDE_ALL_GCS
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,4 +90,23 @@ class PaddedArray {
   static PaddedEnd<T>* create_unfreeable(uint length);
 };

+// Helper class to create an array of references to arrays of primitive types
+// Both the array of references and the data arrays are aligned to the given
+// alignment. The allocated memory is zero-filled.
+template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded2DArray {
+ public:
+  // Creates an aligned padded 2D array.
+  // The memory cannot be deleted since the raw memory chunk is not returned.
+  static T** create_unfreeable(uint rows, uint columns, size_t* allocation_size = NULL);
+};
+
+// Helper class to create an array of T objects. The array as a whole will
+// start at a multiple of alignment and its size will be aligned to alignment.
+template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedPrimitiveArray {
+ public:
+  static T* create_unfreeable(size_t length);
+};
+
 #endif // SHARE_VM_MEMORY_PADDED_HPP
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,3 +47,42 @@ PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
   return aligned_padded_array;
 }

+template <class T, MEMFLAGS flags, size_t alignment>
+T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
+  // Calculate and align the size of the first dimension's table.
+  size_t table_size = align_size_up_(rows * sizeof(T*), alignment);
+  // The size of the separate rows.
+  size_t row_size = align_size_up_(columns * sizeof(T), alignment);
+  // Total size consists of the indirection table plus the rows.
+  size_t total_size = table_size + rows * row_size + alignment;
+
+  // Allocate a chunk of memory large enough to allow alignment of the chunk.
+  void* chunk = AllocateHeap(total_size, flags);
+  // Clear the allocated memory.
+  memset(chunk, 0, total_size);
+  // Align the chunk of memory.
+  T** result = (T**)align_pointer_up(chunk, alignment);
+  void* data_start = (void*)((uintptr_t)result + table_size);
+
+  // Fill in the row table.
+  for (size_t i = 0; i < rows; i++) {
+    result[i] = (T*)((uintptr_t)data_start + i * row_size);
+  }
+
+  if (allocation_size != NULL) {
+    *allocation_size = total_size;
+  }
+
+  return result;
+}
+
+template <class T, MEMFLAGS flags, size_t alignment>
+T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
+  // Allocate a chunk of memory large enough to allow for some alignment.
+  void* chunk = AllocateHeap(length * sizeof(T) + alignment, flags);
+
+  memset(chunk, 0, length * sizeof(T) + alignment);
+
+  return (T*)align_pointer_up(chunk, alignment);
+}
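The allocation above trades up to one alignment unit of padding per row for giving each row (and the row-pointer table) its own cache lines. A standalone sketch of the same layout computation, using malloc in place of HotSpot's AllocateHeap; align_up below re-implements align_size_up_/align_pointer_up purely for illustration:

```cpp
#include <cstdint>
#include <cstdlib>
#include <cstring>

static size_t align_up(size_t x, size_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

// Same layout as Padded2DArray::create_unfreeable: an aligned table of row
// pointers followed by rows each padded to 'alignment' bytes, so that writers
// indexing different rows never touch the same cache line.
template <class T>
T** create_padded_2d(size_t rows, size_t columns, size_t alignment) {
  size_t table_size = align_up(rows * sizeof(T*), alignment);
  size_t row_size   = align_up(columns * sizeof(T), alignment);
  size_t total_size = table_size + rows * row_size + alignment;

  void* chunk = std::malloc(total_size);
  std::memset(chunk, 0, total_size);

  T** result = (T**)align_up((uintptr_t)chunk, alignment);
  uintptr_t data_start = (uintptr_t)result + table_size;
  for (size_t i = 0; i < rows; i++) {
    result[i] = (T*)(data_start + i * row_size);
  }
  return result;  // intentionally leaked: the raw chunk pointer is not kept
}
```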
@@ -5083,6 +5083,7 @@ void TestVirtualSpaceNode_test();
 #if INCLUDE_ALL_GCS
 void TestOldFreeSpaceCalculation_test();
 void TestG1BiasedArray_test();
+void TestCodeCacheRemSet_test();
 #endif

 void execute_internal_vm_tests() {
@@ -5108,6 +5109,7 @@ void execute_internal_vm_tests() {
     run_unit_test(TestOldFreeSpaceCalculation_test());
     run_unit_test(TestG1BiasedArray_test());
     run_unit_test(HeapRegionRemSet::test_prt());
+    run_unit_test(TestCodeCacheRemSet_test());
 #endif
     tty->print_cr("All internal VM tests passed");
...
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestPrintGCDetails
* @bug 8035406 8027295 8035398
* @summary Ensure that the PrintGCDetails output for a minor GC with G1
* includes the expected necessary messages.
* @key gc
* @library /testlibrary
*/
import com.oracle.java.testlibrary.ProcessTools;
import com.oracle.java.testlibrary.OutputAnalyzer;
public class TestGCLogMessages {
public static void main(String[] args) throws Exception {
testNormalLogs();
testWithToSpaceExhaustionLogs();
}
private static void testNormalLogs() throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-Xmx10M",
GCTest.class.getName());
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldNotContain("[Redirty Cards");
output.shouldNotContain("[Code Root Purge");
output.shouldNotContain("[Young Free CSet");
output.shouldNotContain("[Non-Young Free CSet");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-Xmx10M",
"-XX:+PrintGCDetails",
GCTest.class.getName());
output = new OutputAnalyzer(pb.start());
output.shouldContain("[Redirty Cards");
output.shouldContain("[Code Root Purge");
output.shouldNotContain("[Young Free CSet");
output.shouldNotContain("[Non-Young Free CSet");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-Xmx10M",
"-XX:+PrintGCDetails",
"-XX:+UnlockExperimentalVMOptions",
"-XX:G1LogLevel=finest",
GCTest.class.getName());
output = new OutputAnalyzer(pb.start());
output.shouldContain("[Redirty Cards");
output.shouldContain("[Code Root Purge");
output.shouldContain("[Young Free CSet");
output.shouldContain("[Non-Young Free CSet");
// also check evacuation failure messages once
output.shouldNotContain("[Evacuation Failure");
output.shouldNotContain("[Recalculate Used");
output.shouldNotContain("[Remove Self Forwards");
output.shouldNotContain("[Restore RemSet");
output.shouldHaveExitValue(0);
}
private static void testWithToSpaceExhaustionLogs() throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-Xmx10M",
"-Xmn5M",
"-XX:+PrintGCDetails",
GCTestWithToSpaceExhaustion.class.getName());
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("[Evacuation Failure");
output.shouldNotContain("[Recalculate Used");
output.shouldNotContain("[Remove Self Forwards");
output.shouldNotContain("[Restore RemSet");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
"-Xmx10M",
"-Xmn5M",
"-XX:+PrintGCDetails",
"-XX:+UnlockExperimentalVMOptions",
"-XX:G1LogLevel=finest",
GCTestWithToSpaceExhaustion.class.getName());
output = new OutputAnalyzer(pb.start());
output.shouldContain("[Evacuation Failure");
output.shouldContain("[Recalculate Used");
output.shouldContain("[Remove Self Forwards");
output.shouldContain("[Restore RemSet");
output.shouldHaveExitValue(0);
}
static class GCTest {
private static byte[] garbage;
public static void main(String [] args) {
System.out.println("Creating garbage");
// create 128MB of garbage. This should result in at least one GC
for (int i = 0; i < 1024; i++) {
garbage = new byte[128 * 1024];
}
System.out.println("Done");
}
}
static class GCTestWithToSpaceExhaustion {
private static byte[] garbage;
private static byte[] largeObject;
public static void main(String [] args) {
largeObject = new byte[5*1024*1024];
System.out.println("Creating garbage");
// create 128MB of garbage. This should result in at least one GC,
// some of them with to-space exhaustion.
for (int i = 0; i < 1024; i++) {
garbage = new byte[128 * 1024];
}
System.out.println("Done");
}
}
}