Commit 5e6e89d1 authored by tamao

Merge

......@@ -122,6 +122,22 @@ class MarkRefsIntoClosure: public CMSOopsInGenClosure {
}
};
class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
private:
const MemRegion _span;
CMSBitMap* _bitMap;
protected:
DO_OOP_WORK_DEFN
public:
Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};
// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
......
......@@ -569,6 +569,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_restart_addr(NULL),
_overflow_list(NULL),
_stats(cmsGen),
_eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
_eden_chunk_array(NULL), // may be set in ctor body
_eden_chunk_capacity(0), // -- ditto --
_eden_chunk_index(0), // -- ditto --
......@@ -732,7 +733,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
// Support for parallelizing survivor space rescan
if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
const size_t max_plab_samples =
((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
......@@ -2137,6 +2138,39 @@ void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
}
void CMSCollector::print_eden_and_survivor_chunk_arrays() {
DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
EdenSpace* eden_space = dng->eden();
ContiguousSpace* from_space = dng->from();
ContiguousSpace* to_space = dng->to();
// Eden
if (_eden_chunk_array != NULL) {
gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
eden_space->bottom(), eden_space->top(),
eden_space->end(), eden_space->capacity());
gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
"_eden_chunk_capacity=" SIZE_FORMAT,
_eden_chunk_index, _eden_chunk_capacity);
for (size_t i = 0; i < _eden_chunk_index; i++) {
gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
i, _eden_chunk_array[i]);
}
}
// Survivor
if (_survivor_chunk_array != NULL) {
gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
from_space->bottom(), from_space->top(),
from_space->end(), from_space->capacity());
gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
"_survivor_chunk_capacity=" SIZE_FORMAT,
_survivor_chunk_index, _survivor_chunk_capacity);
for (size_t i = 0; i < _survivor_chunk_index; i++) {
gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
i, _survivor_chunk_array[i]);
}
}
}
void CMSCollector::getFreelistLocks() const {
// Get locks for all free lists in all generations that this
// collector is responsible for
......@@ -3549,6 +3583,31 @@ CMSPhaseAccounting::~CMSPhaseAccounting() {
// CMS work
// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
class CMSParMarkTask : public AbstractGangTask {
protected:
CMSCollector* _collector;
int _n_workers;
CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
AbstractGangTask(name),
_collector(collector),
_n_workers(n_workers) {}
// Work method in support of parallel rescan ... of young gen spaces
void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
ContiguousSpace* space,
HeapWord** chunk_array, size_t chunk_top);
void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
};
// Parallel initial mark task
class CMSParInitialMarkTask: public CMSParMarkTask {
public:
CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
collector, n_workers) {}
void work(uint worker_id);
};
// Checkpoint the roots into this generation from outside
// this generation. [Note this initial checkpoint need only
// be approximate -- we'll do a catch up phase subsequently.]
......@@ -3646,19 +3705,42 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
// the klasses. The claimed marks need to be cleared before marking starts.
ClassLoaderDataGraph::clear_claimed_marks();
CMKlassClosure klass_closure(&notOlder);
if (CMSPrintEdenSurvivorChunks) {
print_eden_and_survivor_chunk_arrays();
}
{
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
false, // not scavenging
SharedHeap::ScanningOption(roots_scanning_options()),
&notOlder,
true, // walk all of code cache if (so & SO_CodeCache)
NULL,
&klass_closure);
if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
// The parallel version.
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
int n_workers = workers->active_workers();
CMSParInitialMarkTask tsk(this, n_workers);
gch->set_par_threads(n_workers);
initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
if (n_workers > 1) {
GenCollectedHeap::StrongRootsScope srs(gch);
workers->run_task(&tsk);
} else {
GenCollectedHeap::StrongRootsScope srs(gch);
tsk.work(0);
}
gch->set_par_threads(0);
} else {
// The serial version.
CMKlassClosure klass_closure(&notOlder);
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
false, // not scavenging
SharedHeap::ScanningOption(roots_scanning_options()),
&notOlder,
true, // walk all of code cache if (so & SO_CodeCache)
NULL,
&klass_closure);
}
}
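The parallel branch above sizes the gang from active_workers(), prepares the per-space subtasks with initialize_sequential_subtasks_for_young_gen_rescan(), and then either runs CMSParInitialMarkTask on the work gang or, with a single worker, calls tsk.work(0) inline. As a rough standalone illustration of that "same work body, N workers or run inline" dispatch shape (GangTask, InitialMarkTask and run_task below are simplified stand-ins, not the HotSpot AbstractGangTask/FlexibleWorkGang API), one might write:

#include <cstdio>
#include <thread>
#include <vector>

// Stand-in for AbstractGangTask: one virtual work body, run once per worker id.
struct GangTask {
  virtual void work(unsigned worker_id) = 0;
  virtual ~GangTask() {}
};

// Stand-in for CMSParInitialMarkTask: each worker scans young-gen chunks,
// then its share of the remaining strong roots.
struct InitialMarkTask : GangTask {
  void work(unsigned worker_id) {
    std::printf("worker %u: young gen chunks, then remaining roots\n", worker_id);
  }
};

// Mirrors the dispatch above: a gang of n_workers threads, or tsk.work(0) inline.
static void run_task(GangTask& tsk, unsigned n_workers) {
  if (n_workers > 1) {
    std::vector<std::thread> gang;
    for (unsigned i = 0; i < n_workers; i++) {
      gang.emplace_back([&tsk, i] { tsk.work(i); });
    }
    for (size_t j = 0; j < gang.size(); j++) gang[j].join();
  } else {
    tsk.work(0);
  }
}

int main() {
  InitialMarkTask tsk;
  run_task(tsk, 4);
  return 0;
}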
// Clear mod-union table; it will be dirtied in the prologue of
......@@ -4417,7 +4499,9 @@ void CMSCollector::preclean() {
verify_overflow_empty();
_abort_preclean = false;
if (CMSPrecleaningEnabled) {
_eden_chunk_index = 0;
if (!CMSEdenChunksRecordAlways) {
_eden_chunk_index = 0;
}
size_t used = get_eden_used();
size_t capacity = get_eden_capacity();
// Don't start sampling unless we will get sufficiently
......@@ -4526,7 +4610,9 @@ void CMSCollector::sample_eden() {
if (!_start_sampling) {
return;
}
if (_eden_chunk_array) {
// When CMSEdenChunksRecordAlways is true, the eden chunk array
// is populated by the young generation.
if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
if (_eden_chunk_index < _eden_chunk_capacity) {
_eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
......@@ -5010,6 +5096,10 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
// Update the saved marks which may affect the root scans.
gch->save_marks();
if (CMSPrintEdenSurvivorChunks) {
print_eden_and_survivor_chunk_arrays();
}
{
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
......@@ -5116,10 +5206,53 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
}
}
void CMSParInitialMarkTask::work(uint worker_id) {
elapsedTimer _timer;
ResourceMark rm;
HandleMark hm;
// ---------- scan from roots --------------
_timer.start();
GenCollectedHeap* gch = GenCollectedHeap::heap();
Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
CMKlassClosure klass_closure(&par_mri_cl);
// ---------- young gen roots --------------
{
work_on_young_gen_roots(worker_id, &par_mri_cl);
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
"Finished young gen initial mark scan work in %dth thread: %3.3f sec",
worker_id, _timer.seconds());
}
}
// ---------- remaining roots --------------
_timer.reset();
_timer.start();
gch->gen_process_strong_roots(_collector->_cmsGen->level(),
false, // yg was scanned above
false, // this is parallel code
false, // not scavenging
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
&par_mri_cl,
true, // walk all of code cache if (so & SO_CodeCache)
NULL,
&klass_closure);
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
"Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
worker_id, _timer.seconds());
}
}
// Parallel remark task
class CMSParRemarkTask: public AbstractGangTask {
CMSCollector* _collector;
int _n_workers;
class CMSParRemarkTask: public CMSParMarkTask {
CompactibleFreeListSpace* _cms_space;
// The per-thread work queues, available here for stealing.
......@@ -5133,10 +5266,9 @@ class CMSParRemarkTask: public AbstractGangTask {
CompactibleFreeListSpace* cms_space,
int n_workers, FlexibleWorkGang* workers,
OopTaskQueueSet* task_queues):
AbstractGangTask("Rescan roots and grey objects in parallel"),
_collector(collector),
CMSParMarkTask("Rescan roots and grey objects in parallel",
collector, n_workers),
_cms_space(cms_space),
_n_workers(n_workers),
_task_queues(task_queues),
_term(n_workers, task_queues) { }
......@@ -5150,11 +5282,6 @@ class CMSParRemarkTask: public AbstractGangTask {
void work(uint worker_id);
private:
// Work method in support of parallel rescan ... of young gen spaces
void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
ContiguousSpace* space,
HeapWord** chunk_array, size_t chunk_top);
// ... of dirty cards in old space
void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
Par_MarkRefsIntoAndScanClosure* cl);
......@@ -5186,6 +5313,25 @@ class RemarkKlassClosure : public KlassClosure {
}
};
void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
EdenSpace* eden_space = dng->eden();
ContiguousSpace* from_space = dng->from();
ContiguousSpace* to_space = dng->to();
HeapWord** eca = _collector->_eden_chunk_array;
size_t ect = _collector->_eden_chunk_index;
HeapWord** sca = _collector->_survivor_chunk_array;
size_t sct = _collector->_survivor_chunk_index;
assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
do_young_space_rescan(worker_id, cl, from_space, sca, sct);
do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
}
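work_on_young_gen_roots() above feeds each young space and its recorded boundary array into do_young_space_rescan(), whose body (elided in this hunk) has workers claim chunk indices and scan the corresponding address slice. Below is a minimal standalone sketch of that chunked claiming scheme; the Space type, the atomic claim counter and the printf stand in for HotSpot's ContiguousSpace, SequentialSubTasksDone and marking closure, so it illustrates the partitioning idea, not the actual mechanism.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

typedef const char* HeapWordPtr;   // stand-in for HeapWord*

struct Space {                     // stand-in for a contiguous young-gen space
  HeapWordPtr bottom;
  HeapWordPtr top;
};

// Each worker repeatedly claims a chunk index and scans the half-open slice
// [bounds[i], bounds[i+1]); the recorded samples plus bottom/top give the bounds.
static void rescan_space(unsigned worker_id, const Space& space,
                         const std::vector<HeapWordPtr>& chunk_array,
                         std::atomic<size_t>& claim) {
  std::vector<HeapWordPtr> bounds;
  bounds.push_back(space.bottom);
  bounds.insert(bounds.end(), chunk_array.begin(), chunk_array.end());
  bounds.push_back(space.top);

  for (size_t i = claim.fetch_add(1); i + 1 < bounds.size(); i = claim.fetch_add(1)) {
    std::printf("worker %u scans [%p, %p)\n", worker_id,
                (const void*)bounds[i], (const void*)bounds[i + 1]);
    // ... apply the marking closure to every object in this slice ...
  }
}

int main() {
  static const char eden[64] = {0};                 // pretend eden space
  Space space = { eden, eden + 64 };
  std::vector<HeapWordPtr> samples;                 // like _eden_chunk_array[0.._eden_chunk_index)
  samples.push_back(eden + 16);
  samples.push_back(eden + 40);
  std::atomic<size_t> claim(0);
  std::thread w0(rescan_space, 0u, std::cref(space), std::cref(samples), std::ref(claim));
  std::thread w1(rescan_space, 1u, std::cref(space), std::cref(samples), std::ref(claim));
  w0.join();
  w1.join();
  return 0;
}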
// work_queue(i) is passed to the closure
// Par_MarkRefsIntoAndScanClosure. The "i" parameter
// also is passed to do_dirty_card_rescan_tasks() and to
......@@ -5210,23 +5356,7 @@ void CMSParRemarkTask::work(uint worker_id) {
// work first.
// ---------- young gen roots --------------
{
DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
EdenSpace* eden_space = dng->eden();
ContiguousSpace* from_space = dng->from();
ContiguousSpace* to_space = dng->to();
HeapWord** eca = _collector->_eden_chunk_array;
size_t ect = _collector->_eden_chunk_index;
HeapWord** sca = _collector->_survivor_chunk_array;
size_t sct = _collector->_survivor_chunk_index;
assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
do_young_space_rescan(worker_id, &par_mrias_cl, to_space, NULL, 0);
do_young_space_rescan(worker_id, &par_mrias_cl, from_space, sca, sct);
do_young_space_rescan(worker_id, &par_mrias_cl, eden_space, eca, ect);
work_on_young_gen_roots(worker_id, &par_mrias_cl);
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
......@@ -5334,8 +5464,8 @@ void CMSParRemarkTask::work(uint worker_id) {
// Note that parameter "i" is not used.
void
CMSParRemarkTask::do_young_space_rescan(int i,
Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
CMSParMarkTask::do_young_space_rescan(uint worker_id,
OopsInGenClosure* cl, ContiguousSpace* space,
HeapWord** chunk_array, size_t chunk_top) {
// Until all tasks completed:
// . claim an unclaimed task
......@@ -5530,6 +5660,32 @@ CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
"Else our work is not yet done");
}
// Record object boundaries in _eden_chunk_array by sampling the eden
// top in the slow-path eden object allocation code path, if
// CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
// false, we instead rely on the asynchronous sampling in sample_eden()
// that runs during part of the preclean phase.
void CMSCollector::sample_eden_chunk() {
if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
if (_eden_chunk_lock->try_lock()) {
// Record a sample. This is the critical section. The contents
// of the _eden_chunk_array have to be non-decreasing in the
// address order.
_eden_chunk_array[_eden_chunk_index] = *_top_addr;
assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
"Unexpected state of Eden");
if (_eden_chunk_index == 0 ||
((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
(pointer_delta(_eden_chunk_array[_eden_chunk_index],
_eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
_eden_chunk_index++; // commit sample
}
_eden_chunk_lock->unlock();
}
}
}
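Because sample_eden_chunk() is invoked from the eden allocation paths (see the DefNewGeneration hooks later in this change), it is written to never block: it skips the sample if the lock is contended, and only commits an entry when it is strictly above the previous one and at least CMSSamplingGrain away, keeping _eden_chunk_array sorted and reasonably sparse. A minimal standalone sketch of that pattern, assuming std::mutex and a fixed array as stand-ins for the HotSpot Mutex and _eden_chunk_array (the explicit capacity check is a simplification added for this model):

#include <cstddef>
#include <mutex>

static const size_t kSamplingGrain = 64;   // stand-in for CMSSamplingGrain
static const size_t kCapacity      = 1024; // stand-in for _eden_chunk_capacity

struct EdenSampler {
  std::mutex  lock;                        // stand-in for _eden_chunk_lock
  const char* chunk_array[kCapacity];      // stand-in for _eden_chunk_array
  size_t      chunk_index;                 // stand-in for _eden_chunk_index

  EdenSampler() : chunk_index(0) {}

  // 'top' is the current eden allocation top (*_top_addr in the real code).
  void sample(const char* top) {
    if (chunk_index >= kCapacity) return;  // array full: simply stop sampling
    if (!lock.try_lock()) return;          // never make an allocating thread wait
    chunk_array[chunk_index] = top;
    if (chunk_index == 0 ||
        (chunk_array[chunk_index] > chunk_array[chunk_index - 1] &&
         (size_t)(chunk_array[chunk_index] - chunk_array[chunk_index - 1]) >= kSamplingGrain)) {
      chunk_index++;                       // commit: the array stays strictly increasing
    }
    lock.unlock();
  }
};

int main() {
  static char eden[4096];
  EdenSampler sampler;
  for (size_t used = 0; used < sizeof(eden); used += 32) {
    sampler.sample(eden + used);           // as if called from the allocation path
  }
  return sampler.chunk_index > 0 ? 0 : 1;  // samples were recorded and spaced out
}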
// Return a thread-local PLAB recording array, as appropriate.
void* CMSCollector::get_data_recorder(int thr_num) {
if (_survivor_plab_array != NULL &&
......@@ -5553,12 +5709,13 @@ void CMSCollector::reset_survivor_plab_arrays() {
// Merge the per-thread plab arrays into the global survivor chunk
// array which will provide the partitioning of the survivor space
// for CMS rescan.
// for CMS initial scan and rescan.
void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
int no_of_gc_threads) {
assert(_survivor_plab_array != NULL, "Error");
assert(_survivor_chunk_array != NULL, "Error");
assert(_collectorState == FinalMarking, "Error");
assert(_collectorState == FinalMarking ||
(CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
for (int j = 0; j < no_of_gc_threads; j++) {
_cursor[j] = 0;
}
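The body of merge_survivor_plab_arrays() (largely elided in this hunk) walks the per-GC-thread PLAB sample arrays, each already address-ordered, and merges them into the single sorted _survivor_chunk_array that partitions the survivor space for the parallel initial scan and rescan. A minimal standalone sketch of that k-way merge, with plain vectors standing in for the ChunkArray/_cursor bookkeeping (deduplication and capacity handling are omitted):

#include <cstddef>
#include <vector>

typedef const char* HeapWordPtr;   // stand-in for HeapWord*

// Merge per-thread, individually sorted boundary arrays into one global
// sorted array -- the idea behind merge_survivor_plab_arrays().
std::vector<HeapWordPtr> merge_plab_arrays(
    const std::vector<std::vector<HeapWordPtr> >& per_thread) {
  std::vector<size_t> cursor(per_thread.size(), 0);   // like _cursor[j]
  std::vector<HeapWordPtr> merged;                     // like _survivor_chunk_array

  for (;;) {
    // Pick the thread whose next unconsumed sample has the lowest address.
    int min_j = -1;
    for (size_t j = 0; j < per_thread.size(); j++) {
      if (cursor[j] < per_thread[j].size() &&
          (min_j < 0 || per_thread[j][cursor[j]] < per_thread[min_j][cursor[min_j]])) {
        min_j = (int)j;
      }
    }
    if (min_j < 0) break;                              // all inputs consumed
    merged.push_back(per_thread[min_j][cursor[min_j]++]);
  }
  return merged;                                       // globally address-ordered
}

int main() {
  static char sp[256];
  std::vector<std::vector<HeapWordPtr> > samples(2);
  samples[0].push_back(sp + 0);
  samples[0].push_back(sp + 96);
  samples[1].push_back(sp + 32);
  samples[1].push_back(sp + 200);
  return merge_plab_arrays(samples).size() == 4 ? 0 : 1;
}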
......@@ -5621,7 +5778,7 @@ void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
}
// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan of young gen.
// for parallel initial scan and rescan of young gen.
// See ParRescanTask where this is currently used.
void
CMSCollector::
......@@ -6748,6 +6905,28 @@ void MarkRefsIntoClosure::do_oop(oop obj) {
void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
MemRegion span, CMSBitMap* bitMap):
_span(span),
_bitMap(bitMap)
{
assert(_ref_processor == NULL, "deliberately left NULL");
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
void Par_MarkRefsIntoClosure::do_oop(oop obj) {
// if p points into _span, then mark corresponding bit in _markBitMap
assert(obj->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr)) {
// this should be made more efficient
_bitMap->par_mark(addr);
}
}
void Par_MarkRefsIntoClosure::do_oop(oop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
......@@ -9305,7 +9484,6 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
return;
}
}
// Transfer some number of overflown objects to usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
......@@ -9377,4 +9555,3 @@ TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorSt
ShouldNotReachHere();
}
}
......@@ -515,6 +515,8 @@ class CMSCollector: public CHeapObj<mtGC> {
friend class ConcurrentMarkSweepThread;
friend class ConcurrentMarkSweepGeneration;
friend class CompactibleFreeListSpace;
friend class CMSParMarkTask;
friend class CMSParInitialMarkTask;
friend class CMSParRemarkTask;
friend class CMSConcMarkingTask;
friend class CMSRefProcTaskProxy;
......@@ -749,6 +751,7 @@ class CMSCollector: public CHeapObj<mtGC> {
Generation* _young_gen; // the younger gen
HeapWord** _top_addr; // ... Top of Eden
HeapWord** _end_addr; // ... End of Eden
Mutex* _eden_chunk_lock;
HeapWord** _eden_chunk_array; // ... Eden partitioning array
size_t _eden_chunk_index; // ... top (exclusive) of array
size_t _eden_chunk_capacity; // ... max entries in array
......@@ -950,6 +953,7 @@ class CMSCollector: public CHeapObj<mtGC> {
// Support for parallel remark of survivor space
void* get_data_recorder(int thr_num);
void sample_eden_chunk();
CMSBitMap* markBitMap() { return &_markBitMap; }
void directAllocated(HeapWord* start, size_t size);
......@@ -1027,6 +1031,8 @@ class CMSCollector: public CHeapObj<mtGC> {
// Initialization errors
bool completed_initialization() { return _completed_initialization; }
void print_eden_and_survivor_chunk_arrays();
};
class CMSExpansionCause : public AllStatic {
......@@ -1317,6 +1323,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
//Delegate to collector
return collector()->get_data_recorder(thr_num);
}
void sample_eden_chunk() {
//Delegate to collector
return collector()->sample_eden_chunk();
}
// Printing
const char* name() const;
......
......@@ -96,11 +96,6 @@
"the buffer will be enqueued for processing. A value of 0 " \
"specifies that mutator threads should not do such filtering.") \
\
develop(intx, G1ExtraRegionSurvRate, 33, \
"If the young survival rate is S, and there's room left in " \
"to-space, we will allow regions whose survival rate is up to " \
"S + (1 - S)*X, where X is this parameter (as a fraction.)") \
\
develop(bool, G1SATBPrintStubs, false, \
"If true, print generated stubs for the SATB barrier") \
\
......@@ -110,9 +105,6 @@
develop(bool, G1RSBarrierRegionFilter, true, \
"If true, generate region filtering code in RS barrier") \
\
develop(bool, G1RSBarrierNullFilter, true, \
"If true, generate null-pointer filtering code in RS barrier") \
\
develop(bool, G1DeferredRSUpdate, true, \
"If true, use deferred RS updates") \
\
......@@ -120,9 +112,6 @@
"If true, verify that no dirty cards remain after RS log " \
"processing.") \
\
develop(bool, G1RSCountHisto, false, \
"If true, print a histogram of RS occupancies after each pause") \
\
diagnostic(bool, G1PrintRegionLivenessInfo, false, \
"Prints the liveness information for all regions in the heap " \
"at the end of a marking cycle.") \
......@@ -169,9 +158,6 @@
product(uintx, G1ConcRSHotCardLimit, 4, \
"The threshold that defines (>=) a hot card.") \
\
develop(bool, G1PrintOopAppls, false, \
"When true, print applications of closures to external locs.") \
\
develop(intx, G1RSetRegionEntriesBase, 256, \
"Max number of regions in a fine-grain table per MB.") \
\
......
......@@ -314,6 +314,11 @@ void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
region_size = MAX_REGION_SIZE;
}
if (region_size != G1HeapRegionSize) {
// Update the flag to make sure that PrintFlagsFinal logs the correct value
FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size);
}
// And recalculate the log.
region_size_log = log2_long((jlong) region_size);
......
......@@ -1033,6 +1033,9 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
// have to use it here, as well.
HeapWord* result = eden()->par_allocate(word_size);
if (result != NULL) {
if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
_next_gen->sample_eden_chunk();
}
return result;
}
do {
......@@ -1063,13 +1066,19 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
// circular dependency at compile time.
if (result == NULL) {
result = allocate_from_space(word_size);
} else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
_next_gen->sample_eden_chunk();
}
return result;
}
HeapWord* DefNewGeneration::par_allocate(size_t word_size,
bool is_tlab) {
return eden()->par_allocate(word_size);
HeapWord* res = eden()->par_allocate(word_size);
if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
_next_gen->sample_eden_chunk();
}
return res;
}
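With CMSEdenChunksRecordAlways on, both the par_allocate() path above and the slower allocate() path now notify the next generation after a successful eden allocation, so the CMS collector's eden chunk array stays populated regardless of which allocation route a mutator takes; because Generation::sample_eden_chunk() is a no-op by default (see the generation.hpp hunk below), the hook only does work when the old generation is CMS, and the _next_gen != NULL check keeps it safe before the next generation is wired up.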
void DefNewGeneration::gc_prologue(bool full) {
......
......@@ -455,6 +455,7 @@ class Generation: public CHeapObj<mtGC> {
// expected to be GC worker thread-local, with the worker index
// indicated by "thr_num".
virtual void* get_data_recorder(int thr_num) { return NULL; }
virtual void sample_eden_chunk() {}
// Some generations may require some cleanup actions before allowing
// a verification.
......
......@@ -65,7 +65,8 @@ SharedHeap::SharedHeap(CollectorPolicy* policy_) :
}
_sh = this; // ch is static, should be set only once.
if ((UseParNewGC ||
(UseConcMarkSweepGC && CMSParallelRemarkEnabled) ||
(UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
CMSParallelRemarkEnabled)) ||
UseG1GC) &&
ParallelGCThreads > 0) {
_workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
......
......@@ -1891,6 +1891,10 @@ void Arguments::check_deprecated_gc_flags() {
warning("Using MaxGCMinorPauseMillis as minor pause goal is deprecated"
"and will likely be removed in future release");
}
if (FLAG_IS_CMDLINE(DefaultMaxRAMFraction)) {
warning("DefaultMaxRAMFraction is deprecated and will likely be removed in a future release. "
"Use MaxRAMFraction instead.");
}
}
// Check stack pages settings
......
......@@ -1689,6 +1689,9 @@ class CommandLineFlags {
product(bool, CMSAbortSemantics, false, \
"Whether abort-on-overflow semantics is implemented") \
\
product(bool, CMSParallelInitialMarkEnabled, true, \
"Use the parallel initial mark.") \
\
product(bool, CMSParallelRemarkEnabled, true, \
"Whether parallel remark enabled (only if ParNewGC)") \
\
......@@ -1700,6 +1703,14 @@ class CommandLineFlags {
"Whether to always record survivor space PLAB bdries" \
" (effective only if CMSParallelSurvivorRemarkEnabled)") \
\
product(bool, CMSEdenChunksRecordAlways, true, \
"Whether to always record eden chunks used for " \
"the parallel initial mark or remark of eden" ) \
\
product(bool, CMSPrintEdenSurvivorChunks, false, \
"Print the eden and the survivor chunks used for the parallel " \
"initial mark or remark of the eden/survivor spaces") \
\
product(bool, CMSConcurrentMTEnabled, true, \
"Whether multi-threaded concurrent work enabled (if ParNewGC)") \
\
......
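Both new CMS flags default to true, so after this change the initial mark runs in parallel and eden chunk boundaries are recorded eagerly as eden fills, unless explicitly turned off (for example with -XX:-CMSParallelInitialMarkEnabled or -XX:-CMSEdenChunksRecordAlways on a -XX:+UseConcMarkSweepGC command line). CMSPrintEdenSurvivorChunks stays off by default; when enabled, checkpointRootsInitialWork() and checkpointRootsFinalWork() dump the recorded eden and survivor chunk arrays via print_eden_and_survivor_chunk_arrays() for debugging.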
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestG1HeapRegionSize
* @key gc
* @bug 8021879
* @summary Verify that the flag G1HeapRegionSize is updated properly
* @run main/othervm -Xmx64m TestG1HeapRegionSize 1048576
* @run main/othervm -XX:G1HeapRegionSize=2m -Xmx64m TestG1HeapRegionSize 2097152
* @run main/othervm -XX:G1HeapRegionSize=3m -Xmx64m TestG1HeapRegionSize 2097152
* @run main/othervm -XX:G1HeapRegionSize=64m -Xmx256m TestG1HeapRegionSize 33554432
*/
import sun.management.ManagementFactoryHelper;
import com.sun.management.HotSpotDiagnosticMXBean;
import com.sun.management.VMOption;
public class TestG1HeapRegionSize {
public static void main(String[] args) {
HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
VMOption option = diagnostic.getVMOption("UseG1GC");
if (option.getValue().equals("false")) {
System.out.println("Skipping this test. It is only a G1 test.");
return;
}
String expectedValue = getExpectedValue(args);
option = diagnostic.getVMOption("G1HeapRegionSize");
if (!expectedValue.equals(option.getValue())) {
throw new RuntimeException("Wrong value for G1HeapRegionSize. Expected " + expectedValue + " but got " + option.getValue());
}
}
private static String getExpectedValue(String[] args) {
if (args.length != 1) {
throw new RuntimeException("Wrong number of arguments. Expected 1 but got " + args.length);
}
return args[0];
}
}
......@@ -27,7 +27,7 @@
* @bug 8014240
* @summary Test output of G1PrintRegionRememberedSetInfo
* @library /testlibrary
* @build TestPrintRegionRememberedSetInfo
* @run main TestPrintRegionRememberedSetInfo
* @author thomas.schatzl@oracle.com
*/
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestDefaultMaxRAMFraction
* @key gc
* @bug 8021967
* @summary Test that the deprecated DefaultMaxRAMFraction flag prints a warning message
* @library /testlibrary
*/
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;
public class TestDefaultMaxRAMFraction {
public static void main(String[] args) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:DefaultMaxRAMFraction=4", "-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("warning: DefaultMaxRAMFraction is deprecated and will likely be removed in a future release. Use MaxRAMFraction instead.");
output.shouldNotContain("error");
output.shouldHaveExitValue(0);
}
}