Commit 39ff8d62 authored by stefank

8080111: Remove SubTaskDone::_n_threads

Reviewed-by: jmasa, kbarrett
Parent de96d3f0
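This patch removes the cached thread count from SubTasksDone. Previously each collector had to push the expected number of workers into the object before the parallel phase started (via SubTasksDone::set_n_threads(), reached through G1RootProcessor::set_num_workers() and GenCollectedHeap::set_n_termination()); now every worker passes the count directly when it checks in. A minimal sketch of the caller-side difference, assembled from the hunks below (the surrounding worker code is illustrative):

    // Before: two-step protocol; the count is stashed in the object.
    _process_strong_tasks->set_n_threads(active_workers);    // setup path
    // ... parallel phase ...
    _process_strong_tasks->all_tasks_completed();            // each worker

    // After: the count travels with the check-in call itself.
    _process_strong_tasks->all_tasks_completed(n_workers()); // each worker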
@@ -580,10 +580,6 @@ ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
 // active threads.
 void ParNewGenTask::set_for_termination(uint active_workers) {
   _state_set->reset(active_workers, _gen->promotion_failed());
-  // Should the heap be passed in? There's only 1 for now so
-  // grab it instead.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->set_n_termination(active_workers);
 }

 void ParNewGenTask::work(uint worker_id) {
@@ -4326,7 +4326,6 @@ public:
   ParallelTaskTerminator* terminator() { return &_terminator; }

   virtual void set_for_termination(uint active_workers) {
-    _root_processor->set_num_workers(active_workers);
     terminator()->reset_for_reuse(active_workers);
     _n_workers = active_workers;
   }
@@ -204,7 +204,7 @@ void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
     }
   }

-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks->all_tasks_completed(n_workers());
 }

 void G1RootProcessor::process_strong_roots(OopClosure* oops,
@@ -214,7 +214,7 @@ void G1RootProcessor::process_strong_roots(OopClosure* oops,
   process_java_roots(oops, clds, clds, NULL, blobs, NULL, 0);
   process_vm_roots(oops, NULL, NULL, 0);

-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks->all_tasks_completed(n_workers());
 }

 void G1RootProcessor::process_all_roots(OopClosure* oops,
@@ -228,7 +228,7 @@ void G1RootProcessor::process_all_roots(OopClosure* oops,
     CodeCache::blobs_do(blobs);
   }

-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks->all_tasks_completed(n_workers());
 }

 void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
@@ -327,14 +327,6 @@ void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
   _g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
 }

-void G1RootProcessor::set_num_workers(uint active_workers) {
-  assert(active_workers == _srs.n_threads(),
-         err_msg("Mismatch between number of worker threads. active_workers: %u and n_workers(): %u",
-                 active_workers,
-                 _srs.n_threads()));
-  _process_strong_tasks->set_n_threads(active_workers);
-}
-
 uint G1RootProcessor::n_workers() const {
   return _srs.n_threads();
 }
@@ -114,9 +114,6 @@ public:
                              OopClosure* scan_non_heap_weak_roots,
                              uint worker_i);

-  // Inform SubTaskDone about the number of worker threads.
-  void set_num_workers(uint active_workers);
-
   // Number of worker threads used by the root processor.
   uint n_workers() const;
 };
@@ -564,11 +564,6 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)
 void GenCollectedHeap::set_par_threads(uint t) {
   assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
   CollectedHeap::set_par_threads(t);
-  set_n_termination(t);
-}
-
-void GenCollectedHeap::set_n_termination(uint t) {
-  _process_strong_tasks->set_n_threads(t);
 }

 #ifdef ASSERT
@@ -709,7 +704,7 @@ void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
     older_gens->reset_generation();
   }

-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks->all_tasks_completed(scope->n_threads());
 }
@@ -365,7 +365,6 @@ public:
   static GenCollectedHeap* heap();

   void set_par_threads(uint t);
-  void set_n_termination(uint t);

   // Invoke the "do_oop" method of one of the closures "not_older_gens"
   // or "older_gens" on root locations for the generation at
@@ -434,7 +434,7 @@ void WorkGangBarrierSync::abort() {
 // SubTasksDone functions.

 SubTasksDone::SubTasksDone(uint n) :
-  _n_tasks(n), _n_threads(1), _tasks(NULL) {
+  _n_tasks(n), _tasks(NULL) {
   _tasks = NEW_C_HEAP_ARRAY(uint, n, mtInternal);
   guarantee(_tasks != NULL, "alloc failure");
   clear();
@@ -444,12 +444,6 @@ bool SubTasksDone::valid() {
   return _tasks != NULL;
 }

-void SubTasksDone::set_n_threads(uint t) {
-  assert(_claimed == 0 || _threads_completed == _n_threads,
-         "should not be called while tasks are being processed!");
-  _n_threads = (t == 0 ? 1 : t);
-}
-
 void SubTasksDone::clear() {
   for (uint i = 0; i < _n_tasks; i++) {
     _tasks[i] = 0;
@@ -477,7 +471,7 @@ bool SubTasksDone::is_task_claimed(uint t) {
   return res;
 }

-void SubTasksDone::all_tasks_completed() {
+void SubTasksDone::all_tasks_completed(uint n_threads) {
   jint observed = _threads_completed;
   jint old;
   do {
@@ -485,7 +479,10 @@
     observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
   } while (observed != old);
   // If this was the last thread checking in, clear the tasks.
-  if (observed+1 == (jint)_n_threads) clear();
+  uint adjusted_thread_count = (n_threads == 0 ? 1 : n_threads);
+  if (observed + 1 == (jint)adjusted_thread_count) {
+    clear();
+  }
 }
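The cmpxchg loop above is a lock-free check-in counter: each worker atomically bumps _threads_completed, and the worker whose increment reaches the caller-supplied thread count resets the shared state. A standalone sketch of the same pattern, using std::atomic in place of HotSpot's Atomic::cmpxchg (the class name is illustrative, not HotSpot code):

    #include <atomic>
    #include <cstdint>

    class CheckInCounter {
      std::atomic<uint32_t> _threads_completed{0};
    public:
      void all_tasks_completed(uint32_t n_threads) {
        // A count of 0 means a serial caller; treat it as one thread,
        // exactly like adjusted_thread_count in the patched code.
        uint32_t adjusted = (n_threads == 0 ? 1 : n_threads);
        // fetch_add plays the role of the cmpxchg retry loop above.
        uint32_t checked_in = _threads_completed.fetch_add(1) + 1;
        if (checked_in == adjusted) {
          // Last thread in: reset for reuse, as SubTasksDone::clear() does.
          _threads_completed.store(0);
        }
      }
    };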
@@ -390,12 +390,6 @@ public:
 class SubTasksDone: public CHeapObj<mtInternal> {
   uint* _tasks;
   uint _n_tasks;
-  // _n_threads is used to determine when a sub task is done.
-  // It does not control how many threads will execute the subtask
-  // but must be initialized to the number that do execute the task
-  // in order to correctly decide when the subtask is done (all the
-  // threads working on the task have finished).
-  uint _n_threads;
   uint _threads_completed;
 #ifdef ASSERT
   volatile uint _claimed;
@@ -413,11 +407,6 @@ public:
   // True iff the object is in a valid state.
   bool valid();

-  // Get/set the number of parallel threads doing the tasks to "t". Can only
-  // be called before tasks start or after they are complete.
-  uint n_threads() { return _n_threads; }
-  void set_n_threads(uint t);
-
   // Returns "false" if the task "t" is unclaimed, and ensures that task is
   // claimed. The task "t" is required to be within the range of "this".
   bool is_task_claimed(uint t);
@@ -426,7 +415,9 @@ public:
   // tasks that it will try to claim. Every thread in the parallel task
   // must execute this. (When the last thread does so, the task array is
   // cleared.)
-  void all_tasks_completed();
+  //
+  // n_threads - Number of threads executing the sub-tasks.
+  void all_tasks_completed(uint n_threads);

   // Destructor.
   ~SubTasksDone();
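Read together with is_task_claimed(), the header now describes a claim-then-check-in protocol with no setup step. A hedged usage sketch; the task class, sub-task count, and process_sub_task() helper are hypothetical, while the two SubTasksDone calls match the declarations above:

    // Hypothetical parallel task built on the post-patch SubTasksDone API.
    void ExampleRootsTask::work(uint worker_id) {
      for (uint t = 0; t < ExampleSubTasks_count; t++) {
        // Exactly one worker gets "false" for each sub-task and runs it.
        if (!_sub_tasks->is_task_claimed(t)) {
          process_sub_task(t);
        }
      }
      // Every worker must check in; the last of the n_workers threads to
      // arrive clears the task array for the next use.
      _sub_tasks->all_tasks_completed(n_workers);
    }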