openanolis / dragonwell8_hotspot
Commit ee972d58
Authored Apr 01, 2010 by trims
Merge

Parents: dc7dfb63, 9677603a

Showing 33 changed files with 527 additions and 209 deletions (+527 -209)
src/share/vm/gc_implementation/g1/concurrentMark.cpp                            +22  -4
src/share/vm/gc_implementation/g1/concurrentMark.hpp                            +2   -2
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                           +11  -8
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp                           +8   -3
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp                               +2   -0
src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp                   +3   -3
src/share/vm/gc_implementation/g1/ptrQueue.cpp                                  +2   -2
src/share/vm/gc_implementation/g1/ptrQueue.hpp                                  +4   -2
src/share/vm/gc_implementation/g1/satbQueue.cpp                                 +50  -2
src/share/vm/gc_implementation/g1/satbQueue.hpp                                 +9   -5
src/share/vm/gc_implementation/includeDB_gc_parallelScavenge                    +5   -1
src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp                     +12  -10
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp         +32  -27
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp         +28  -17
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp  +32  -0
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp                 +2   -0
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp           +5   -22
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp           +1   -2
src/share/vm/gc_implementation/shared/markSweep.cpp                             +16  -7
src/share/vm/gc_implementation/shared/markSweep.hpp                             +3   -1
src/share/vm/gc_implementation/shared/markSweep.inline.hpp                      +6   -0
src/share/vm/includeDB_core                                                     +7   -1
src/share/vm/includeDB_gc_parallel                                              +4   -0
src/share/vm/memory/genMarkSweep.cpp                                            +2   -1
src/share/vm/memory/genOopClosures.hpp                                          +4   -4
src/share/vm/oops/objArrayKlass.cpp                                             +13  -13
src/share/vm/oops/objArrayKlass.hpp                                             +8   -1
src/share/vm/oops/objArrayKlass.inline.hpp                                      +89  -0
src/share/vm/runtime/arguments.cpp                                              +1   -3
src/share/vm/runtime/globals.hpp                                                +4   -0
src/share/vm/utilities/globalDefinitions.hpp                                    +2   -0
src/share/vm/utilities/taskqueue.cpp                                            +7   -4
src/share/vm/utilities/taskqueue.hpp                                            +131 -64
src/share/vm/gc_implementation/g1/concurrentMark.cpp

@@ -760,7 +760,10 @@ void ConcurrentMark::checkpointRootsInitialPost() {
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  satb_mq_set.set_active_all_threads(true);
+  // This is the start of the marking cycle, we're expected all
+  // threads to have SATB queues with active set to false.
+  satb_mq_set.set_active_all_threads(true, /* new active value */
+                                     false /* expected_active */);
 
   // update_g1_committed() will be called at the end of an evac pause
   // when marking is on. So, it's also called at the end of the

@@ -1079,7 +1082,11 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
   } else {
     // We're done with marking.
-    JavaThread::satb_mark_queue_set().set_active_all_threads(false);
+    // This is the end of the marking cycle, we're expected all
+    // threads to have SATB queues with active set to true.
+    JavaThread::satb_mark_queue_set().set_active_all_threads(
+                                                  false, /* new active value */
+                                                  true /* expected_active */);
 
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope

@@ -2586,7 +2593,11 @@ void ConcurrentMark::abort() {
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   satb_mq_set.abandon_partial_marking();
-  satb_mq_set.set_active_all_threads(false);
+  // This can be called either during or outside marking, we'll read
+  // the expected_active value from the SATB queue set.
+  satb_mq_set.set_active_all_threads(
+                                 false, /* new active value */
+                                 satb_mq_set.is_active() /* expected_active */);
 }
 
 static void print_ms_time_info(const char* prefix, const char* name,

@@ -3704,7 +3715,14 @@ void CMTask::do_marking_step(double time_target_ms) {
           // enough to point to the next possible object header (the
           // bitmap knows by how much we need to move it as it knows its
           // granularity).
-          move_finger_to(_nextMarkBitMap->nextWord(_finger));
+          assert(_finger < _region_limit, "invariant");
+          HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
+          // Check if bitmap iteration was aborted while scanning the last object
+          if (new_finger >= _region_limit) {
+            giveup_current_region();
+          } else {
+            move_finger_to(new_finger);
+          }
         }
       }
       // At this point we have either completed iterating over the
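The last hunk replaces an unconditional finger advance with a bounds check: nextWord() can step past the region limit when the aborted scan ended on the region's last object. A minimal standalone sketch of the same guard (illustrative names and values, not HotSpot code):

#include <cassert>
#include <cstdio>

typedef unsigned long HeapWord;  // stand-in: one word per "address"

int main() {
  const HeapWord region_limit = 100;
  HeapWord finger = 99;              // scanning the last object in the region

  assert(finger < region_limit);     // invariant asserted by the patch
  HeapWord new_finger = finger + 1;  // nextWord() may step past the limit

  if (new_finger >= region_limit) {
    printf("give up current region\n");  // previously an unchecked move
  } else {
    printf("move finger to %lu\n", new_finger);
  }
  return 0;
}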
src/share/vm/gc_implementation/g1/concurrentMark.hpp

@@ -24,8 +24,8 @@
 class G1CollectedHeap;
 class CMTask;
-typedef GenericTaskQueue<oop> CMTaskQueue;
-typedef GenericTaskQueueSet<oop> CMTaskQueueSet;
+typedef GenericTaskQueue<oop>            CMTaskQueue;
+typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
 
 // A generic CM bit map. This is essentially a wrapper around the BitMap
 // class, with one bit per (1<<_shifter) HeapWords.
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -2102,18 +2102,21 @@ size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
   // Return the remaining space in the cur alloc region, but not less than
   // the min TLAB size.
-  // Also, no more than half the region size, since we can't allow tlabs to
-  // grow big enough to accomodate humongous objects.
-  // We need to story it locally, since it might change between when we
-  // test for NULL and when we use it later.
+  // Also, this value can be at most the humongous object threshold,
+  // since we can't allow tlabs to grow big enough to accomodate
+  // humongous objects.
+  // We need to store the cur alloc region locally, since it might change
+  // between when we test for NULL and when we use it later.
   ContiguousSpace* cur_alloc_space = _cur_alloc_region;
+  size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
   if (cur_alloc_space == NULL) {
-    return HeapRegion::GrainBytes/2;
+    return max_tlab_size;
   } else {
-    return MAX2(MIN2(cur_alloc_space->free(),
-                     (size_t)(HeapRegion::GrainBytes/2)),
-                (size_t)MinTLABSize);
+    return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
+                max_tlab_size);
   }
 }
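The new return expression clamps the reported TLAB size into the range [MinTLABSize, humongous threshold] rather than capping at half a region. A small worked example with made-up numbers (the real values come from MinTLABSize and _humongous_object_threshold_in_words):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Illustrative numbers only, not HotSpot defaults.
  const size_t min_tlab_size  = 2 * 1024;     // stand-in for MinTLABSize
  const size_t max_tlab_size  = 512 * 1024;   // humongous threshold in bytes
  const size_t free_in_region = 9000 * 1024;  // free space above the cap

  // Same shape as MIN2(MAX2(free, MinTLABSize), max_tlab_size) above.
  size_t tlab = std::min(std::max(free_in_region, min_tlab_size), max_tlab_size);
  printf("tlab = %zu bytes\n", tlab);  // 524288: capped at the threshold
  return 0;
}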
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -56,8 +56,8 @@ class ConcurrentZFThread;
 #  define IF_G1_DETAILED_STATS(code)
 #endif
 
-typedef GenericTaskQueue<StarTask> RefToScanQueue;
-typedef GenericTaskQueueSet<StarTask> RefToScanQueueSet;
+typedef GenericTaskQueue<StarTask>          RefToScanQueue;
+typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
 
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

@@ -1055,7 +1055,12 @@ public:
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
-    return word_size >= _humongous_object_threshold_in_words;
+    // Note this has to be strictly greater-than as the TLABs
+    // are capped at the humongous thresold and we want to
+    // ensure that we don't try to allocate a TLAB as
+    // humongous and that we don't allocate a humongous
+    // object in a TLAB.
+    return word_size > _humongous_object_threshold_in_words;
   }
 
   // Update mod union table with the set of dirty cards.
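The comment explains the off-by-one: a TLAB may now be exactly threshold-sized, and an allocation of exactly that size must go into a TLAB rather than be treated as humongous. The boundary behaviour in two asserts (the threshold value is assumed for illustration):

#include <cassert>
#include <cstddef>

static const size_t threshold_in_words = 65536;  // assumed, for illustration

static bool isHumongous(size_t word_size) {
  return word_size > threshold_in_words;  // strictly greater-than, as in the hunk
}

int main() {
  assert(!isHumongous(threshold_in_words));     // max-size TLAB: not humongous
  assert(isHumongous(threshold_in_words + 1));  // one word more: humongous
  return 0;
}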
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

@@ -101,6 +101,8 @@ void G1MarkSweep::allocate_stacks() {
   GenMarkSweep::_marking_stack =
     new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  GenMarkSweep::_objarray_stack =
+    new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
 
   int size = SystemDictionary::number_of_classes() * 2;
   GenMarkSweep::_revisit_klass_stack =
src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp

@@ -35,7 +35,7 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   assert(pre_val->is_oop_or_null(true), "Error");
-  if (!JavaThread::satb_mark_queue_set().active()) return;
+  if (!JavaThread::satb_mark_queue_set().is_active()) return;
   Thread* thr = Thread::current();
   if (thr->is_Java_thread()) {
     JavaThread* jt = (JavaThread*)thr;

@@ -51,7 +51,7 @@ template <class T> void
 G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
                                                     oop new_val,
                                                     JavaThread* jt) {
-  if (!JavaThread::satb_mark_queue_set().active()) return;
+  if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T heap_oop = oopDesc::load_heap_oop(field);
   if (!oopDesc::is_null(heap_oop)) {
     oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);

@@ -62,7 +62,7 @@ G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
 template <class T> void
 G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
-  if (!JavaThread::satb_mark_queue_set().active()) return;
+  if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T* elem_ptr = dst;
   for (int i = 0; i < count; i++, elem_ptr++) {
     T heap_oop = oopDesc::load_heap_oop(elem_ptr);
src/share/vm/gc_implementation/g1/ptrQueue.cpp

@@ -25,8 +25,8 @@
 # include "incls/_precompiled.incl"
 # include "incls/_ptrQueue.cpp.incl"
 
-PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm) :
-  _qset(qset_), _buf(NULL), _index(0), _active(false),
+PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm, bool active) :
+  _qset(qset_), _buf(NULL), _index(0), _active(active),
   _perm(perm), _lock(NULL)
 {}
src/share/vm/gc_implementation/g1/ptrQueue.hpp

@@ -62,7 +62,7 @@ protected:
 public:
   // Initialize this queue to contain a null buffer, and be part of the
   // given PtrQueueSet.
-  PtrQueue(PtrQueueSet*, bool perm = false);
+  PtrQueue(PtrQueueSet*, bool perm = false, bool active = false);
   // Release any contained resources.
   void flush();
   // Calls flush() when destroyed.

@@ -101,6 +101,8 @@ public:
     }
   }
 
+  bool is_active() { return _active; }
+
   static int byte_index_to_index(int ind) {
     assert((ind % oopSize) == 0, "Invariant.");
     return ind / oopSize;

@@ -257,7 +259,7 @@ public:
   bool process_completed_buffers() { return _process_completed; }
   void set_process_completed(bool x) { _process_completed = x; }
 
-  bool active() { return _all_active; }
+  bool is_active() { return _all_active; }
 
   // Set the buffer size.  Should be called before any "enqueue" operation
   // can be called.  And should only be called once.
src/share/vm/gc_implementation/g1/satbQueue.cpp

@@ -82,9 +82,57 @@ void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
   t->satb_mark_queue().handle_zero_index();
 }
 
-void SATBMarkQueueSet::set_active_all_threads(bool b) {
+#ifdef ASSERT
+void SATBMarkQueueSet::dump_active_values(JavaThread* first,
+                                          bool expected_active) {
+  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
+  gclog_or_tty->print_cr(" SATB queue set: active is %s",
+                         (is_active()) ? "TRUE" : "FALSE");
+  gclog_or_tty->print_cr(" expected_active is %s",
+                         (expected_active) ? "TRUE" : "FALSE");
+  for (JavaThread* t = first; t; t = t->next()) {
+    bool active = t->satb_mark_queue().is_active();
+    gclog_or_tty->print_cr("  thread %s, active is %s",
+                           t->name(), (active) ? "TRUE" : "FALSE");
+  }
+}
+#endif // ASSERT
+
+void SATBMarkQueueSet::set_active_all_threads(bool b,
+                                              bool expected_active) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+  JavaThread* first = Threads::first();
+
+#ifdef ASSERT
+  if (_all_active != expected_active) {
+    dump_active_values(first, expected_active);
+
+    // I leave this here as a guarantee, instead of an assert, so
+    // that it will still be compiled in if we choose to uncomment
+    // the #ifdef ASSERT in a product build. The whole block is
+    // within an #ifdef ASSERT so the guarantee will not be compiled
+    // in a product build anyway.
+    guarantee(false,
+              "SATB queue set has an unexpected active value");
+  }
+#endif // ASSERT
   _all_active = b;
-  for (JavaThread* t = Threads::first(); t; t = t->next()) {
+
+  for (JavaThread* t = first; t; t = t->next()) {
+#ifdef ASSERT
+    bool active = t->satb_mark_queue().is_active();
+    if (active != expected_active) {
+      dump_active_values(first, expected_active);
+
+      // I leave this here as a guarantee, instead of an assert, so
+      // that it will still be compiled in if we choose to uncomment
+      // the #ifdef ASSERT in a product build. The whole block is
+      // within an #ifdef ASSERT so the guarantee will not be compiled
+      // in a product build anyway.
+      guarantee(false,
+                "thread has an unexpected active value in its SATB queue");
+    }
+#endif // ASSERT
     t->satb_mark_queue().set_active(b);
   }
 }
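The shape of the new debug check: the caller states the active value it expects, and every queue (plus the set-wide flag) is verified against that expectation before being flipped. A compact sketch of the same state-machine discipline outside HotSpot (names invented for the example):

#include <cstdio>
#include <cstdlib>
#include <vector>

struct Queue { bool active; };

static std::vector<Queue> queues(4, Queue{false});
static bool all_active = false;

// Mirrors set_active_all_threads(b, expected_active) from the hunk above.
static void set_active_all(bool b, bool expected_active) {
  if (all_active != expected_active) {
    fprintf(stderr, "queue set has an unexpected active value\n");
    abort();  // plays the role of guarantee(false, ...)
  }
  all_active = b;
  for (Queue& q : queues) {
    if (q.active != expected_active) {
      fprintf(stderr, "queue has an unexpected active value\n");
      abort();
    }
    q.active = b;
  }
}

int main() {
  set_active_all(true,  /* expected_active */ false);  // start of marking
  set_active_all(false, /* expected_active */ true);   // end of marking
  return 0;
}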
src/share/vm/gc_implementation/g1/satbQueue.hpp

@@ -29,8 +29,7 @@ class JavaThread;
 class ObjPtrQueue: public PtrQueue {
 public:
   ObjPtrQueue(PtrQueueSet* qset_, bool perm = false) :
-    PtrQueue(qset_, perm)
-  {}
+    PtrQueue(qset_, perm, qset_->is_active()) { }
   // Apply the closure to all elements, and reset the index to make the
   // buffer empty.
   void apply_closure(ObjectClosure* cl);

@@ -55,6 +54,9 @@ class SATBMarkQueueSet: public PtrQueueSet {
   // is ignored.
   bool apply_closure_to_completed_buffer_work(bool par, int worker);
 
+#ifdef ASSERT
+  void dump_active_values(JavaThread* first, bool expected_active);
+#endif // ASSERT
 
 public:
   SATBMarkQueueSet();

@@ -65,9 +67,11 @@ public:
   static void handle_zero_index_for_thread(JavaThread* t);
 
-  // Apply "set_active(b)" to all thread tloq's. Should be called only
-  // with the world stopped.
-  void set_active_all_threads(bool b);
+  // Apply "set_active(b)" to all Java threads' SATB queues. It should be
+  // called only with the world stopped. The method will assert that the
+  // SATB queues of all threads it visits, as well as the SATB queue
+  // set itself, has an active value same as expected_active.
+  void set_active_all_threads(bool b, bool expected_active);
 
   // Register "blk" as "the closure" for all queues. Only one such closure
   // is allowed. The "apply_closure_to_completed_buffer" method will apply
src/share/vm/gc_implementation/includeDB_gc_parallelScavenge

@@ -175,6 +175,7 @@ psAdaptiveSizePolicy.hpp gcUtil.hpp
 psAdaptiveSizePolicy.hpp                adaptiveSizePolicy.hpp
 
 psCompactionManager.cpp                 gcTaskManager.hpp
+psCompactionManager.cpp                 objArrayKlass.inline.hpp
 psCompactionManager.cpp                 objectStartArray.hpp
 psCompactionManager.cpp                 oop.hpp
 psCompactionManager.cpp                 oop.inline.hpp

@@ -189,6 +190,9 @@ psCompactionManager.cpp systemDictionary.hpp
 psCompactionManager.hpp                 allocation.hpp
 psCompactionManager.hpp                 taskqueue.hpp
 
+psCompactionManager.inline.hpp          psCompactionManager.hpp
+psCompactionManager.inline.hpp          psParallelCompact.hpp
+
 psGCAdaptivePolicyCounters.hpp          gcAdaptivePolicyCounters.hpp
 psGCAdaptivePolicyCounters.hpp          gcPolicyCounters.hpp
 psGCAdaptivePolicyCounters.hpp          psAdaptiveSizePolicy.hpp

@@ -379,12 +383,12 @@ pcTasks.cpp fprofiler.hpp
 pcTasks.cpp                             jniHandles.hpp
 pcTasks.cpp                             jvmtiExport.hpp
 pcTasks.cpp                             management.hpp
+pcTasks.cpp                             objArrayKlass.inline.hpp
 pcTasks.cpp                             psParallelCompact.hpp
 pcTasks.cpp                             pcTasks.hpp
 pcTasks.cpp                             oop.inline.hpp
 pcTasks.cpp                             oop.pcgc.inline.hpp
-pcTasks.cpp                             systemDictionary.hpp
 pcTasks.cpp                             taskqueue.hpp
 pcTasks.cpp                             thread.hpp
 pcTasks.cpp                             universe.hpp
 pcTasks.cpp                             vmThread.hpp
src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp

@@ -48,7 +48,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
   _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
 
   // Do the real work
-  cm->drain_marking_stacks(&mark_and_push_closure);
+  cm->follow_marking_stacks();
 }

@@ -118,7 +118,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
   }
 
   // Do the real work
-  cm->drain_marking_stacks(&mark_and_push_closure);
+  cm->follow_marking_stacks();
   // cm->deallocate_stacks();
 }

@@ -196,17 +196,19 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
 
   oop obj = NULL;
+  ObjArrayTask task;
   int random_seed = 17;
-  while(true) {
-    if (ParCompactionManager::steal(which, &random_seed, obj)) {
+  do {
+    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(cm, task.obj(), task.index());
+      cm->follow_marking_stacks();
+    }
+    while (ParCompactionManager::steal(which, &random_seed, obj)) {
       obj->follow_contents(cm);
-      cm->drain_marking_stacks(&mark_and_push_closure);
-    } else {
-      if (terminator()->offer_termination()) {
-        break;
-      }
+      cm->follow_marking_stacks();
     }
-  }
+  } while (!terminator()->offer_termination());
 }
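The rewritten loop gives array chunks priority over plain objects and asks the terminator for permission to exit only after both steal sources come up empty. A reduced, single-threaded model of that control flow (the steal sources are faked with counters):

#include <cstdio>

static int objarray_tasks = 2;
static int oop_tasks = 3;

static bool steal_objarray()    { return objarray_tasks-- > 0; }
static bool steal_oop()         { return oop_tasks-- > 0; }
static bool offer_termination() { return objarray_tasks <= 0 && oop_tasks <= 0; }

int main() {
  do {
    while (steal_objarray()) printf("process one array chunk\n");
    while (steal_oop())      printf("process one object\n");
  } while (!offer_termination());  // same shape as the new StealMarkingTask loop
  return 0;
}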
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp

@@ -28,6 +28,8 @@
 PSOldGen*               ParCompactionManager::_old_gen = NULL;
 ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
 OopTaskQueueSet*        ParCompactionManager::_stack_array = NULL;
+ParCompactionManager::ObjArrayTaskQueueSet*
+  ParCompactionManager::_objarray_queues = NULL;
 ObjectStartArray*       ParCompactionManager::_start_array = NULL;
 ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
 RegionTaskQueueSet*     ParCompactionManager::_region_array = NULL;

@@ -46,6 +48,11 @@ ParCompactionManager::ParCompactionManager() :
   // We want the overflow stack to be permanent
   _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
+
+  _objarray_queue.initialize();
+  _objarray_overflow_stack =
+    new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
+
 #ifdef USE_RegionTaskQueueWithOverflow
   region_stack()->initialize();
 #else

@@ -69,6 +76,7 @@ ParCompactionManager::ParCompactionManager() :
 ParCompactionManager::~ParCompactionManager() {
   delete _overflow_stack;
+  delete _objarray_overflow_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
   // _manager_array and _stack_array are statics

@@ -86,18 +94,21 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
   assert(_manager_array == NULL, "Attempt to initialize twice");
   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1);
-  guarantee(_manager_array != NULL, "Could not initialize promotion manager");
+  guarantee(_manager_array != NULL, "Could not allocate manager_array");
 
   _stack_array = new OopTaskQueueSet(parallel_gc_threads);
-  guarantee(_stack_array != NULL, "Count not initialize promotion manager");
+  guarantee(_stack_array != NULL, "Could not allocate stack_array");
+  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
+  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
   _region_array = new RegionTaskQueueSet(parallel_gc_threads);
-  guarantee(_region_array != NULL, "Count not initialize promotion manager");
+  guarantee(_region_array != NULL, "Could not allocate region_array");
 
   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
+    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
 #ifdef USE_RegionTaskQueueWithOverflow
     region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
 #else

@@ -203,36 +214,30 @@ void ParCompactionManager::reset() {
   }
 }
 
-void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
-#ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-  MutableSpace* to_space = heap->young_gen()->to_space();
-  MutableSpace* old_space = heap->old_gen()->object_space();
-  MutableSpace* perm_space = heap->perm_gen()->object_space();
-#endif /* ASSERT */
-
+void ParCompactionManager::follow_marking_stacks() {
   do {
-    // Drain overflow stack first, so other threads can steal from
-    // claimed stack while we work.
-    while(!overflow_stack()->is_empty()) {
-      oop obj = overflow_stack()->pop();
-      obj->follow_contents(this);
-    }
-
+    // Drain the overflow stack first, to allow stealing from the marking stack.
     oop obj;
-    // obj is a reference!!!
+    while (!overflow_stack()->is_empty()) {
+      overflow_stack()->pop()->follow_contents(this);
+    }
     while (marking_stack()->pop_local(obj)) {
       // It would be nice to assert about the type of objects we might
       // pop, but they can come from anywhere, unfortunately.
       obj->follow_contents(this);
    }
-  } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));
 
-  assert(marking_stack()->size() == 0, "Sanity");
-  assert(overflow_stack()->length() == 0, "Sanity");
+    // Process ObjArrays one at a time to avoid marking stack bloat.
+    ObjArrayTask task;
+    if (!_objarray_overflow_stack->is_empty()) {
+      task = _objarray_overflow_stack->pop();
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(this, task.obj(), task.index());
+    } else if (_objarray_queue.pop_local(task)) {
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(this, task.obj(), task.index());
+    }
+  } while (!marking_stacks_empty());
+
+  assert(marking_stacks_empty(), "Sanity");
 }
 
 void ParCompactionManager::drain_region_overflow_stack() {
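Two details of follow_marking_stacks() above carry the design: the private overflow stack is drained before the stealable queue so that visible work stays available to other workers, and at most one ObjArrayTask is taken per round, so a large array feeds the loop one stride at a time. A sketch of that loop shape with standard containers:

#include <cstdio>
#include <stack>

int main() {
  std::stack<int> objects;       // stand-in for the oop marking stack
  std::stack<int> array_chunks;  // stand-in for the ObjArrayTask queue
  array_chunks.push(0);
  objects.push(42);

  do {
    while (!objects.empty()) {      // drain all ordinary objects first
      printf("follow object %d\n", objects.top());
      objects.pop();
    }
    if (!array_chunks.empty()) {    // then at most ONE array chunk per round,
      int idx = array_chunks.top(); // so array work never floods the stacks
      array_chunks.pop();
      printf("scan array from chunk %d\n", idx);
      if (idx + 1 < 3) array_chunks.push(idx + 1);  // push the continuation
    }
  } while (!objects.empty() || !array_chunks.empty());
  return 0;
}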
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp

@@ -22,18 +22,6 @@
  *
  */
 
-//
-// psPromotionManager is used by a single thread to manage object survival
-// during a scavenge. The promotion manager contains thread local data only.
-//
-// NOTE! Be carefull when allocating the stacks on cheap. If you are going
-// to use a promotion manager in more than one thread, the stacks MUST be
-// on cheap. This can lead to memory leaks, though, as they are not auto
-// deallocated.
-//
-// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
-//
 // Move to some global location
 #define HAS_BEEN_MOVED 0x1501d01d
 // End move to some global location

@@ -46,8 +34,6 @@ class ObjectStartArray;
 class ParallelCompactData;
 class ParMarkBitMap;
 
-// Move to it's own file if this works out.
-
 class ParCompactionManager : public CHeapObj {
   friend class ParallelTaskTerminator;
   friend class ParMarkBitMap;

@@ -72,14 +58,27 @@ class ParCompactionManager : public CHeapObj {
 // ------------------------  End don't putback if not needed
 
  private:
+  // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
+  #define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
+  typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue;
+  typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
+  #undef OBJARRAY_QUEUE_SIZE
+
   static ParCompactionManager** _manager_array;
   static OopTaskQueueSet*       _stack_array;
+  static ObjArrayTaskQueueSet*  _objarray_queues;
   static ObjectStartArray*      _start_array;
   static RegionTaskQueueSet*    _region_array;
   static PSOldGen*              _old_gen;
 
+private:
   OopTaskQueue                  _marking_stack;
   GrowableArray<oop>*           _overflow_stack;
+
+  typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack;
+  ObjArrayTaskQueue             _objarray_queue;
+  ObjArrayOverflowStack*        _objarray_overflow_stack;
+
   // Is there a way to reuse the _marking_stack for the
   // saving empty regions?  For now just create a different
   // type of TaskQueue.

@@ -128,8 +127,8 @@ class ParCompactionManager : public CHeapObj {
   // Pushes onto the region stack.  If the region stack is full,
   // pushes onto the region overflow stack.
   void region_stack_push(size_t region_index);
- public:
 
+ public:
   Action action() { return _action; }
   void set_action(Action v) { _action = v; }

@@ -163,6 +162,8 @@ class ParCompactionManager : public CHeapObj {
   // Get a oop for scanning.  If returns null, no oop were found.
   oop retrieve_for_scanning();
 
+  inline void push_objarray(oop obj, size_t index);
+
   // Save region for later processing.  Must not fail.
   void save_for_processing(size_t region_index);
   // Get a region for processing.  If returns null, no region were found.

@@ -175,12 +176,17 @@ class ParCompactionManager : public CHeapObj {
     return stack_array()->steal(queue_num, seed, t);
   }
 
+  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
+    return _objarray_queues->steal(queue_num, seed, t);
+  }
+
   static bool steal(int queue_num, int* seed, RegionTask& t) {
     return region_array()->steal(queue_num, seed, t);
   }
 
-  // Process tasks remaining on any stack
-  void drain_marking_stacks(OopClosure *blk);
+  // Process tasks remaining on any marking stack
+  void follow_marking_stacks();
+  inline bool marking_stacks_empty() const;
 
   // Process tasks remaining on any stack
   void drain_region_stacks();

@@ -200,3 +206,8 @@ inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
          "out of range manager_array access");
   return _manager_array[index];
 }
+
+bool ParCompactionManager::marking_stacks_empty() const {
+  return _marking_stack.size() == 0 && _overflow_stack->is_empty() &&
+    _objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty();
+}
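The arithmetic behind the OBJARRAY_QUEUE_SIZE comment, assuming an 8-byte ObjArrayTask on 32-bit (an oop plus an int) and 16 bytes on 64-bit, as the comment does:

#include <cstdio>

int main() {
  printf("32-bit: %d entries * 8 bytes  = %d KiB\n", 1 << 12, ((1 << 12) * 8) / 1024);
  printf("64-bit: %d entries * 16 bytes = %d KiB\n", 1 << 13, ((1 << 13) * 16) / 1024);
  return 0;  // 4096 * 8 = 32 KiB and 8192 * 16 = 128 KiB, matching "4K * 8" and "8K * 16"
}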
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp (new file, mode 100644)
/*
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
void ParCompactionManager::push_objarray(oop obj, size_t index)
{
  ObjArrayTask task(obj, index);
  assert(task.is_valid(), "bad ObjArrayTask");
  if (!_objarray_queue.push(task)) {
    _objarray_overflow_stack->push(task);
  }
}
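push_objarray() is an instance of the pairing used throughout this change: a fixed-capacity, stealable queue backed by an unbounded thread-local overflow stack, so a push can never fail. A generic sketch of the pattern (standard containers standing in for GenericTaskQueue):

#include <cstddef>
#include <cstdio>
#include <queue>
#include <stack>

template <typename T, size_t Capacity>
struct BoundedQueueWithOverflow {
  std::queue<T> queue;     // stand-in for the stealable fixed-size queue
  std::stack<T> overflow;  // unbounded, thread-local fallback

  void push(const T& t) {
    if (queue.size() < Capacity) queue.push(t);  // fast path
    else overflow.push(t);                       // overflow: never fails
  }
};

int main() {
  BoundedQueueWithOverflow<int, 2> q;
  for (int i = 0; i < 5; ++i) q.push(i);
  printf("queued %zu, overflowed %zu\n", q.queue.size(), q.overflow.size());
  return 0;
}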
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

@@ -479,6 +479,7 @@ void PSMarkSweep::allocate_stacks() {
   _preserved_oop_stack = NULL;
 
   _marking_stack  = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  _objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
 
   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);

@@ -497,6 +498,7 @@ void PSMarkSweep::deallocate_stacks() {
   }
 
   delete _marking_stack;
+  delete _objarray_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
 }
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

@@ -785,7 +785,7 @@ PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closu
 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
 
-void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
+void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
 
 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }

@@ -2376,7 +2376,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   // Follow code cache roots.
   CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
                           purged_class);
-  follow_stack(cm); // Flush marking stack.
+  cm->follow_marking_stacks(); // Flush marking stack.
 
   // Update subklass/sibling/implementor links of live klasses
   // revisit_klass_stack is used in follow_weak_klass_links().

@@ -2389,8 +2389,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   SymbolTable::unlink(is_alive_closure());
   StringTable::unlink(is_alive_closure());
 
-  assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
-  assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
+  assert(cm->marking_stacks_empty(), "marking stacks should be empty");
 }
 
 // This should be moved to the shared markSweep code!

@@ -2709,22 +2708,6 @@ void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
   young_gen->move_and_update(cm);
 }
 
-void
-PSParallelCompact::follow_stack(ParCompactionManager* cm) {
-  while(!cm->overflow_stack()->is_empty()) {
-    oop obj = cm->overflow_stack()->pop();
-    obj->follow_contents(cm);
-  }
-
-  oop obj;
-  // obj is a reference!!!
-  while (cm->marking_stack()->pop_local(obj)) {
-    // It would be nice to assert about the type of objects we might
-    // pop, but they can come from anywhere, unfortunately.
-    obj->follow_contents(cm);
-  }
-}
-
 void
 PSParallelCompact::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.

@@ -2745,7 +2728,7 @@ PSParallelCompact::follow_weak_klass_links() {
                                 &keep_alive_closure);
     }
     // revisit_klass_stack is cleared in reset()
-    follow_stack(cm);
+    cm->follow_marking_stacks();
   }
 }

@@ -2776,7 +2759,7 @@ void PSParallelCompact::follow_mdo_weak_refs() {
       rms->at(j)->follow_weak_refs(is_alive_closure());
     }
     // revisit_mdo_stack is cleared in reset()
-    follow_stack(cm);
+    cm->follow_marking_stacks();
   }
 }
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp

@@ -901,7 +901,6 @@ class PSParallelCompact : AllStatic {
   // Mark live objects
   static void marking_phase(ParCompactionManager* cm,
                             bool maximum_heap_compaction);
-  static void follow_stack(ParCompactionManager* cm);
   static void follow_weak_klass_links();
   static void follow_mdo_weak_refs();

@@ -1276,7 +1275,7 @@ inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
       }
     }
   }
-  follow_stack(cm);
+  cm->follow_marking_stacks();
 }
 
 template <class T>
src/share/vm/gc_implementation/shared/markSweep.cpp

@@ -25,8 +25,9 @@
 #include "incls/_precompiled.incl"
 #include "incls/_markSweep.cpp.incl"
 
-GrowableArray<oop>*     MarkSweep::_marking_stack       = NULL;
-GrowableArray<Klass*>*  MarkSweep::_revisit_klass_stack = NULL;
+GrowableArray<oop>*          MarkSweep::_marking_stack       = NULL;
+GrowableArray<ObjArrayTask>* MarkSweep::_objarray_stack      = NULL;
+GrowableArray<Klass*>*       MarkSweep::_revisit_klass_stack = NULL;
 GrowableArray<DataLayout*>*  MarkSweep::_revisit_mdo_stack   = NULL;
 
 GrowableArray<oop>*     MarkSweep::_preserved_oop_stack = NULL;

@@ -104,11 +105,19 @@ void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); }
 void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
 
 void MarkSweep::follow_stack() {
-  while (!_marking_stack->is_empty()) {
-    oop obj = _marking_stack->pop();
-    assert (obj->is_gc_marked(), "p must be marked");
-    obj->follow_contents();
-  }
+  do {
+    while (!_marking_stack->is_empty()) {
+      oop obj = _marking_stack->pop();
+      assert (obj->is_gc_marked(), "p must be marked");
+      obj->follow_contents();
+    }
+    // Process ObjArrays one at a time to avoid marking stack bloat.
+    if (!_objarray_stack->is_empty()) {
+      ObjArrayTask task = _objarray_stack->pop();
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(task.obj(), task.index());
+    }
+  } while (!_marking_stack->is_empty() || !_objarray_stack->is_empty());
 }
 
 MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;
src/share/vm/gc_implementation/shared/markSweep.hpp

@@ -110,8 +110,9 @@ class MarkSweep : AllStatic {
   // Vars
   //
  protected:
-  // Traversal stack used during phase1
+  // Traversal stacks used during phase1
   static GrowableArray<oop>*             _marking_stack;
+  static GrowableArray<ObjArrayTask>*    _objarray_stack;
   // Stack for live klasses to revisit at end of marking phase
   static GrowableArray<Klass*>*          _revisit_klass_stack;
   // Set (stack) of MDO's to revisit at end of marking phase

@@ -188,6 +189,7 @@ class MarkSweep : AllStatic {
   template <class T> static inline void mark_and_follow(T* p);
   // Check mark and maybe push on marking stack
   template <class T> static inline void mark_and_push(T* p);
+  static inline void push_objarray(oop obj, size_t index);
 
   static void follow_stack();   // Empty marking stack.
src/share/vm/gc_implementation/shared/markSweep.inline.hpp

@@ -77,6 +77,12 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
   }
 }
 
+void MarkSweep::push_objarray(oop obj, size_t index) {
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  _objarray_stack->push(task);
+}
+
 template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
src/share/vm/includeDB_core

@@ -2726,8 +2726,10 @@ markOop.inline.hpp markOop.hpp
 markSweep.cpp                           compileBroker.hpp
 markSweep.cpp                           methodDataOop.hpp
+markSweep.cpp                           objArrayKlass.inline.hpp
 
 markSweep.hpp                           collectedHeap.hpp
+markSweep.hpp                           taskqueue.hpp
 
 memRegion.cpp                           globals.hpp
 memRegion.cpp                           memRegion.hpp

@@ -3057,8 +3059,10 @@ objArrayKlass.cpp copy.hpp
 objArrayKlass.cpp                       genOopClosures.inline.hpp
 objArrayKlass.cpp                       handles.inline.hpp
 objArrayKlass.cpp                       instanceKlass.hpp
+objArrayKlass.cpp                       markSweep.inline.hpp
 objArrayKlass.cpp                       mutexLocker.hpp
 objArrayKlass.cpp                       objArrayKlass.hpp
+objArrayKlass.cpp                       objArrayKlass.inline.hpp
 objArrayKlass.cpp                       objArrayKlassKlass.hpp
 objArrayKlass.cpp                       objArrayOop.hpp
 objArrayKlass.cpp                       oop.inline.hpp

@@ -3069,11 +3073,12 @@ objArrayKlass.cpp systemDictionary.hpp
 objArrayKlass.cpp                       universe.inline.hpp
 objArrayKlass.cpp                       vmSymbols.hpp
 
 objArrayKlass.hpp                       arrayKlass.hpp
 objArrayKlass.hpp                       instanceKlass.hpp
 objArrayKlass.hpp                       specialized_oop_closures.hpp
 
+objArrayKlass.inline.hpp                objArrayKlass.hpp
+
 objArrayKlassKlass.cpp                  collectedHeap.inline.hpp
 objArrayKlassKlass.cpp                  instanceKlass.hpp
 objArrayKlassKlass.cpp                  javaClasses.hpp

@@ -4099,6 +4104,7 @@ task.cpp timer.hpp
 task.hpp                                top.hpp
 
 taskqueue.cpp                           debug.hpp
+taskqueue.cpp                           oop.inline.hpp
 taskqueue.cpp                           os.hpp
 taskqueue.cpp                           taskqueue.hpp
 taskqueue.cpp                           thread_<os_family>.inline.hpp
src/share/vm/includeDB_gc_parallel

@@ -115,10 +115,14 @@ objArrayKlass.cpp heapRegionSeq.inline.hpp
 objArrayKlass.cpp                       g1CollectedHeap.inline.hpp
 objArrayKlass.cpp                       g1OopClosures.inline.hpp
 objArrayKlass.cpp                       oop.pcgc.inline.hpp
+objArrayKlass.cpp                       psCompactionManager.hpp
 objArrayKlass.cpp                       psPromotionManager.inline.hpp
 objArrayKlass.cpp                       psScavenge.inline.hpp
 objArrayKlass.cpp                       parOopClosures.inline.hpp
 
+objArrayKlass.inline.hpp                psCompactionManager.inline.hpp
+objArrayKlass.inline.hpp                psParallelCompact.hpp
+
 oop.pcgc.inline.hpp                     parNewGeneration.hpp
 oop.pcgc.inline.hpp                     parallelScavengeHeap.hpp
 oop.pcgc.inline.hpp                     psCompactionManager.hpp
src/share/vm/memory/genMarkSweep.cpp

@@ -159,6 +159,7 @@ void GenMarkSweep::allocate_stacks() {
   _preserved_oop_stack = NULL;
 
   _marking_stack  = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  _objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
 
   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);

@@ -194,7 +195,6 @@ void GenMarkSweep::allocate_stacks() {
 
 void GenMarkSweep::deallocate_stacks() {
-
   if (!UseG1GC) {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
     gch->release_scratch();

@@ -208,6 +208,7 @@ void GenMarkSweep::deallocate_stacks() {
   }
 
   delete _marking_stack;
+  delete _objarray_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
src/share/vm/memory/genOopClosures.hpp

@@ -28,10 +28,10 @@ class CardTableRS;
 class CardTableModRefBS;
 class DefNewGeneration;
 
-template<class E> class GenericTaskQueue;
-typedef GenericTaskQueue<oop> OopTaskQueue;
-template<class E> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
+template<class E, unsigned int N> class GenericTaskQueue;
+typedef GenericTaskQueue<oop, TASKQUEUE_SIZE> OopTaskQueue;
+template<class T> class GenericTaskQueueSet;
+typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
 
 // Closure for iterating roots from a particular generation
 // Note: all classes deriving from this MUST call this do_barrier
src/share/vm/oops/objArrayKlass.cpp

@@ -314,24 +314,24 @@ void objArrayKlass::initialize(TRAPS) {
 
 void objArrayKlass::oop_follow_contents(oop obj) {
   assert (obj->is_array(), "obj must be array");
-  objArrayOop a = objArrayOop(obj);
-  a->follow_header();
-  ObjArrayKlass_OOP_ITERATE( \
-    a, p, \
-    /* we call mark_and_follow here to avoid excessive marking stack usage */ \
-    MarkSweep::mark_and_follow(p))
+  objArrayOop(obj)->follow_header();
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(obj, 0);
+  } else {
+    objarray_follow_contents<oop>(obj, 0);
+  }
 }
 
 #ifndef SERIALGC
 void objArrayKlass::oop_follow_contents(ParCompactionManager* cm,
                                         oop obj) {
-  assert (obj->is_array(), "obj must be array");
-  objArrayOop a = objArrayOop(obj);
-  a->follow_header(cm);
-  ObjArrayKlass_OOP_ITERATE( \
-    a, p, \
-    /* we call mark_and_follow here to avoid excessive marking stack usage */ \
-    PSParallelCompact::mark_and_follow(cm, p))
+  assert(obj->is_array(), "obj must be array");
+  objArrayOop(obj)->follow_header(cm);
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(cm, obj, 0);
+  } else {
+    objarray_follow_contents<oop>(cm, obj, 0);
+  }
 }
 #endif // SERIALGC
src/share/vm/oops/objArrayKlass.hpp

@@ -91,10 +91,18 @@ class objArrayKlass : public arrayKlass {
   // Garbage collection
   void oop_follow_contents(oop obj);
+  inline void oop_follow_contents(oop obj, int index);
+  template <class T> inline void objarray_follow_contents(oop obj, int index);
+
   int  oop_adjust_pointers(oop obj);
 
   // Parallel Scavenge and Parallel Old
   PARALLEL_GC_DECLS
+#ifndef SERIALGC
+  inline void oop_follow_contents(ParCompactionManager* cm, oop obj, int index);
+  template <class T> inline void
+    objarray_follow_contents(ParCompactionManager* cm, oop obj, int index);
+#endif // !SERIALGC
 
   // Iterators
   int oop_oop_iterate(oop obj, OopClosure* blk) {

@@ -131,5 +139,4 @@ class objArrayKlass : public arrayKlass {
   void oop_verify_on(oop obj, outputStream* st);
   void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
   void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
-
 };
src/share/vm/oops/objArrayKlass.inline.hpp (new file, mode 100644)
/*
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
void objArrayKlass::oop_follow_contents(oop obj, int index) {
  if (UseCompressedOops) {
    objarray_follow_contents<narrowOop>(obj, index);
  } else {
    objarray_follow_contents<oop>(obj, index);
  }
}

template <class T>
void objArrayKlass::objarray_follow_contents(oop obj, int index) {
  objArrayOop a = objArrayOop(obj);
  const size_t len = size_t(a->length());
  const size_t beg_index = size_t(index);
  assert(beg_index < len || len == 0, "index too large");

  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
  const size_t end_index = beg_index + stride;
  T* const base = (T*)a->base();
  T* const beg = base + beg_index;
  T* const end = base + end_index;

  // Push the non-NULL elements of the next stride on the marking stack.
  for (T* e = beg; e < end; e++) {
    MarkSweep::mark_and_push<T>(e);
  }

  if (end_index < len) {
    MarkSweep::push_objarray(a, end_index); // Push the continuation.
  }
}

#ifndef SERIALGC
void objArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj,
                                        int index) {
  if (UseCompressedOops) {
    objarray_follow_contents<narrowOop>(cm, obj, index);
  } else {
    objarray_follow_contents<oop>(cm, obj, index);
  }
}

template <class T>
void objArrayKlass::objarray_follow_contents(ParCompactionManager* cm, oop obj,
                                             int index) {
  objArrayOop a = objArrayOop(obj);
  const size_t len = size_t(a->length());
  const size_t beg_index = size_t(index);
  assert(beg_index < len || len == 0, "index too large");

  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
  const size_t end_index = beg_index + stride;
  T* const base = (T*)a->base();
  T* const beg = base + beg_index;
  T* const end = base + end_index;

  // Push the non-NULL elements of the next stride on the marking stack.
  for (T* e = beg; e < end; e++) {
    PSParallelCompact::mark_and_push<T>(cm, e);
  }

  if (end_index < len) {
    cm->push_objarray(a, end_index); // Push the continuation.
  }
}
#endif // #ifndef SERIALGC
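This new file is the heart of the change: rather than pushing every element of an object array at once, each call marks one stride of ObjArrayMarkingStride elements and pushes a single continuation for the remainder, bounding stack growth for arbitrarily large arrays. A self-contained model of the strategy (plain integers instead of oops):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <stack>
#include <utility>
#include <vector>

static const size_t kStride = 512;  // plays the role of ObjArrayMarkingStride

int main() {
  std::vector<int> array(2000);                  // the "object array"
  std::stack<std::pair<size_t, size_t> > tasks;  // (begin index, length) tasks
  tasks.push(std::make_pair((size_t)0, array.size()));

  size_t max_depth = 0;
  while (!tasks.empty()) {
    max_depth = std::max(max_depth, tasks.size());
    std::pair<size_t, size_t> t = tasks.top();
    tasks.pop();
    size_t beg = t.first, len = t.second;
    size_t end = beg + std::min(kStride, len - beg);
    for (size_t i = beg; i < end; i++) {
      // mark_and_push(array[i]) would go here
    }
    if (end < len) {
      tasks.push(std::make_pair(end, len));  // one continuation only
    }
  }
  printf("strides: %zu, max task depth: %zu\n",
         (array.size() + kStride - 1) / kStride, max_depth);  // depth stays 1
  return 0;
}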
src/share/vm/runtime/arguments.cpp

@@ -1346,9 +1346,7 @@ void Arguments::set_g1_gc_flags() {
   }
 
   if (FLAG_IS_DEFAULT(MarkStackSize)) {
-    // Size as a multiple of TaskQueueSuper::N which is larger
-    // for 64-bit.
-    FLAG_SET_DEFAULT(MarkStackSize, 128 * TaskQueueSuper::total_size());
+    FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
   }
   if (PrintGCDetails && Verbose) {
     tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
src/share/vm/runtime/globals.hpp

@@ -1795,6 +1795,10 @@ class CommandLineFlags {
   product(uintx, PreserveMarkStackSize, 1024,                               \
          "Size for stack used in promotion failure handling")              \
                                                                            \
+  develop(uintx, ObjArrayMarkingStride, 512,                               \
+         "Number of ObjArray elements to push onto the marking stack"     \
+         "before pushing a continuation entry")                            \
+                                                                           \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")          \
                                                                            \
   product_pd(bool, ResizeTLAB,                                             \
src/share/vm/utilities/globalDefinitions.hpp

@@ -827,6 +827,8 @@ const int badCodeHeapFreeVal = 0xDD; // value used to zap
 #define       badHeapWord       (::badHeapWordVal)
 #define       badJNIHandle      ((oop)::badJNIHandleVal)
 
+// Default TaskQueue size is 16K (32-bit) or 128K (64-bit)
+#define TASKQUEUE_SIZE (NOT_LP64(1<<14) LP64_ONLY(1<<17))
+
 //----------------------------------------------------------------------------------------------------
 // Utility functions for bitfield manipulations
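The macro's values, and where the comment's "16K or 128K" comes from (these are entry counts, not bytes):

#include <cstdio>

int main() {
  printf("32-bit: TASKQUEUE_SIZE = 1<<14 = %d entries (16K)\n",  1 << 14);
  printf("64-bit: TASKQUEUE_SIZE = 1<<17 = %d entries (128K)\n", 1 << 17);
  return 0;
}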
src/share/vm/utilities/taskqueue.cpp

@@ -31,10 +31,6 @@ uint ParallelTaskTerminator::_total_spins = 0;
 uint ParallelTaskTerminator::_total_peeks = 0;
 #endif
 
-bool TaskQueueSuper::peek() {
-  return _bottom != _age.top();
-}
-
 int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
   const int a =      16807;
   const int m = 2147483647;

@@ -180,6 +176,13 @@ void ParallelTaskTerminator::reset_for_reuse() {
   }
 }
 
+#ifdef ASSERT
+bool ObjArrayTask::is_valid() const {
+  return _obj != NULL && _obj->is_objArray() && _index > 0 &&
+    _index < objArrayOop(_obj)->length();
+}
+#endif // ASSERT
+
 bool RegionTaskQueueWithOverflow::is_empty() {
   return (_region_queue.size() == 0) &&
          (_overflow_stack->length() == 0);
src/share/vm/utilities/taskqueue.hpp

@@ -22,6 +22,7 @@
  *
  */
 
+template <unsigned int N>
 class TaskQueueSuper: public CHeapObj {
 protected:
   // Internal type for indexing the queue; also used for the tag.

@@ -30,10 +31,7 @@ protected:
   // The first free element after the last one pushed (mod N).
   volatile uint _bottom;
 
-  enum {
-    N = 1 << NOT_LP64(14) LP64_ONLY(17), // Queue size: 16K or 128K
-    MOD_N_MASK = N - 1                   // To compute x mod N efficiently.
-  };
+  enum { MOD_N_MASK = N - 1 };
 
   class Age {
   public:

@@ -84,12 +82,12 @@ protected:
   // Returns a number in the range [0..N).  If the result is "N-1", it should be
   // interpreted as 0.
-  uint dirty_size(uint bot, uint top) {
+  uint dirty_size(uint bot, uint top) const {
     return (bot - top) & MOD_N_MASK;
   }
 
   // Returns the size corresponding to the given "bot" and "top".
-  uint size(uint bot, uint top) {
+  uint size(uint bot, uint top) const {
     uint sz = dirty_size(bot, top);
     // Has the queue "wrapped", so that bottom is less than top?  There's a
     // complicated special case here.  A pair of threads could perform pop_local

@@ -111,17 +109,17 @@ protected:
 public:
   TaskQueueSuper() : _bottom(0), _age() {}
 
-  // Return "true" if the TaskQueue contains any tasks.
-  bool peek();
+  // Return true if the TaskQueue contains any tasks.
+  bool peek() { return _bottom != _age.top(); }
 
   // Return an estimate of the number of elements in the queue.
   // The "careful" version admits the possibility of pop_local/pop_global
   // races.
-  uint size() {
+  uint size() const {
     return size(_bottom, _age.top());
   }
 
-  uint dirty_size() {
+  uint dirty_size() const {
     return dirty_size(_bottom, _age.top());
   }

@@ -132,19 +130,36 @@ public:
   // Maximum number of elements allowed in the queue.  This is two less
   // than the actual queue size, for somewhat complicated reasons.
-  uint max_elems() { return N - 2; }
+  uint max_elems() const { return N - 2; }
 
   // Total size of queue.
   static const uint total_size() { return N; }
 };
 
-template<class E> class GenericTaskQueue: public TaskQueueSuper {
+template <class E, unsigned int N = TASKQUEUE_SIZE>
+class GenericTaskQueue: public TaskQueueSuper<N> {
+protected:
+  typedef typename TaskQueueSuper<N>::Age Age;
+  typedef typename TaskQueueSuper<N>::idx_t idx_t;
+
+  using TaskQueueSuper<N>::_bottom;
+  using TaskQueueSuper<N>::_age;
+  using TaskQueueSuper<N>::increment_index;
+  using TaskQueueSuper<N>::decrement_index;
+  using TaskQueueSuper<N>::dirty_size;
+
+public:
+  using TaskQueueSuper<N>::max_elems;
+  using TaskQueueSuper<N>::size;
+
 private:
   // Slow paths for push, pop_local.  (pop_global has no fast path.)
   bool push_slow(E t, uint dirty_n_elems);
   bool pop_local_slow(uint localBot, Age oldAge);
 
 public:
+  typedef E element_type;
+
   // Initializes the queue to empty.
   GenericTaskQueue();

@@ -175,19 +190,19 @@ private:
   volatile E* _elems;
 };
 
-template<class E>
-GenericTaskQueue<E>::GenericTaskQueue():TaskQueueSuper() {
+template<class E, unsigned int N>
+GenericTaskQueue<E, N>::GenericTaskQueue() {
   assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
 }
 
-template<class E>
-void GenericTaskQueue<E>::initialize() {
+template<class E, unsigned int N>
+void GenericTaskQueue<E, N>::initialize() {
   _elems = NEW_C_HEAP_ARRAY(E, N);
   guarantee(_elems != NULL, "Allocation failed.");
 }
 
-template<class E>
-void GenericTaskQueue<E>::oops_do(OopClosure* f) {
+template<class E, unsigned int N>
+void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
   // tty->print_cr("START OopTaskQueue::oops_do");
   uint iters = size();
   uint index = _bottom;

@@ -203,21 +218,21 @@ void GenericTaskQueue<E>::oops_do(OopClosure* f) {
   // tty->print_cr("END OopTaskQueue::oops_do");
 }
 
-template<class E>
-bool GenericTaskQueue<E>::push_slow(E t, uint dirty_n_elems) {
+template<class E, unsigned int N>
+bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
   if (dirty_n_elems == N - 1) {
     // Actually means 0, so do the push.
     uint localBot = _bottom;
-    _elems[localBot] = t;
+    // g++ complains if the volatile result of the assignment is unused.
+    const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
     return true;
   }
   return false;
 }
 
-template<class E>
-bool GenericTaskQueue<E>::
+template<class E, unsigned int N>
+bool GenericTaskQueue<E, N>::
 pop_local_slow(uint localBot, Age oldAge) {
   // This queue was observed to contain exactly one element; either this
   // thread will claim it, or a competing "pop_global".  In either case,

@@ -249,8 +264,8 @@ pop_local_slow(uint localBot, Age oldAge) {
   return false;
 }
 
-template<class E>
-bool GenericTaskQueue<E>::pop_global(E& t) {
+template<class E, unsigned int N>
+bool GenericTaskQueue<E, N>::pop_global(E& t) {
   Age oldAge = _age.get();
   uint localBot = _bottom;
   uint n_elems = size(localBot, oldAge.top());

@@ -258,7 +273,7 @@ bool GenericTaskQueue<E>::pop_global(E& t) {
     return false;
   }
 
-  t = _elems[oldAge.top()];
+  const_cast<E&>(t = _elems[oldAge.top()]);
   Age newAge(oldAge);
   newAge.increment();
   Age resAge = _age.cmpxchg(newAge, oldAge);

@@ -269,8 +284,8 @@ bool GenericTaskQueue<E>::pop_global(E& t) {
   return resAge == oldAge;
 }
 
-template<class E>
-GenericTaskQueue<E>::~GenericTaskQueue() {
+template<class E, unsigned int N>
+GenericTaskQueue<E, N>::~GenericTaskQueue() {
   FREE_C_HEAP_ARRAY(E, _elems);
 }

@@ -283,16 +298,18 @@ public:
   virtual bool peek() = 0;
 };
 
-template<class E> class GenericTaskQueueSet: public TaskQueueSetSuper {
+template<class T>
+class GenericTaskQueueSet: public TaskQueueSetSuper {
 private:
   uint _n;
-  GenericTaskQueue<E>** _queues;
+  T** _queues;
 
 public:
+  typedef typename T::element_type E;
+
   GenericTaskQueueSet(int n) : _n(n) {
-    typedef GenericTaskQueue<E>* GenericTaskQueuePtr;
+    typedef T* GenericTaskQueuePtr;
     _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
     guarantee(_queues != NULL, "Allocation failure.");
     for (int i = 0; i < n; i++) {
       _queues[i] = NULL;
     }

@@ -302,9 +319,9 @@ public:
   bool steal_best_of_2(uint queue_num, int* seed, E& t);
   bool steal_best_of_all(uint queue_num, int* seed, E& t);
 
-  void register_queue(uint i, GenericTaskQueue<E>* q);
+  void register_queue(uint i, T* q);
 
-  GenericTaskQueue<E>* queue(uint n);
+  T* queue(uint n);
 
   // The thread with queue number "queue_num" (and whose random number seed
   // is at "seed") is trying to steal a task from some other queue.  (It

@@ -316,27 +333,27 @@ public:
   bool peek();
 };
 
-template<class E>
-void GenericTaskQueueSet<E>::register_queue(uint i, GenericTaskQueue<E>* q) {
+template<class T>
+void GenericTaskQueueSet<T>::register_queue(uint i, T* q) {
   assert(i < _n, "index out of range.");
   _queues[i] = q;
 }
 
-template<class E>
-GenericTaskQueue<E>* GenericTaskQueueSet<E>::queue(uint i) {
+template<class T>
+T* GenericTaskQueueSet<T>::queue(uint i) {
   return _queues[i];
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::steal(uint queue_num, int* seed, E& t) {
+template<class T>
+bool GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
   for (uint i = 0; i < 2 * _n; i++)
     if (steal_best_of_2(queue_num, seed, t))
       return true;
   return false;
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::steal_best_of_all(uint queue_num, int* seed, E& t) {
+template<class T>
+bool GenericTaskQueueSet<T>::steal_best_of_all(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     int best_k;
     uint best_sz = 0;

@@ -359,8 +376,8 @@ bool GenericTaskQueueSet<E>::steal_best_of_all(uint queue_num, int* seed, E& t)
   }
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::steal_1_random(uint queue_num, int* seed, E& t) {
+template<class T>
+bool GenericTaskQueueSet<T>::steal_1_random(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     uint k = queue_num;
     while (k == queue_num) k = randomParkAndMiller(seed) % _n;

@@ -375,8 +392,8 @@ bool GenericTaskQueueSet<E>::steal_1_random(uint queue_num, int* seed, E& t) {
   }
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::steal_best_of_2(uint queue_num, int* seed, E& t) {
+template<class T>
+bool GenericTaskQueueSet<T>::steal_best_of_2(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     uint k1 = queue_num;
     while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;

@@ -397,8 +414,8 @@ bool GenericTaskQueueSet<E>::steal_best_of_2(uint queue_num, int* seed, E& t) {
   }
 }
 
-template<class E>
-bool GenericTaskQueueSet<E>::peek() {
+template<class T>
+bool GenericTaskQueueSet<T>::peek() {
   // Try all the queues.
   for (uint j = 0; j < _n; j++) {
     if (_queues[j]->peek())

@@ -468,14 +485,16 @@ public:
 #endif
 };
 
-template<class E> inline bool GenericTaskQueue<E>::push(E t) {
+template<class E, unsigned int N>
+inline bool GenericTaskQueue<E, N>::push(E t) {
   uint localBot = _bottom;
   assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
   idx_t top = _age.top();
   uint dirty_n_elems = dirty_size(localBot, top);
-  assert((dirty_n_elems >= 0) && (dirty_n_elems < N), "n_elems out of range.");
+  assert(dirty_n_elems < N, "n_elems out of range.");
   if (dirty_n_elems < max_elems()) {
-    _elems[localBot] = t;
+    // g++ complains if the volatile result of the assignment is unused.
+    const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
     return true;
   } else {

@@ -483,7 +502,8 @@ template<class E> inline bool GenericTaskQueue<E>::push(E t) {
   }
 }
 
-template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
+template<class E, unsigned int N>
+inline bool GenericTaskQueue<E, N>::pop_local(E& t) {
   uint localBot = _bottom;
   // This value cannot be N-1.  That can only occur as a result of
   // the assignment to bottom in this method.  If it does, this method

@@ -497,7 +517,7 @@ template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
   // This is necessary to prevent any read below from being reordered
   // before the store just above.
   OrderAccess::fence();
-  t = _elems[localBot];
+  const_cast<E&>(t = _elems[localBot]);
   // This is a second read of "age"; the "size()" above is the first.
   // If there's still at least one element in the queue, based on the
   // "_bottom" and "age" we've read, then there can be no interference with

@@ -514,17 +534,23 @@ template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
 }
 
 typedef oop Task;
-typedef GenericTaskQueue<Task>    OopTaskQueue;
-typedef GenericTaskQueueSet<Task> OopTaskQueueSet;
+typedef GenericTaskQueue<Task>            OopTaskQueue;
+typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
 
-#define COMPRESSED_OOP_MASK  1
+#ifdef _MSC_VER
+#pragma warning(push)
+// warning C4522: multiple assignment operators specified
+#pragma warning(disable:4522)
+#endif
 
 // This is a container class for either an oop* or a narrowOop*.
 // Both are pushed onto a task queue and the consumer will test is_narrow()
 // to determine which should be processed.
 class StarTask {
   void*  _holder;        // either union oop* or narrowOop*
+
+  enum { COMPRESSED_OOP_MASK = 1 };
+
  public:
   StarTask(narrowOop* p) {
     assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");

@@ -540,20 +566,61 @@ class StarTask {
     return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
   }
 
-  // Operators to preserve const/volatile in assignments required by gcc
-  void operator=(const volatile StarTask& t) volatile { _holder = t._holder; }
+  StarTask& operator=(const StarTask& t) {
+    _holder = t._holder;
+    return *this;
+  }
+  volatile StarTask& operator=(const volatile StarTask& t) volatile {
+    _holder = t._holder;
+    return *this;
+  }
 
   bool is_narrow() const {
     return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
   }
 };
 
-typedef GenericTaskQueue<StarTask> OopStarTaskQueue;
-typedef GenericTaskQueueSet<StarTask> OopStarTaskQueueSet;
+class ObjArrayTask
+{
+public:
+  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
+  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
+    assert(idx <= size_t(max_jint), "too big");
+  }
+  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }
+
+  ObjArrayTask& operator =(const ObjArrayTask& t) {
+    _obj = t._obj;
+    _index = t._index;
+    return *this;
+  }
+  volatile ObjArrayTask&
+  operator =(const volatile ObjArrayTask& t) volatile {
+    _obj = t._obj;
+    _index = t._index;
+    return *this;
+  }
+
+  inline oop obj()   const { return _obj; }
+  inline int index() const { return _index; }
+
+  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.
+
+private:
+  oop _obj;
+  int _index;
+};
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+typedef GenericTaskQueue<StarTask>            OopStarTaskQueue;
+typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;
 
 typedef size_t RegionTask;  // index for region
-typedef GenericTaskQueue<RegionTask>    RegionTaskQueue;
-typedef GenericTaskQueueSet<RegionTask> RegionTaskQueueSet;
+typedef GenericTaskQueue<RegionTask>         RegionTaskQueue;
+typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;
 
 class RegionTaskQueueWithOverflow: public CHeapObj {
  protected:
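Most of this file's churn comes from making the queue capacity a template parameter, but the underlying index arithmetic is unchanged: because N is a power of two, (bot - top) & (N - 1) computes the element count modulo N without a division, which is why dirty_size() and MOD_N_MASK are written the way they are. A minimal demonstration of the mask trick, parameterised like the new GenericTaskQueue<E, N>:

#include <cassert>
#include <cstdio>

template <unsigned int N>  // N must be a power of two
struct RingIndex {
  static const unsigned int MOD_N_MASK = N - 1;
  static unsigned int dirty_size(unsigned int bot, unsigned int top) {
    return (bot - top) & MOD_N_MASK;  // correct even after bot wraps past N
  }
};

int main() {
  typedef RingIndex<1 << 14> Q;                  // 16K entries, the 32-bit size
  assert(Q::dirty_size(10, 4) == 6);             // simple case
  assert(Q::dirty_size(2, (1 << 14) - 2) == 4);  // wrapped case
  printf("mask-based modular size works across wrap-around\n");
  return 0;
}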