Commit 68ac085d

Merge commit, authored on Nov 26, 2008 by iveresov.
Parents: bf28137a, afdb2f2e

Showing 22 changed files with 233 additions and 213 deletions (+233 / -213).
src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp                 +9   -3
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp  +52  -37
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp  +12  -2
src/share/vm/gc_implementation/g1/concurrentMark.cpp                                  +7   -17
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                                 +2   -1
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp                                     +8   -18
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp                            +15  -21
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp                       +3   -13
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp                 +4   -15
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp                        +4   -11
src/share/vm/includeDB_core                                                           +1   -0
src/share/vm/memory/defNewGeneration.cpp                                              +4   -10
src/share/vm/memory/genCollectedHeap.cpp                                              +2   -1
src/share/vm/memory/genMarkSweep.cpp                                                  +4   -14
src/share/vm/memory/referencePolicy.cpp                                               +10  -0
src/share/vm/memory/referencePolicy.hpp                                               +7   -1
src/share/vm/memory/referenceProcessor.cpp                                            +68  -43
src/share/vm/memory/referenceProcessor.hpp                                            +16  -3
src/share/vm/memory/universe.cpp                                                      +1   -1
src/share/vm/oops/oop.inline.hpp                                                      +1   -1
src/share/vm/runtime/globals.hpp                                                      +1   -1
src/share/vm/utilities/macros.hpp                                                     +2   -0
src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp

@@ -325,24 +325,30 @@ class Par_PushOrMarkClosure: public OopClosure {
 // For objects in CMS generation, this closure marks
 // given objects (transitively) as being reachable/live.
 // This is currently used during the (weak) reference object
-// processing phase of the CMS final checkpoint step.
+// processing phase of the CMS final checkpoint step, as
+// well as during the concurrent precleaning of the discovered
+// reference lists.
 class CMSKeepAliveClosure: public OopClosure {
  private:
   CMSCollector* _collector;
   const MemRegion _span;
   CMSMarkStack* _mark_stack;
   CMSBitMap*    _bit_map;
+  bool          _concurrent_precleaning;
 protected:
   DO_OOP_WORK_DEFN
 public:
   CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                      CMSBitMap* bit_map, CMSMarkStack* mark_stack):
+                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
+                      bool cpc):
     _collector(collector),
     _span(span),
     _bit_map(bit_map),
-    _mark_stack(mark_stack) {
+    _mark_stack(mark_stack),
+    _concurrent_precleaning(cpc) {
     assert(!_span.is_empty(), "Empty span could spell trouble");
   }
+  bool concurrent_precleaning() const { return _concurrent_precleaning; }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
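The same closure now serves both the stop-world remark and the new concurrent precleaning path; the flag captured at construction selects the overflow strategy inside do_oop (see the concurrentMarkSweepGeneration.cpp hunks below). As a minimal sketch of the idiom, with hypothetical names rather than code from this commit:

// Hypothetical sketch of a mode-carrying OopClosure; not code from this commit.
class ModalKeepAlive: public OopClosure {
  const bool _concurrent_precleaning;   // fixed at construction time
 public:
  explicit ModalKeepAlive(bool cpc) : _concurrent_precleaning(cpc) { }
  virtual void do_oop(oop* p) {
    if (_concurrent_precleaning) {
      // concurrent path: must not touch collector-global overflow state
    } else {
      // stop-world remark path: free to use the shared overflow list
    }
  }
  virtual void do_oop(narrowOop* p) { /* same dispatch for compressed oops */ }
};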
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

@@ -538,6 +538,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   _survivor_chunk_capacity(0),  // -- ditto --
   _survivor_chunk_index(0),     // -- ditto --
   _ser_pmc_preclean_ovflw(0),
+  _ser_kac_preclean_ovflw(0),
   _ser_pmc_remark_ovflw(0),
   _par_pmc_remark_ovflw(0),
   _ser_kac_ovflw(0),

@@ -1960,6 +1961,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
   ref_processor()->set_enqueuing_is_done(false);
   ref_processor()->enable_discovery();
+  ref_processor()->snap_policy(clear_all_soft_refs);
   // If an asynchronous collection finishes, the _modUnionTable is
   // all clear. If we are assuming the collection from an asynchronous
   // collection, clear the _modUnionTable.

@@ -2383,6 +2385,9 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
     Universe::verify(true);
   }

+  // Snapshot the soft reference policy to be used in this collection cycle.
+  ref_processor()->snap_policy(clear_all_soft_refs);
+
   bool init_mark_was_synchronous = false; // until proven otherwise
   while (_collectorState != Idling) {
     if (TraceCMSState) {

@@ -4388,10 +4393,10 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
     CMSPrecleanRefsYieldClosure yield_cl(this);
     assert(rp->span().equals(_span), "Spans should be equal");
     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
-                                   &_markStack);
+                                   &_markStack, true /* preclean */);
     CMSDrainMarkingStackClosure complete_trace(this,
-                                   _span, &_markBitMap, &_markStack,
-                                   &keep_alive);
+                                   _span, &_markBitMap, &_markStack,
+                                   &keep_alive, true /* preclean */);

     // We don't want this step to interfere with a young
     // collection because we don't want to take CPU

@@ -4590,11 +4595,11 @@ size_t CMSCollector::preclean_mod_union_table(
     if (!dirtyRegion.is_empty()) {
       assert(numDirtyCards > 0, "consistency check");
       HeapWord* stop_point = NULL;
-      stopTimer();
-      CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
-                               bitMapLock());
-      startTimer();
       {
+        stopTimer();
+        CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
+                                 bitMapLock());
+        startTimer();
         verify_work_stacks_empty();
         verify_overflow_empty();
         sample_eden();

@@ -4611,10 +4616,6 @@ size_t CMSCollector::preclean_mod_union_table(
         assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
                (_collectorState == AbortablePreclean && should_abort_preclean()),
                "Unparsable objects should only be in perm gen.");
-        stopTimer();
-        CMSTokenSyncWithLocks ts(true, bitMapLock());
-        startTimer();
         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
         if (should_abort_preclean()) {
           break; // out of preclean loop

@@ -4852,17 +4853,19 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
   // recurrence of that condition.
   assert(_markStack.isEmpty(), "No grey objects");
   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
-                     _ser_kac_ovflw;
+                     _ser_kac_ovflw + _ser_kac_preclean_ovflw;
   if (ser_ovflw > 0) {
     if (PrintCMSStatistics != 0) {
       gclog_or_tty->print_cr("Marking stack overflow (benign) "
-        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
+        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
+        ", kac_preclean="SIZE_FORMAT")",
         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
-        _ser_kac_ovflw);
+        _ser_kac_ovflw, _ser_kac_preclean_ovflw);
     }
     _markStack.expand();
     _ser_pmc_remark_ovflw = 0;
     _ser_pmc_preclean_ovflw = 0;
+    _ser_kac_preclean_ovflw = 0;
     _ser_kac_ovflw = 0;
   }
   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {

@@ -5675,40 +5678,29 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   ResourceMark rm;
   HandleMark   hm;
-  ReferencePolicy* soft_ref_policy;

-  assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
+  ReferenceProcessor* rp = ref_processor();
+  assert(rp->span().equals(_span), "Spans should be equal");
+  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
   // Process weak references.
-  if (clear_all_soft_refs) {
-    soft_ref_policy = new AlwaysClearPolicy();
-  } else {
-#ifdef COMPILER2
-    soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-    soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-  }
+  rp->snap_policy(clear_all_soft_refs);
   verify_work_stacks_empty();

-  ReferenceProcessor* rp = ref_processor();
-  assert(rp->span().equals(_span), "Spans should be equal");
   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
-                                          &_markStack);
+                                          &_markStack, false /* !preclean */);
   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                 _span, &_markBitMap, &_markStack,
-                                &cmsKeepAliveClosure);
+                                &cmsKeepAliveClosure, false /* !preclean */);
   {
     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
     if (rp->processing_is_mt()) {
       CMSRefProcTaskExecutor task_executor(*this);
-      rp->process_discovered_references(soft_ref_policy,
-                                        &_is_alive_closure,
+      rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         &task_executor);
     } else {
-      rp->process_discovered_references(soft_ref_policy,
-                                        &_is_alive_closure,
+      rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         NULL);

@@ -6163,8 +6155,8 @@ void CMSCollector::verify_ok_to_terminate() const {
 #endif
(whitespace-only change: the assert below was re-indented)
 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
          "missing Printezis mark?");
   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
   size_t size = pointer_delta(nextOneAddr + 1, addr);
   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),

@@ -8302,8 +8294,29 @@ void CMSKeepAliveClosure::do_oop(oop obj) {
         }
       )
       if (simulate_overflow || !_mark_stack->push(obj)) {
-        _collector->push_on_overflow_list(obj);
-        _collector->_ser_kac_ovflw++;
+        if (_concurrent_precleaning) {
+          // We dirty the overflown object and let the remark
+          // phase deal with it.
+          assert(_collector->overflow_list_is_empty(), "Error");
+          // In the case of object arrays, we need to dirty all of
+          // the cards that the object spans. No locking or atomics
+          // are needed since no one else can be mutating the mod union
+          // table.
+          if (obj->is_objArray()) {
+            size_t sz = obj->size();
+            HeapWord* end_card_addr =
+              (HeapWord*)round_to((intptr_t)(addr + sz), CardTableModRefBS::card_size);
+            MemRegion redirty_range = MemRegion(addr, end_card_addr);
+            assert(!redirty_range.is_empty(), "Arithmetical tautology");
+            _collector->_modUnionTable.mark_range(redirty_range);
+          } else {
+            _collector->_modUnionTable.mark(addr);
+          }
+          _collector->_ser_kac_preclean_ovflw++;
+        } else {
+          _collector->push_on_overflow_list(obj);
+          _collector->_ser_kac_ovflw++;
+        }
       }
     }
   }
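In the object-array branch above, the array's end address is rounded up to a card boundary so that every card the array spans gets dirtied in the mod union table. A standalone sketch of that alignment arithmetic, assuming 512-byte cards (the usual HotSpot card size; the real code takes it from CardTableModRefBS::card_size):

// Round the object's end address up to the next card boundary.
// Sketch only; 512-byte cards assumed.
const intptr_t card_size = 512;
HeapWord* obj_end       = addr + sz;   // addr, sz as in the hunk above
HeapWord* end_card_addr = (HeapWord*)
    (((intptr_t)obj_end + card_size - 1) & ~(card_size - 1));
// MemRegion(addr, end_card_addr) now covers every card the array overlaps.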
@@ -8400,6 +8413,8 @@ const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
 void CMSDrainMarkingStackClosure::do_void() {
   // the max number to take from overflow list at a time
   const size_t num = _mark_stack->capacity()/4;
+  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
+         "Overflow list should be NULL during concurrent phases");
   while (!_mark_stack->isEmpty() ||
          // if stack is empty, check the overflow list
          _collector->take_from_overflow_list(num, _mark_stack)) {
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

@@ -592,6 +592,7 @@ class CMSCollector: public CHeapObj {
   size_t _ser_pmc_preclean_ovflw;
   size_t _ser_pmc_remark_ovflw;
   size_t _par_pmc_remark_ovflw;
+  size_t _ser_kac_preclean_ovflw;
   size_t _ser_kac_ovflw;
   size_t _par_kac_ovflw;
   NOT_PRODUCT(size_t _num_par_pushes;)

@@ -1749,21 +1750,30 @@ class SweepClosure: public BlkClosureCareful {
 // work-routine/closure used to complete transitive
 // marking of objects as live after a certain point
 // in which an initial set has been completely accumulated.
+// This closure is currently used both during the final
+// remark stop-world phase, as well as during the concurrent
+// precleaning of the discovered reference lists.
 class CMSDrainMarkingStackClosure: public VoidClosure {
   CMSCollector*        _collector;
   MemRegion            _span;
   CMSMarkStack*        _mark_stack;
   CMSBitMap*           _bit_map;
   CMSKeepAliveClosure* _keep_alive;
+  bool                 _concurrent_precleaning;
 public:
   CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                               CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                              CMSKeepAliveClosure* keep_alive):
+                              CMSKeepAliveClosure* keep_alive,
+                              bool cpc):
     _collector(collector),
     _span(span),
     _bit_map(bit_map),
     _mark_stack(mark_stack),
-    _keep_alive(keep_alive) { }
+    _keep_alive(keep_alive),
+    _concurrent_precleaning(cpc) {
+    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
+           "Mismatch");
+  }

   void do_void();
 };
src/share/vm/gc_implementation/g1/concurrentMark.cpp

@@ -811,6 +811,7 @@ void ConcurrentMark::checkpointRootsInitialPost() {
   ReferenceProcessor* rp = g1h->ref_processor();
   rp->verify_no_references_recorded();
   rp->enable_discovery(); // enable ("weak") refs discovery
+  rp->snap_policy(false); // snapshot the soft ref policy to be used in this cycle

   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold);

@@ -1829,32 +1830,21 @@ class G1CMDrainMarkingStackClosure: public VoidClosure {
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   ResourceMark rm;
   HandleMark   hm;
-  ReferencePolicy* soft_ref_policy;
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  ReferenceProcessor* rp = g1h->ref_processor();

   // Process weak references.
-  if (clear_all_soft_refs) {
-    soft_ref_policy = new AlwaysClearPolicy();
-  } else {
-#ifdef COMPILER2
-    soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-    soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif
-  }
+  rp->snap_policy(clear_all_soft_refs);

   assert(_markStack.isEmpty(), "mark stack should be empty");

-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  G1CMIsAliveClosure g1IsAliveClosure(g1);
-
-  G1CMKeepAliveClosure g1KeepAliveClosure(g1, this, nextMarkBitMap());
+  G1CMIsAliveClosure   g1IsAliveClosure  (g1h);
+  G1CMKeepAliveClosure g1KeepAliveClosure(g1h, this, nextMarkBitMap());
   G1CMDrainMarkingStackClosure
     g1DrainMarkingStackClosure(nextMarkBitMap(), &_markStack,
                                &g1KeepAliveClosure);

   // XXXYYY  Also: copy the parallel ref processing code from CMS.
-  ReferenceProcessor* rp = g1->ref_processor();
-  rp->process_discovered_references(soft_ref_policy,
-                                    &g1IsAliveClosure,
+  rp->process_discovered_references(&g1IsAliveClosure,
                                     &g1KeepAliveClosure,
                                     &g1DrainMarkingStackClosure,
                                     NULL);
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -891,6 +891,7 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);

     ref_processor()->enable_discovery();
+    ref_processor()->snap_policy(clear_all_soft_refs);

     // Do collection work
     {

@@ -2463,7 +2464,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
   COMPILER2_PRESENT(DerivedPointerTable::clear());

-  // We want to turn off ref discovere, if necessary, and turn it back on
+  // We want to turn off ref discovery, if necessary, and turn it back on
   // on again later if we do.
   bool was_enabled = ref_processor()->discovery_enabled();
   if (was_enabled) ref_processor()->disable_discovery();
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

@@ -33,8 +33,9 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
-  GenMarkSweep::_ref_processor = rp;
   assert(rp != NULL, "should be non-NULL");
+  GenMarkSweep::_ref_processor = rp;
+  rp->snap_policy(clear_all_softrefs);

   // When collecting the permanent generation methodOops may be moving,
   // so we either have to flush all bcp data or convert it into bci.

@@ -121,23 +122,12 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                 &GenMarkSweep::follow_root_closure);

   // Process reference objects found during marking
-  ReferencePolicy* soft_ref_policy;
-  if (clear_all_softrefs) {
-    soft_ref_policy = new AlwaysClearPolicy();
-  } else {
-#ifdef COMPILER2
-    soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-    soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif
-  }
-  assert(soft_ref_policy != NULL, "No soft reference policy");
-  GenMarkSweep::ref_processor()->process_discovered_references(
-    soft_ref_policy,
-    &GenMarkSweep::is_alive,
-    &GenMarkSweep::keep_alive,
-    &GenMarkSweep::follow_stack_closure,
-    NULL);
+  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
+  rp->snap_policy(clear_all_softrefs);
+  rp->process_discovered_references(&GenMarkSweep::is_alive,
+                                    &GenMarkSweep::keep_alive,
+                                    &GenMarkSweep::follow_stack_closure,
+                                    NULL);

   // Follow system dictionary roots and unload classes
   bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

@@ -759,17 +759,12 @@ void ParNewGeneration::collect(bool full,
                thread_state_set.steals(),
                thread_state_set.pops()+thread_state_set.steals());
   }
(whitespace-only change: the assert below was re-wrapped)
   assert(thread_state_set.pushes() == thread_state_set.pops() +
          thread_state_set.steals(),
          "Or else the queues are leaky.");

-  // For now, process discovered weak refs sequentially.
-#ifdef COMPILER2
-  ReferencePolicy* soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-  ReferencePolicy* soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
   // Process (weak) reference objects found during scavenge.
+  ReferenceProcessor* rp = ref_processor();
   IsAliveClosure is_alive(this);
   ScanWeakRefClosure scan_weak_ref(this);
   KeepAliveClosure keep_alive(&scan_weak_ref);

@@ -778,18 +773,17 @@ void ParNewGeneration::collect(bool full,
   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
     &scan_without_gc_barrier, &scan_with_gc_barrier);
-  if (ref_processor()->processing_is_mt()) {
+  rp->snap_policy(clear_all_soft_refs);
+  if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
-    ref_processor()->process_discovered_references(
-        soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
-        &task_executor);
+    rp->process_discovered_references(&is_alive, &keep_alive,
+                                      &evacuate_followers, &task_executor);
   } else {
     thread_state_set.flush();
     gch->set_par_threads(0);  // 0 ==> non-parallel.
     gch->save_marks();
-    ref_processor()->process_discovered_references(
-        soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
-        NULL);
+    rp->process_discovered_references(&is_alive, &keep_alive,
+                                      &evacuate_followers, NULL);
   }
   if (!promotion_failed()) {
     // Swap the survivor spaces.

@@ -851,14 +845,14 @@ void ParNewGeneration::collect(bool full,
   SpecializationStats::print();

-  ref_processor()->set_enqueuing_is_done(true);
-  if (ref_processor()->processing_is_mt()) {
+  rp->set_enqueuing_is_done(true);
+  if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
-    ref_processor()->enqueue_discovered_references(&task_executor);
+    rp->enqueue_discovered_references(&task_executor);
   } else {
-    ref_processor()->enqueue_discovered_references(NULL);
+    rp->enqueue_discovered_references(NULL);
   }
-  ref_processor()->verify_no_references_recorded();
+  rp->verify_no_references_recorded();
 }

 static int sum;

@@ -1211,7 +1205,7 @@ ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
   int n = 0;
   while (cur != NULL) {
     oop obj_to_push = cur->forwardee();
-    oop next        = oop(cur->klass());
+    oop next        = oop(cur->klass_or_null());
     cur->set_klass(obj_to_push->klass());
     if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
       obj_to_push = cur;
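Context for the last hunk, pairing with the oop.inline.hpp change later in this commit: objects parked on ParNew's overflow list are chained through their temporarily clobbered klass field, and that chain may now legitimately end in NULL, so the link must be read with klass_or_null() rather than klass(). A conceptual sketch of the pop side, with hypothetical helper names, not code from this commit:

// Conceptual sketch only; hypothetical names.
oop pop_overflow(oop& list_head) {
  oop cur = list_head;
  if (cur == NULL) return NULL;               // list exhausted
  oop next = oop(cur->klass_or_null());       // link lives in the klass slot; NULL at the tail
  cur->set_klass(cur->forwardee()->klass());  // restore the real klass before handing the object out
  list_head = next;
  return cur;
}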
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

@@ -172,6 +172,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   COMPILER2_PRESENT(DerivedPointerTable::clear());

   ref_processor()->enable_discovery();
+  ref_processor()->snap_policy(clear_all_softrefs);

   mark_sweep_phase1(clear_all_softrefs);

@@ -517,20 +518,9 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Process reference objects found during marking
   {
-    ReferencePolicy* soft_ref_policy;
-    if (clear_all_softrefs) {
-      soft_ref_policy = new AlwaysClearPolicy();
-    } else {
-#ifdef COMPILER2
-      soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-      soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-    }
-    assert(soft_ref_policy != NULL, "No soft reference policy");
+    ref_processor()->snap_policy(clear_all_softrefs);
     ref_processor()->process_discovered_references(
-      soft_ref_policy, is_alive_closure(), mark_and_push_closure(),
-      follow_stack_closure(), NULL);
+      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
   }

   // Follow system dictionary roots and unload classes
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

@@ -1578,6 +1578,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   COMPILER2_PRESENT(DerivedPointerTable::clear());

   ref_processor()->enable_discovery();
+  ref_processor()->snap_policy(maximum_heap_compaction);

   bool marked_for_unloading = false;

@@ -1894,26 +1895,14 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   // Process reference objects found during marking
   {
     TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
-    ReferencePolicy* soft_ref_policy;
-    if (maximum_heap_compaction) {
-      soft_ref_policy = new AlwaysClearPolicy();
-    } else {
-#ifdef COMPILER2
-      soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-      soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-    }
-    assert(soft_ref_policy != NULL, "No soft reference policy");
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       ref_processor()->process_discovered_references(
-        soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
-        &follow_stack_closure, &task_executor);
+        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
+        &task_executor);
     } else {
       ref_processor()->process_discovered_references(
-        soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
-        &follow_stack_closure, NULL);
+        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
+        NULL);
     }
   }
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp

@@ -330,6 +330,7 @@ bool PSScavenge::invoke_no_policy() {
   COMPILER2_PRESENT(DerivedPointerTable::clear());

   reference_processor()->enable_discovery();
+  reference_processor()->snap_policy(false);

   // We track how much was promoted to the next generation for
   // the AdaptiveSizePolicy.

@@ -394,24 +395,16 @@ bool PSScavenge::invoke_no_policy() {
     // Process reference objects discovered during scavenge
     {
-#ifdef COMPILER2
-      ReferencePolicy* soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-      ReferencePolicy* soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
+      reference_processor()->snap_policy(false); // not always_clear
       PSKeepAliveClosure keep_alive(promotion_manager);
       PSEvacuateFollowersClosure evac_followers(promotion_manager);
-      assert(soft_ref_policy != NULL, "No soft reference policy");
       if (reference_processor()->processing_is_mt()) {
         PSRefProcTaskExecutor task_executor;
         reference_processor()->process_discovered_references(
-          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
-          &task_executor);
+          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
       } else {
         reference_processor()->process_discovered_references(
-          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
-          NULL);
+          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
       }
     }
src/share/vm/includeDB_core

@@ -3434,6 +3434,7 @@
 referenceProcessor.cpp    referenceProcessor.hpp
 referenceProcessor.cpp    systemDictionary.hpp

 referenceProcessor.hpp    instanceRefKlass.hpp
+referenceProcessor.hpp    referencePolicy.hpp

 reflection.cpp            arguments.hpp
 reflection.cpp            handles.inline.hpp
src/share/vm/memory/defNewGeneration.cpp

@@ -540,14 +540,6 @@ void DefNewGeneration::collect(bool full,
   assert(gch->no_allocs_since_save_marks(0),
          "save marks have not been newly set.");

-  // Weak refs.
-  // FIXME: Are these storage leaks, or are they resource objects?
-#ifdef COMPILER2
-  ReferencePolicy* soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-  ReferencePolicy* soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
   // Not very pretty.
   CollectorPolicy* cp = gch->collector_policy();

@@ -574,8 +566,10 @@ void DefNewGeneration::collect(bool full,
   evacuate_followers.do_void();

   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
-  ref_processor()->process_discovered_references(
-    soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
+  ReferenceProcessor* rp = ref_processor();
+  rp->snap_policy(clear_all_soft_refs);
+  rp->process_discovered_references(&is_alive, &keep_alive,
+                                    &evacuate_followers, NULL);
   if (!promotion_failed()) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
src/share/vm/memory/genCollectedHeap.cpp

@@ -525,8 +525,9 @@ void GenCollectedHeap::do_collection(bool full,
         if (rp->discovery_is_atomic()) {
           rp->verify_no_references_recorded();
           rp->enable_discovery();
+          rp->snap_policy(clear_all_soft_refs);
         } else {
-          // collect() will enable discovery as appropriate
+          // collect() below will enable discovery as appropriate
         }
         _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
         if (!rp->enqueuing_is_done()) {
src/share/vm/memory/genMarkSweep.cpp

@@ -31,8 +31,9 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
-  _ref_processor = rp;
   assert(rp != NULL, "should be non-NULL");
+  _ref_processor = rp;
+  rp->snap_policy(clear_all_softrefs);

   TraceTime t1("Full GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);

@@ -245,20 +246,9 @@ void GenMarkSweep::mark_sweep_phase1(int level,
   // Process reference objects found during marking
   {
-    ReferencePolicy* soft_ref_policy;
-    if (clear_all_softrefs) {
-      soft_ref_policy = new AlwaysClearPolicy();
-    } else {
-#ifdef COMPILER2
-      soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-      soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-    }
-    assert(soft_ref_policy != NULL, "No soft reference policy");
+    ref_processor()->snap_policy(clear_all_softrefs);
     ref_processor()->process_discovered_references(
-      soft_ref_policy, &is_alive, &keep_alive, &follow_stack_closure, NULL);
+      &is_alive, &keep_alive, &follow_stack_closure, NULL);
   }

   // Follow system dictionary roots and unload classes
src/share/vm/memory/referencePolicy.cpp

@@ -26,6 +26,11 @@
 # include "incls/_referencePolicy.cpp.incl"

 LRUCurrentHeapPolicy::LRUCurrentHeapPolicy() {
+  snap();
+}
+
+// Capture state (of-the-VM) information needed to evaluate the policy
+void LRUCurrentHeapPolicy::snap() {
   _max_interval = (Universe::get_heap_free_at_last_gc() / M) *
                   SoftRefLRUPolicyMSPerMB;
   assert(_max_interval >= 0, "Sanity check");
 }

@@ -47,6 +52,11 @@ bool LRUCurrentHeapPolicy::should_clear_reference(oop p) {
 /////////////////////// MaxHeap //////////////////////

 LRUMaxHeapPolicy::LRUMaxHeapPolicy() {
+  snap();
+}
+
+// Capture state (of-the-VM) information needed to evaluate the policy
+void LRUMaxHeapPolicy::snap() {
   size_t max_heap = MaxHeapSize;
   max_heap -= Universe::get_heap_used_at_last_gc();
   max_heap /= M;
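The _max_interval captured by snap() is the threshold that should_clear_reference() (itself unchanged by this commit) compares against. A rough sketch of that comparison, from the surrounding LRU-policy logic:

// Rough sketch of how the snapshotted threshold is consulted.
bool LRUCurrentHeapPolicy::should_clear_reference(oop p) {
  jlong interval = java_lang_ref_SoftReference::clock() -
                   java_lang_ref_SoftReference::timestamp(p);
  assert(interval >= 0, "Sanity check");
  // Clear only refs idle longer than the interval allowed by free heap.
  return interval > _max_interval;
}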
src/share/vm/memory/referencePolicy.hpp

@@ -26,9 +26,11 @@
 // should be cleared.

-class ReferencePolicy : public ResourceObj {
+class ReferencePolicy : public CHeapObj {
 public:
   virtual bool should_clear_reference(oop p) { ShouldNotReachHere(); return true; }
+
+  // Capture state (of-the-VM) information needed to evaluate the policy
+  virtual void snap() { /* do nothing */ }
 };

 class NeverClearPolicy : public ReferencePolicy {

@@ -48,6 +50,8 @@ class LRUCurrentHeapPolicy : public ReferencePolicy {
 public:
   LRUCurrentHeapPolicy();

+  // Capture state (of-the-VM) information needed to evaluate the policy
+  void snap();
+
   bool should_clear_reference(oop p);
 };

@@ -58,5 +62,7 @@ class LRUMaxHeapPolicy : public ReferencePolicy {
 public:
   LRUMaxHeapPolicy();

+  // Capture state (of-the-VM) information needed to evaluate the policy
+  void snap();
+
   bool should_clear_reference(oop p);
 };
src/share/vm/memory/referenceProcessor.cpp

@@ -25,6 +25,11 @@
 # include "incls/_precompiled.incl"
 # include "incls/_referenceProcessor.cpp.incl"

+ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
+ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
+oop              ReferenceProcessor::_sentinelRef                  = NULL;
+const int        subclasses_of_ref = REF_PHANTOM - REF_OTHER;
+
 // List of discovered references.
 class DiscoveredList {
 public:

@@ -47,7 +52,9 @@ public:
   }
   bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
   size_t length() { return _len; }
   void set_length(size_t len) { _len = len; }
+  void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
+  void dec_length(size_t dec) { _len -= dec; }
 private:
   // Set value depending on UseCompressedOops. This could be a template class
   // but then we have to fix all the instantiations and declarations that use this class.

@@ -56,10 +63,6 @@ private:
   size_t _len;
 };

-oop  ReferenceProcessor::_sentinelRef = NULL;
-
-const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
-
 void referenceProcessor_init() {
   ReferenceProcessor::init_statics();
 }

@@ -80,6 +83,12 @@ void ReferenceProcessor::init_statics() {
   }
   assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
          "Just constructed it!");
+  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
+  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
+                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
+  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
+    vm_exit_during_initialization("Could not allocate reference policy object");
+  }
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
             "Unrecongnized RefDiscoveryPolicy");

@@ -106,6 +115,7 @@ ReferenceProcessor::create_ref_processor(MemRegion span,
     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   }
   rp->set_is_alive_non_header(is_alive_non_header);
+  rp->snap_policy(false /* default soft ref policy */);
   return rp;
 }

@@ -192,7 +202,6 @@ void ReferenceProcessor::update_soft_ref_master_clock() {
 }

 void ReferenceProcessor::process_discovered_references(
-  ReferencePolicy*             policy,
   BoolObjectClosure*           is_alive,
   OopClosure*                  keep_alive,
   VoidClosure*                 complete_gc,

@@ -207,7 +216,7 @@ void ReferenceProcessor::process_discovered_references(
   // Soft references
   {
     TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
-    process_discovered_reflist(_discoveredSoftRefs, policy, true,
+    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                is_alive, keep_alive, complete_gc, task_executor);
   }

@@ -436,13 +445,13 @@ public:
   // The "allow_null_referent" argument tells us to allow for the possibility
   // of a NULL referent in the discovered Reference object. This typically
   // happens in the case of concurrent collectors that may have done the
-  // discovery concurrently or interleaved with mutator execution.
+  // discovery concurrently, or interleaved, with mutator execution.
   inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

   // Move to the next discovered reference.
   inline void next();

-  // Remove the current reference from the list and move to the next.
+  // Remove the current reference from the list
   inline void remove();

   // Make the Reference object active again.

@@ -476,7 +485,6 @@ public:
   inline size_t removed() const { return _removed; }
   )

-private:
   inline void move_to_next();

 private:

@@ -553,7 +561,7 @@ inline void DiscoveredListIterator::remove() {
     oopDesc::store_heap_oop((oop*)_prev_next, _next);
   }
   NOT_PRODUCT(_removed++);
-  move_to_next();
+  _refs_list.dec_length(1);
 }

 inline void DiscoveredListIterator::move_to_next() {

@@ -591,12 +599,13 @@ ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                                iter.obj(), iter.obj()->blueprint()->internal_name());
       }
+      // Remove Reference object from list
+      iter.remove();
       // Make the Reference object active again
       iter.make_active();
       // keep the referent around
       iter.make_referent_alive();
-      // Remove Reference object from list
-      iter.remove();
+      iter.move_to_next();
     } else {
       iter.next();
     }
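The iterator discipline changes here: remove() no longer advances the cursor (it now just unlinks the entry and decrements the list length), so every call site pairs it with an explicit move_to_next(). A condensed sketch of the resulting loop shape, where should_drop is a hypothetical stand-in for the policy or liveness test:

while (iter.has_next()) {
  iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
  if (should_drop(iter.obj())) {   // hypothetical predicate
    iter.remove();                 // unlink + dec_length(1); cursor not advanced
    // ... keep referent alive, make the Reference active again, etc. ...
    iter.move_to_next();           // advance explicitly
  } else {
    iter.next();                   // normal advance
  }
}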
@@ -629,12 +638,13 @@ ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
                                iter.obj(), iter.obj()->blueprint()->internal_name());
       }
       // The referent is reachable after all.
+      // Remove Reference object from list.
+      iter.remove();
       // Update the referent pointer as necessary: Note that this
       // should not entail any recursive marking because the
       // referent must already have been traversed.
       iter.make_referent_alive();
-      // Remove Reference object from list
-      iter.remove();
+      iter.move_to_next();
     } else {
       iter.next();
     }

@@ -670,6 +680,7 @@ ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list,
       } else {
        keep_alive->do_oop((oop*)next_addr);
       }
+      iter.move_to_next();
     } else {
       iter.next();
     }

@@ -832,9 +843,9 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
       }
       java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
       ref_lists[to_idx].set_head(move_head);
-      ref_lists[to_idx].set_length(ref_lists[to_idx].length() + refs_to_move);
+      ref_lists[to_idx].inc_length(refs_to_move);
       ref_lists[from_idx].set_head(new_head);
-      ref_lists[from_idx].set_length(ref_lists[from_idx].length() - refs_to_move);
+      ref_lists[from_idx].dec_length(refs_to_move);
     } else {
       ++to_idx;
     }

@@ -923,7 +934,6 @@ void ReferenceProcessor::clean_up_discovered_references() {
 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
   assert(!discovery_is_atomic(), "Else why call this method?");
   DiscoveredListIterator iter(refs_list, NULL, NULL);
-  size_t length = refs_list.length();
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());

@@ -941,12 +951,11 @@ void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list)
       )
       // Remove Reference object from list
       iter.remove();
-      --length;
+      iter.move_to_next();
     } else {
       iter.next();
     }
   }
-  refs_list.set_length(length);
   NOT_PRODUCT(
     if (PrintGCDetails && TraceReferenceGC) {
       gclog_or_tty->print(

@@ -1024,7 +1033,7 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
     // We have separate lists for enqueueing so no synchronization
     // is necessary.
     refs_list.set_head(obj);
-    refs_list.set_length(refs_list.length() + 1);
+    refs_list.inc_length(1);
     if (_discovered_list_needs_barrier) {
       _bs->write_ref_field((void*)discovered_addr, current_head);
       guarantee(false, "Needs to be fixed: YSR");
     }

@@ -1090,15 +1099,28 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
   // reachable.
   if (is_alive_non_header() != NULL) {
     oop referent = java_lang_ref_Reference::referent(obj);
-    // We'd like to assert the following:
-    // assert(referent != NULL, "Refs with null referents already filtered");
-    // However, since this code may be executed concurrently with
-    // mutators, which can clear() the referent, it is not
-    // guaranteed that the referent is non-NULL.
+    // In the case of non-concurrent discovery, the last
+    // disjunct below should hold. It may not hold in the
+    // case of concurrent discovery because mutators may
+    // concurrently clear() a Reference.
+    assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
+           "Refs with null referents already filtered");
     if (is_alive_non_header()->do_object_b(referent)) {
       return false;  // referent is reachable
     }
   }
+  if (rt == REF_SOFT) {
+    // For soft refs we can decide now if these are not
+    // current candidates for clearing, in which case we
+    // can mark through them now, rather than delaying that
+    // to the reference-processing phase. Since all current
+    // time-stamp policies advance the soft-ref clock only
+    // at a major collection cycle, this is always currently
+    // accurate.
+    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
+      return false;
+    }
+  }

   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
   const oop  discovered = java_lang_ref_Reference::discovered(obj);

@@ -1168,7 +1190,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
       _bs->write_ref_field((oop*)discovered_addr, current_head);
     }
     list->set_head(obj);
-    list->set_length(list->length() + 1);
+    list->inc_length(1);
   }

   // In the MT discovery case, it is currently possible to see

@@ -1209,45 +1231,48 @@ void ReferenceProcessor::preclean_discovered_references(
     TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                  false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
   }
-  if (yield->should_return()) {
-    return;
-  }

   // Weak references
   {
     TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                  false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
   }
-  if (yield->should_return()) {
-    return;
-  }

   // Final references
   {
     TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                  false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
   }
-  if (yield->should_return()) {
-    return;
-  }

   // Phantom references
   {
     TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                  false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }

@@ -1256,9 +1281,12 @@ void ReferenceProcessor::preclean_discovered_references(
 // Walk the given discovered ref list, and remove all reference objects
 // whose referents are still alive, whose referents are NULL or which
-// are not active (have a non-NULL next field). NOTE: For this to work
-// correctly, refs discovery can not be happening concurrently with this
-// step.
+// are not active (have a non-NULL next field). NOTE: When we are
+// thus precleaning the ref lists (which happens single-threaded today),
+// we do not disable refs discovery to honour the correct semantics of
+// java.lang.Reference. As a result, we need to be careful below
+// that ref removal steps interleave safely with ref discovery steps
+// (in this thread).
 void
 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                 BoolObjectClosure* is_alive,

@@ -1266,7 +1294,6 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
                                                 VoidClosure*       complete_gc,
                                                 YieldClosure*      yield) {
   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
-  size_t length = refs_list.length();
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop obj = iter.obj();

@@ -1281,7 +1308,6 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
       }
       // Remove Reference object from list
      iter.remove();
-      --length;
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {

@@ -1291,12 +1317,11 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
+      iter.move_to_next();
    } else {
      iter.next();
    }
  }
-  refs_list.set_length(length);

  // Close the reachable set
  complete_gc->do_void();
src/share/vm/memory/referenceProcessor.hpp

@@ -23,7 +23,7 @@
  */

 // ReferenceProcessor class encapsulates the per-"collector" processing
-// of "weak" references for GC. The interface is useful for supporting
+// of java.lang.Reference objects for GC. The interface is useful for supporting
 // a generational abstraction, in particular when there are multiple
 // generations that are being independently collected -- possibly
 // concurrently and/or incrementally. Note, however, that the

@@ -75,6 +75,14 @@ class ReferenceProcessor : public CHeapObj {
   // all collectors but the CMS collector).
   BoolObjectClosure* _is_alive_non_header;

+  // Soft ref clearing policies
+  // . the default policy
+  static ReferencePolicy*   _default_soft_ref_policy;
+  // . the "clear all" policy
+  static ReferencePolicy*   _always_clear_soft_ref_policy;
+  // . the current policy below is either one of the above
+  ReferencePolicy*          _current_soft_ref_policy;
+
   // The discovered ref lists themselves

   // The MT'ness degree of the queues below

@@ -90,6 +98,12 @@ class ReferenceProcessor : public CHeapObj {
   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
   static oop  sentinel_ref()             { return _sentinelRef; }
   static oop* adr_sentinel_ref()         { return &_sentinelRef; }
+  ReferencePolicy* snap_policy(bool always_clear) {
+    _current_soft_ref_policy = always_clear ?
+      _always_clear_soft_ref_policy : _default_soft_ref_policy;
+    _current_soft_ref_policy->snap();   // snapshot the policy threshold
+    return _current_soft_ref_policy;
+  }

 public:
   // Process references with a certain reachability level.

@@ -297,8 +311,7 @@ class ReferenceProcessor : public CHeapObj {
   bool discover_reference(oop obj, ReferenceType rt);

   // Process references found during GC (called by the garbage collector)
-  void process_discovered_references(ReferencePolicy*             policy,
-                                     BoolObjectClosure*           is_alive,
+  void process_discovered_references(BoolObjectClosure*           is_alive,
                                      OopClosure*                  keep_alive,
                                      VoidClosure*                 complete_gc,
                                      AbstractRefProcTaskExecutor* task_executor);
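Taken together, the new API removes the per-call policy argument: a collector first snapshots the soft-ref policy on the processor, then processes references. The calling pattern used throughout this commit:

// New calling convention, as used by the collectors above.
ReferenceProcessor* rp = ref_processor();
rp->snap_policy(clear_all_soft_refs);   // pick AlwaysClear or the LRU default and snapshot its threshold
// ... marking / scavenging work ...
rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
                                  NULL /* or an AbstractRefProcTaskExecutor for MT */);

Snapshotting the policy up front is also what lets discover_reference() consult _current_soft_ref_policy and skip discovering soft references that would not be cleared this cycle, marking through them immediately instead.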
src/share/vm/memory/universe.cpp

@@ -96,7 +96,7 @@ bool Universe::_bootstrapping = false;
 bool            Universe::_fully_initialized = false;

 size_t          Universe::_heap_capacity_at_last_gc;
-size_t          Universe::_heap_used_at_last_gc;
+size_t          Universe::_heap_used_at_last_gc = 0;

 CollectedHeap*  Universe::_collectedHeap = NULL;
 address         Universe::_heap_base = NULL;
src/share/vm/oops/oop.inline.hpp

@@ -92,7 +92,7 @@ inline void oopDesc::set_klass_to_list_ptr(oop k) {
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
   if (UseCompressedOops) {
-    _metadata._compressed_klass = encode_heap_oop_not_null(k);
+    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
   } else {
     _metadata._klass = (klassOop)k;
   }
src/share/vm/runtime/globals.hpp

@@ -1474,7 +1474,7 @@ class CommandLineFlags {
           "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence"  \
           " ratio")                                                         \
                                                                             \
-  product(bool, CMSPrecleanRefLists1, false,                                \
+  product(bool, CMSPrecleanRefLists1, true,                                 \
           "Preclean ref lists during (initial) preclean phase")             \
                                                                             \
   product(bool, CMSPrecleanRefLists2, false,                                \
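This flips the default so CMS now precleans the discovered reference lists during the initial preclean phase. Since it is a product flag, it remains switchable at launch; for example, to restore the previous behavior:

java -XX:+UseConcMarkSweepGC -XX:-CMSPrecleanRefLists1 MyApp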
src/share/vm/utilities/macros.hpp

@@ -65,8 +65,10 @@
 // COMPILER2 variant
 #ifdef COMPILER2
 #define COMPILER2_PRESENT(code) code
+#define NOT_COMPILER2(code)
 #else // COMPILER2
 #define COMPILER2_PRESENT(code)
+#define NOT_COMPILER2(code) code
 #endif // COMPILER2
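The new NOT_COMPILER2 macro is the complement of COMPILER2_PRESENT: exactly one of the two expands to its argument in any given build. Its first use is in ReferenceProcessor::init_statics() earlier in this commit, selecting the default soft-ref policy by build flavor at preprocessing time:

// From init_statics() above: one LRU policy per build flavor.
_default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                               NOT_COMPILER2(LRUCurrentHeapPolicy());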