Commit f83c3096
Authored 10 years ago by amurillo
Merge commit — parents: e2759b0d, f2a3644f
Branch: master. Tags: none. Related merge requests: none.

Showing 74 changed files with 2929 additions and 1764 deletions (+2929 -1764)
Changed files:

agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java  +3 -6
agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java  +21 -5
agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java  +8 -4
make/hotspot_version  +1 -1
make/jprt.properties  +18 -18
src/share/vm/classfile/verifier.cpp  +192 -9
src/share/vm/classfile/verifier.hpp  +11 -0
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp  +24 -0
src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp  +2 -2
src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp  +2 -1
src/share/vm/gc_implementation/g1/concurrentMark.cpp  +134 -120
src/share/vm/gc_implementation/g1/concurrentMark.hpp  +35 -37
src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp  +30 -0
src/share/vm/gc_implementation/g1/g1AllocRegion.hpp  +1 -1
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp  +40 -60
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp  +37 -62
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp  +60 -3
src/share/vm/gc_implementation/g1/g1CardCounts.cpp  +47 -112
src/share/vm/gc_implementation/g1/g1CardCounts.hpp  +21 -27
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  +234 -355
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp  +47 -68
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp  +31 -33
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  +7 -7
src/share/vm/gc_implementation/g1/g1HotCardCache.cpp  +3 -6
src/share/vm/gc_implementation/g1/g1HotCardCache.hpp  +1 -4
src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp  +42 -40
src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp  +167 -0
src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp  +111 -0
src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp  +159 -0
src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp  +83 -0
src/share/vm/gc_implementation/g1/g1RemSet.cpp  +6 -11
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp  +9 -7
src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp  +41 -2
src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp  +24 -2
src/share/vm/gc_implementation/g1/heapRegion.cpp  +19 -17
src/share/vm/gc_implementation/g1/heapRegion.hpp  +11 -24
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp  +13 -14
src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp  +8 -9
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp  +353 -164
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp  +151 -71
src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp  +16 -12
src/share/vm/gc_implementation/g1/heapRegionSet.cpp  +111 -133
src/share/vm/gc_implementation/g1/heapRegionSet.hpp  +18 -54
src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp  +30 -87
src/share/vm/gc_implementation/g1/vmStructs_g1.hpp  +1 -2
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp  +1 -0
src/share/vm/memory/cardTableModRefBS.cpp  +41 -25
src/share/vm/memory/cardTableModRefBS.hpp  +12 -5
src/share/vm/memory/cardTableRS.cpp  +1 -0
src/share/vm/opto/c2_globals.hpp  +0 -3
src/share/vm/opto/callGenerator.cpp  +40 -31
src/share/vm/opto/callGenerator.hpp  +1 -3
src/share/vm/opto/callnode.cpp  +1 -0
src/share/vm/opto/callnode.hpp  +33 -0
src/share/vm/opto/compile.cpp  +14 -5
src/share/vm/opto/compile.hpp  +0 -18
src/share/vm/opto/doCall.cpp  +2 -2
src/share/vm/opto/graphKit.cpp  +27 -53
src/share/vm/opto/graphKit.hpp  +1 -1
src/share/vm/opto/ifnode.cpp  +1 -1
src/share/vm/opto/library_call.cpp  +2 -2
src/share/vm/opto/node.cpp  +6 -0
src/share/vm/opto/parse.hpp  +4 -5
src/share/vm/opto/parse1.cpp  +28 -3
src/share/vm/opto/replacednodes.cpp  +219 -0
src/share/vm/opto/replacednodes.hpp  +81 -0
src/share/vm/prims/jni.cpp  +8 -11
src/share/vm/prims/whitebox.cpp  +1 -1
src/share/vm/runtime/arguments.cpp  +0 -4
src/share/vm/runtime/globals.hpp  +9 -0
src/share/vm/utilities/growableArray.hpp  +2 -1
test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java  +1 -0
test/gc/class_unloading/TestG1ClassUnloadingHWM.java  +1 -0
test/gc/g1/TestEagerReclaimHumongousRegions2.java  +9 -0
agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java

@@ -45,8 +45,8 @@ import sun.jvm.hotspot.types.TypeDataBase;
 public class G1CollectedHeap extends SharedHeap {
     // HeapRegionSeq _seq;
     static private long hrsFieldOffset;
-    // MemRegion _g1_committed;
-    static private long g1CommittedFieldOffset;
+    // MemRegion _g1_reserved;
+    static private long g1ReservedFieldOffset;
     // size_t _summary_bytes_used;
     static private CIntegerField summaryBytesUsedField;
     // G1MonitoringSupport* _g1mm;
@@ -68,7 +68,6 @@ public class G1CollectedHeap extends SharedHeap {
         Type type = db.lookupType("G1CollectedHeap");

         hrsFieldOffset = type.getField("_hrs").getOffset();
-        g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
         summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
         g1mmField = type.getAddressField("_g1mm");
         oldSetFieldOffset = type.getField("_old_set").getOffset();
@@ -76,9 +75,7 @@ public class G1CollectedHeap extends SharedHeap {
     }

     public long capacity() {
-        Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
-        MemRegion g1Committed = new MemRegion(g1CommittedAddr);
-        return g1Committed.byteSize();
+        return hrs().capacity();
     }

     public long used() {
agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java

@@ -93,19 +93,35 @@ public class G1HeapRegionTable extends VMObject {
   private class HeapRegionIterator implements Iterator<HeapRegion> {
     private long index;
     private long length;
+    private HeapRegion next;

+    public HeapRegion positionToNext() {
+      HeapRegion result = next;
+      while (index < length && at(index) == null) {
+        index++;
+      }
+      if (index < length) {
+        next = at(index);
+        index++; // restart search at next element
+      } else {
+        next = null;
+      }
+      return result;
+    }

     @Override
-    public boolean hasNext() { return index < length; }
+    public boolean hasNext() { return next != null; }

     @Override
-    public HeapRegion next() { return at(index++); }
+    public HeapRegion next() { return positionToNext(); }

     @Override
     public void remove()     { /* not supported */ }

-    HeapRegionIterator(long committedLength) {
+    HeapRegionIterator(long totalLength) {
       index = 0;
-      length = committedLength;
+      length = totalLength;
+      positionToNext();
     }
   }
agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java

@@ -43,7 +43,7 @@ public class HeapRegionSeq extends VMObject {
     // G1HeapRegionTable _regions
     static private long regionsFieldOffset;
     // uint _committed_length
-    static private CIntegerField committedLengthField;
+    static private CIntegerField numCommittedField;

     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -57,7 +57,7 @@ public class HeapRegionSeq extends VMObject {
         Type type = db.lookupType("HeapRegionSeq");

         regionsFieldOffset = type.getField("_regions").getOffset();
-        committedLengthField = type.getCIntegerField("_committed_length");
+        numCommittedField = type.getCIntegerField("_num_committed");
     }

     private G1HeapRegionTable regions() {
@@ -66,16 +66,20 @@ public class HeapRegionSeq extends VMObject {
                                                      regionsAddr);
     }

+    public long capacity() {
+        return length() * HeapRegion.grainBytes();
+    }
+
     public long length() {
         return regions().length();
     }

     public long committedLength() {
-        return committedLengthField.getValue(addr);
+        return numCommittedField.getValue(addr);
     }

     public Iterator<HeapRegion> heapRegionIterator() {
-        return regions().heapRegionIterator(committedLength());
+        return regions().heapRegionIterator(length());
     }

     public HeapRegionSeq(Address addr) {
make/hotspot_version

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014

 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=05
+HS_BUILD_NUMBER=06

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
make/jprt.properties

@@ -33,7 +33,7 @@ jprt.need.sibling.build=false

 # This tells jprt what default release we want to build

-jprt.hotspot.default.release=jdk8u20
+jprt.hotspot.default.release=jdk8u40

 jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
@@ -47,65 +47,65 @@ jprt.sync.push=false
 # sparc etc.

 # Define the Solaris platforms we want for the various releases
-jprt.my.solaris.sparcv9.jdk8u20=solaris_sparcv9_5.10
+jprt.my.solaris.sparcv9.jdk8u40=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}

-jprt.my.solaris.x64.jdk8u20=solaris_x64_5.10
+jprt.my.solaris.x64.jdk8u40=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}

-jprt.my.linux.i586.jdk8u20=linux_i586_2.6
+jprt.my.linux.i586.jdk8u40=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
 jprt.my.linux.i586.jdk7u8=${jprt.my.linux.i586.jdk7}
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}

-jprt.my.linux.x64.jdk8u20=linux_x64_2.6
+jprt.my.linux.x64.jdk8u40=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
 jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}

-jprt.my.linux.ppc.jdk8u20=linux_ppc_2.6
+jprt.my.linux.ppc.jdk8u40=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7u8=${jprt.my.linux.ppc.jdk7}
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}

-jprt.my.linux.ppcv2.jdk8u20=linux_ppcv2_2.6
+jprt.my.linux.ppcv2.jdk8u40=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7u8=${jprt.my.linux.ppcv2.jdk7}
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}

-jprt.my.linux.armvfpsflt.jdk8u20=linux_armvfpsflt_2.6
+jprt.my.linux.armvfpsflt.jdk8u40=linux_armvfpsflt_2.6
 jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}

-jprt.my.linux.armvfphflt.jdk8u20=linux_armvfphflt_2.6
+jprt.my.linux.armvfphflt.jdk8u40=linux_armvfphflt_2.6
 jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}}

 # The ARM GP vfp-sflt build is not currently supported
-#jprt.my.linux.armvs.jdk8u20=linux_armvs_2.6
+#jprt.my.linux.armvs.jdk8u40=linux_armvs_2.6
 #jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}}

-jprt.my.linux.armvh.jdk8u20=linux_armvh_2.6
+jprt.my.linux.armvh.jdk8u40=linux_armvh_2.6
 jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}}

-jprt.my.linux.armsflt.jdk8u20=linux_armsflt_2.6
+jprt.my.linux.armsflt.jdk8u40=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7u8=${jprt.my.linux.armsflt.jdk7}
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}

-jprt.my.macosx.x64.jdk8u20=macosx_x64_10.7
+jprt.my.macosx.x64.jdk8u40=macosx_x64_10.7
 jprt.my.macosx.x64.jdk7=macosx_x64_10.7
 jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
 jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}

-jprt.my.windows.i586.jdk8u20=windows_i586_6.1
+jprt.my.windows.i586.jdk8u40=windows_i586_6.1
 jprt.my.windows.i586.jdk7=windows_i586_6.1
 jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}

-jprt.my.windows.x64.jdk8u20=windows_x64_6.1
+jprt.my.windows.x64.jdk8u40=windows_x64_6.1
 jprt.my.windows.x64.jdk7=windows_x64_6.1
 jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
@@ -137,7 +137,7 @@ jprt.build.targets.embedded= \
 jprt.build.targets.all=${jprt.build.targets.standard}, \
   ${jprt.build.targets.embedded}, ${jprt.build.targets.open}

-jprt.build.targets.jdk8u20=${jprt.build.targets.all}
+jprt.build.targets.jdk8u40=${jprt.build.targets.all}
 jprt.build.targets.jdk7=${jprt.build.targets.all}
 jprt.build.targets.jdk7u8=${jprt.build.targets.all}
 jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
@@ -343,7 +343,7 @@ jprt.test.targets.embedded= \
   ${jprt.my.windows.i586.test.targets}, \
   ${jprt.my.windows.x64.test.targets}

-jprt.test.targets.jdk8u20=${jprt.test.targets.standard}
+jprt.test.targets.jdk8u40=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
 jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
@@ -393,7 +393,7 @@ jprt.make.rule.test.targets.standard = \
 jprt.make.rule.test.targets.embedded = \
   ${jprt.make.rule.test.targets.standard.client}

-jprt.make.rule.test.targets.jdk8u20=${jprt.make.rule.test.targets.standard}
+jprt.make.rule.test.targets.jdk8u40=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
src/share/vm/classfile/verifier.cpp

@@ -2231,6 +2231,181 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
   }
 }

+// Look at the method's handlers.  If the bci is in the handler's try block
+// then check if the handler_pc is already on the stack.  If not, push it.
+void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
+                                  GrowableArray<u4>* handler_stack,
+                                  u4 bci) {
+  int exlength = exhandlers->length();
+  for (int x = 0; x < exlength; x++) {
+    if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
+      handler_stack->append_if_missing(exhandlers->handler_pc(x));
+    }
+  }
+}
+
+// Return TRUE if all code paths starting with start_bc_offset end in
+// bytecode athrow or loop.
+bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
+  ResourceMark rm;
+  // Create bytecode stream.
+  RawBytecodeStream bcs(method());
+  u4 code_length = method()->code_size();
+  bcs.set_start(start_bc_offset);
+  u4 target;
+  // Create stack for storing bytecode start offsets for if* and *switch.
+  GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
+  // Create stack for handlers for try blocks containing this handler.
+  GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
+  // Create list of visited branch opcodes (goto* and if*).
+  GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
+  ExceptionTable exhandlers(_method());
+
+  while (true) {
+    if (bcs.is_last_bytecode()) {
+      // if no more starting offsets to parse or if at the end of the
+      // method then return false.
+      if ((bci_stack->is_empty()) || ((u4)bcs.end_bci() == code_length))
+        return false;
+      // Pop a bytecode starting offset and scan from there.
+      bcs.set_start(bci_stack->pop());
+    }
+    Bytecodes::Code opcode = bcs.raw_next();
+    u4 bci = bcs.bci();
+
+    // If the bytecode is in a TRY block, push its handlers so they
+    // will get parsed.
+    push_handlers(&exhandlers, handler_stack, bci);
+
+    switch (opcode) {
+      case Bytecodes::_if_icmpeq:
+      case Bytecodes::_if_icmpne:
+      case Bytecodes::_if_icmplt:
+      case Bytecodes::_if_icmpge:
+      case Bytecodes::_if_icmpgt:
+      case Bytecodes::_if_icmple:
+      case Bytecodes::_ifeq:
+      case Bytecodes::_ifne:
+      case Bytecodes::_iflt:
+      case Bytecodes::_ifge:
+      case Bytecodes::_ifgt:
+      case Bytecodes::_ifle:
+      case Bytecodes::_if_acmpeq:
+      case Bytecodes::_if_acmpne:
+      case Bytecodes::_ifnull:
+      case Bytecodes::_ifnonnull:
+        target = bcs.dest();
+        if (visited_branches->contains(bci)) {
+          if (bci_stack->is_empty()) return true;
+          // Pop a bytecode starting offset and scan from there.
+          bcs.set_start(bci_stack->pop());
+        } else {
+          if (target > bci) { // forward branch
+            if (target >= code_length) return false;
+            // Push the branch target onto the stack.
+            bci_stack->push(target);
+            // then, scan bytecodes starting with next.
+            bcs.set_start(bcs.next_bci());
+          } else { // backward branch
+            // Push bytecode offset following backward branch onto the stack.
+            bci_stack->push(bcs.next_bci());
+            // Check bytecodes starting with branch target.
+            bcs.set_start(target);
+          }
+          // Record target so we don't branch here again.
+          visited_branches->append(bci);
+        }
+        break;
+
+      case Bytecodes::_goto:
+      case Bytecodes::_goto_w:
+        target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
+        if (visited_branches->contains(bci)) {
+          if (bci_stack->is_empty()) return true;
+          // Been here before, pop new starting offset from stack.
+          bcs.set_start(bci_stack->pop());
+        } else {
+          if (target >= code_length) return false;
+          // Continue scanning from the target onward.
+          bcs.set_start(target);
+          // Record target so we don't branch here again.
+          visited_branches->append(bci);
+        }
+        break;
+
+      // Check that all switch alternatives end in 'athrow' bytecodes. Since it
+      // is difficult to determine where each switch alternative ends, parse
+      // each switch alternative until either hit a 'return', 'athrow', or reach
+      // the end of the method's bytecodes. This is gross but should be okay
+      // because:
+      // 1. tableswitch and lookupswitch byte codes in handlers for ctor explicit
+      //    constructor invocations should be rare.
+      // 2. if each switch alternative ends in an athrow then the parsing should be
+      //    short. If there is no athrow then it is bogus code, anyway.
+      case Bytecodes::_lookupswitch:
+      case Bytecodes::_tableswitch:
+        {
+          address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize);
+          u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci;
+          int keys, delta;
+          if (opcode == Bytecodes::_tableswitch) {
+            jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
+            jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
+            // This is invalid, but let the regular bytecode verifier
+            // report this because the user will get a better error message.
+            if (low > high) return true;
+            keys = high - low + 1;
+            delta = 1;
+          } else {
+            keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
+            delta = 2;
+          }
+          // Invalid, let the regular bytecode verifier deal with it.
+          if (keys < 0) return true;
+
+          // Push the offset of the next bytecode onto the stack.
+          bci_stack->push(bcs.next_bci());
+
+          // Push the switch alternatives onto the stack.
+          for (int i = 0; i < keys; i++) {
+            u4 target = bci + (jint)Bytes::get_Java_u4(aligned_bcp + (3 + i*delta)*jintSize);
+            if (target > code_length) return false;
+            bci_stack->push(target);
+          }
+
+          // Start bytecode parsing for the switch at the default alternative.
+          if (default_offset > code_length) return false;
+          bcs.set_start(default_offset);
+          break;
+        }
+
+      case Bytecodes::_return:
+        return false;
+
+      case Bytecodes::_athrow:
+        {
+          if (bci_stack->is_empty()) {
+            if (handler_stack->is_empty()) {
+              return true;
+            } else {
+              // Parse the catch handlers for try blocks containing athrow.
+              bcs.set_start(handler_stack->pop());
+            }
+          } else {
+            // Pop a bytecode offset and starting scanning from there.
+            bcs.set_start(bci_stack->pop());
+          }
+        }
+        break;
+
+      default:
+        ;
+    } // end switch
+  } // end while loop
+
+  return false;
+}
+
 void ClassVerifier::verify_invoke_init(
     RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
     StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
@@ -2250,18 +2425,26 @@ void ClassVerifier::verify_invoke_init(
       return;
     }

-    // Make sure that this call is not done from within a TRY block because
-    // that can result in returning an incomplete object.  Simply checking
-    // (bci >= start_pc) also ensures that this call is not done after a TRY
-    // block.  That is also illegal because this call must be the first Java
-    // statement in the constructor.
+    // Check if this call is done from inside of a TRY block.  If so, make
+    // sure that all catch clause paths end in a throw.  Otherwise, this
+    // can result in returning an incomplete object.
     ExceptionTable exhandlers(_method());
     int exlength = exhandlers.length();
     for(int i = 0; i < exlength; i++) {
-      if (bci >= exhandlers.start_pc(i)) {
-        verify_error(ErrorContext::bad_code(bci),
-                     "Bad <init> method call from after the start of a try block");
-        return;
+      u2 start_pc = exhandlers.start_pc(i);
+      u2 end_pc = exhandlers.end_pc(i);
+
+      if (bci >= start_pc && bci < end_pc) {
+        if (!ends_in_athrow(exhandlers.handler_pc(i))) {
+          verify_error(ErrorContext::bad_code(bci),
+                       "Bad <init> method call from after the start of a try block");
+          return;
+        } else if (VerboseVerification) {
+          ResourceMark rm;
+          tty->print_cr("Survived call to ends_in_athrow(): %s",
+                        current_class()->name()->as_C_string());
+        }
       }
     }
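The new ends_in_athrow() above is essentially a worklist walk over branch targets: every path reachable from a handler's start offset must end in an athrow (or loop back to an already-visited branch) without ever reaching a return. Below is a simplified, self-contained sketch of that idea over a toy instruction set — it is not the VM code, and the names (EndsInThrowSketch, Insn, Kind) are invented for illustration; the real scan additionally pushes exception handlers covering the scanned bytecodes and treats malformed switches conservatively, as the C++ above shows.

import java.util.ArrayDeque;
import java.util.BitSet;
import java.util.Deque;

// Toy model of the ends_in_athrow() scan: every path from a start offset must
// reach THROW (or loop back to an already-visited branch) without hitting RETURN.
public class EndsInThrowSketch {
    enum Kind { PLAIN, GOTO, IF, RETURN, THROW }

    static final class Insn {
        final Kind kind;
        final int target;  // branch target for GOTO / IF, unused otherwise
        Insn(Kind kind, int target) { this.kind = kind; this.target = target; }
    }

    static boolean endsInThrow(Insn[] code, int start) {
        Deque<Integer> worklist = new ArrayDeque<>();
        BitSet visitedBranches = new BitSet(code.length);
        worklist.push(start);

        while (!worklist.isEmpty()) {
            int pc = worklist.pop();
            path:
            while (true) {
                if (pc < 0 || pc >= code.length) {
                    return false;               // ran off the end without throwing
                }
                Insn insn = code[pc];
                switch (insn.kind) {
                    case RETURN:
                        return false;           // a normal return escapes the handler
                    case THROW:
                        break path;             // this path is fine; take the next work item
                    case GOTO:
                        if (visitedBranches.get(pc)) {
                            break path;         // loop: treat the path as terminating
                        }
                        visitedBranches.set(pc);
                        pc = insn.target;
                        break;
                    case IF:
                        if (visitedBranches.get(pc)) {
                            break path;
                        }
                        visitedBranches.set(pc);
                        worklist.push(insn.target); // scan the taken branch later
                        pc++;                       // keep scanning the fall-through now
                        break;
                    default:
                        pc++;                       // plain instruction, fall through
                }
            }
        }
        return true;  // every scanned path ended in THROW or looped
    }

    public static void main(String[] args) {
        // Both the taken branch and the fall-through end in THROW: accepted.
        Insn[] ok = {
            new Insn(Kind.IF, 3),
            new Insn(Kind.PLAIN, -1),
            new Insn(Kind.THROW, -1),
            new Insn(Kind.THROW, -1),
        };
        // One path returns normally, so the handler would be rejected.
        Insn[] bad = {
            new Insn(Kind.IF, 2),
            new Insn(Kind.THROW, -1),
            new Insn(Kind.RETURN, -1),
        };
        System.out.println(endsInThrow(ok, 0));   // true
        System.out.println(endsInThrow(bad, 0));  // false
    }
}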
src/share/vm/classfile/verifier.hpp

@@ -30,6 +30,7 @@
 #include "oops/klass.hpp"
 #include "oops/method.hpp"
 #include "runtime/handles.hpp"
+#include "utilities/growableArray.hpp"
 #include "utilities/exceptions.hpp"

 // The verifier class
@@ -303,6 +304,16 @@ class ClassVerifier : public StackObj {
     StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
     constantPoolHandle cp, TRAPS);

+  // Used by ends_in_athrow() to push all handlers that contain bci onto
+  // the handler_stack, if the handler is not already on the stack.
+  void push_handlers(ExceptionTable* exhandlers,
+                     GrowableArray<u4>* handler_stack,
+                     u4 bci);
+
+  // Returns true if all paths starting with start_bc_offset end in athrow
+  // bytecode or loop.
+  bool ends_in_athrow(u4 start_bc_offset);
+
   void verify_invoke_instructions(
     RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
     bool* this_uninit, VerificationType return_type,
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

@@ -1514,6 +1514,8 @@ bool CMSCollector::shouldConcurrentCollect() {
     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
+    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
+    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
     gclog_or_tty->print_cr("metadata initialized %d",
       MetaspaceGC::should_concurrent_collect());
   }
@@ -1576,6 +1578,28 @@ bool CMSCollector::shouldConcurrentCollect() {
     return true;
   }

+  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
+  if (CMSTriggerInterval >= 0) {
+    if (CMSTriggerInterval == 0) {
+      // Trigger always
+      return true;
+    }
+
+    // Check the CMS time since begin (we do not check the stats validity
+    // as we want to be able to trigger the first CMS cycle as well)
+    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
+      if (Verbose && PrintGCDetails) {
+        if (stats().valid()) {
+          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
+                                 stats().cms_time_since_begin());
+        } else {
+          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
+        }
+      }
+      return true;
+    }
+  }
+
   return false;
 }
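Judging from the check above, CMSTriggerInterval is expressed in milliseconds: it is divided by MILLIUNITS before being compared against stats().cms_time_since_begin(), a value of 0 forces a cycle at every check, a positive value starts a cycle at least that often, and a negative value leaves the time-based trigger off. A hedged usage sketch (the application name is a placeholder, and flag availability depends on a JVM built from this tree — the flag itself is declared in the globals.hpp change listed in this commit):

java -XX:+UseConcMarkSweepGC -XX:CMSTriggerInterval=600000 MyApp   # request a CMS cycle at least every 10 minutes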
src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp

@@ -81,8 +81,8 @@ void ConcurrentG1Refine::reset_threshold_step() {
   }
 }

-void ConcurrentG1Refine::init() {
-  _hot_card_cache.initialize();
+void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
+  _hot_card_cache.initialize(card_counts_storage);
 }

 void ConcurrentG1Refine::stop() {
src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp

@@ -34,6 +34,7 @@
 class ConcurrentG1RefineThread;
 class G1CollectedHeap;
 class G1HotCardCache;
+class G1RegionToSpaceMapper;
 class G1RemSet;
 class DirtyCardQueue;
@@ -74,7 +75,7 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
   ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure);
   ~ConcurrentG1Refine();

-  void init(); // Accomplish some initialization that has to wait.
+  void init(G1RegionToSpaceMapper* card_counts_storage);
   void stop();

   void reinitialize_threads();
src/share/vm/gc_implementation/g1/concurrentMark.cpp

@@ -36,6 +36,7 @@
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
@@ -98,12 +99,12 @@ int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
 }

 #ifndef PRODUCT
-bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
+bool CMBitMapRO::covers(MemRegion heap_rs) const {
   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
          "size inconsistency");
-  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
-         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
+  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
+         _bmWordSize  == heap_rs.word_size();
 }
 #endif
@@ -111,33 +112,73 @@ void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
   _bm.print_on_error(st, prefix);
 }

-bool CMBitMap::allocate(ReservedSpace heap_rs) {
-  _bmStartWord = (HeapWord*)(heap_rs.base());
-  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
-  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
-                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-  if (!brs.is_reserved()) {
-    warning("ConcurrentMark marking bit map allocation failure");
-    return false;
-  }
-  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
-  // For now we'll just commit all of the bit map up front.
-  // Later on we'll try to be more parsimonious with swap.
-  if (!_virtual_space.initialize(brs, brs.size())) {
-    warning("ConcurrentMark marking bit map backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of concurrent marking bit map?");
-  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
-  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
-         _bmWordSize, "inconsistency in bit map sizing");
-  _bm.set_size(_bmWordSize >> _shifter);
-  return true;
+size_t CMBitMap::compute_size(size_t heap_size) {
+  return heap_size / mark_distance();
+}
+
+size_t CMBitMap::mark_distance() {
+  return MinObjAlignmentInBytes * BitsPerByte;
+}
+
+void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
+  _bmStartWord = heap.start();
+  _bmWordSize = heap.word_size();
+
+  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
+  _bm.set_size(_bmWordSize >> _shifter);
+
+  storage->set_mapping_changed_listener(&_listener);
+}
+
+void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
+  // We need to clear the bitmap on commit, removing any existing information.
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
+  _bm->clearRange(mr);
+}
+
+// Closure used for clearing the given mark bitmap.
+class ClearBitmapHRClosure : public HeapRegionClosure {
+ private:
+  ConcurrentMark* _cm;
+  CMBitMap* _bitmap;
+  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
+ public:
+  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
+    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    size_t const chunk_size_in_words = M / HeapWordSize;
+
+    HeapWord* cur = r->bottom();
+    HeapWord* const end = r->end();
+
+    while (cur < end) {
+      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
+      _bitmap->clearRange(mr);
+
+      cur += chunk_size_in_words;
+
+      // Abort iteration if after yielding the marking has been aborted.
+      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
+        return true;
+      }
+      // Repeat the asserts from before the start of the closure. We will do them
+      // as asserts here to minimize their overhead on the product. However, we
+      // will have them as guarantees at the beginning / end of the bitmap
+      // clearing to get some checking in the product.
+      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
+      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
+    }
+
+    return false;
+  }
+};

 void CMBitMap::clearAll() {
-  _bm.clear();
+  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
+  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }
@@ -482,10 +523,10 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
   return MAX2((n_par_threads + 2) / 4, 1U);
 }

-ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
+ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
   _g1h(g1h),
-  _markBitMap1(log2_intptr(MinObjAlignment)),
-  _markBitMap2(log2_intptr(MinObjAlignment)),
+  _markBitMap1(),
+  _markBitMap2(),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
@@ -494,7 +535,7 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   _cleanup_task_overhead(1.0),
   _cleanup_list("Cleanup List"),
   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
-  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
+  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
             CardTableModRefBS::card_shift,
             false /* in_resource_area*/),
@@ -544,14 +585,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
                            "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
   }

-  if (!_markBitMap1.allocate(heap_rs)) {
-    warning("Failed to allocate first CM bit map");
-    return;
-  }
-  if (!_markBitMap2.allocate(heap_rs)) {
-    warning("Failed to allocate second CM bit map");
-    return;
-  }
+  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
+  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

   // Create & start a ConcurrentMark thread.
   _cmThread = new ConcurrentMarkThread(this);
@@ -562,8 +597,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   }

   assert(CGC_lock != NULL, "Where's the CGC_lock?");
-  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
-  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
+  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
+  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
@@ -723,38 +758,17 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   clear_all_count_data();

   // so that the call below can read a sensible value
-  _heap_start = (HeapWord*) heap_rs.base();
+  _heap_start = g1h->reserved_region().start();
   set_non_marking_state();
   _completed_initialization = true;
 }

-void ConcurrentMark::update_g1_committed(bool force) {
-  // If concurrent marking is not in progress, then we do not need to
-  // update _heap_end.
-  if (!concurrent_marking_in_progress() && !force) return;
-
-  MemRegion committed = _g1h->g1_committed();
-  assert(committed.start() == _heap_start, "start shouldn't change");
-  HeapWord* new_end = committed.end();
-  if (new_end > _heap_end) {
-    // The heap has been expanded.
-    _heap_end = new_end;
-  }
-  // Notice that the heap can also shrink. However, this only happens
-  // during a Full GC (at least currently) and the entire marking
-  // phase will bail out and the task will not be restarted. So, let's
-  // do nothing.
-}
-
 void ConcurrentMark::reset() {
   // Starting values for these two. This should be called in a STW
-  // phase. CM will be notified of any future g1_committed expansions
-  // will be at the end of evacuation pauses, when tasks are
-  // inactive.
-  MemRegion committed = _g1h->g1_committed();
-  _heap_start = committed.start();
-  _heap_end   = committed.end();
+  // phase.
+  MemRegion reserved = _g1h->g1_reserved();
+  _heap_start = reserved.start();
+  _heap_end   = reserved.end();

   // Separated the asserts so that we know which one fires.
   assert(_heap_start != NULL, "heap bounds should look ok");
@@ -826,7 +840,6 @@ void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
     assert(out_of_regions(),
            err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
                    p2i(_finger), p2i(_heap_end)));
-    update_g1_committed(true);
   }
 }
@@ -845,7 +858,6 @@ ConcurrentMark::~ConcurrentMark() {
 void ConcurrentMark::clearNextBitmap() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();

   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
@@ -857,41 +869,36 @@ void ConcurrentMark::clearNextBitmap() {
   // is the case.
   guarantee(!g1h->mark_in_progress(), "invariant");

-  // clear the mark bitmap (no grey objects to start with).
-  // We need to do this in chunks and offer to yield in between
-  // each chunk.
-  HeapWord* start  = _nextMarkBitMap->startWord();
-  HeapWord* end    = _nextMarkBitMap->endWord();
-  HeapWord* cur    = start;
-  size_t chunkSize = M;
-  while (cur < end) {
-    HeapWord* next = cur + chunkSize;
-    if (next > end) {
-      next = end;
-    }
-    MemRegion mr(cur, next);
-    _nextMarkBitMap->clearRange(mr);
-    cur = next;
-    do_yield_check();
-
-    // Repeat the asserts from above. We'll do them as asserts here to
-    // minimize their overhead on the product. However, we'll have
-    // them as guarantees at the beginning / end of the bitmap
-    // clearing to get some checking in the product.
-    assert(cmThread()->during_cycle(), "invariant");
-    assert(!g1h->mark_in_progress(), "invariant");
-  }
+  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
+  g1h->heap_region_iterate(&cl);

-  // Clear the liveness counting data
-  clear_all_count_data();
+  // Clear the liveness counting data. If the marking has been aborted, the abort()
+  // call already did that.
+  if (cl.complete()) {
+    clear_all_count_data();
+  }

   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
   guarantee(!g1h->mark_in_progress(), "invariant");
 }

+class CheckBitmapClearHRClosure : public HeapRegionClosure {
+  CMBitMap* _bitmap;
+  bool _error;
+ public:
+  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
+  }
+};
+
 bool ConcurrentMark::nextMarkBitmapIsClear() {
-  return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
+  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
+  _g1h->heap_region_iterate(&cl);
+  return cl.complete();
 }

 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
@@ -2191,10 +2198,10 @@ void ConcurrentMark::completeCleanup() {
                            _cleanup_list.length());
   }

-  // Noone else should be accessing the _cleanup_list at this point,
-  // so it's not necessary to take any locks
+  // No one else should be accessing the _cleanup_list at this point,
+  // so it is not necessary to take any locks
   while (!_cleanup_list.is_empty()) {
-    HeapRegion* hr = _cleanup_list.remove_head();
+    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
     assert(hr != NULL, "Got NULL from a non-empty list");
     hr->par_clear();
     tmp_free_list.add_ordered(hr);
@@ -2800,7 +2807,6 @@ public:
         str = " O";
       } else {
         HeapRegion* hr  = _g1h->heap_region_containing(obj);
-        guarantee(hr != NULL, "invariant");
         bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
         bool marked = _g1h->is_marked(obj, _vo);
@@ -2979,22 +2985,25 @@ ConcurrentMark::claim_region(uint worker_id) {
     // claim_region() and a humongous object allocation might force us
     // to do a bit of unnecessary work (due to some unnecessary bitmap
     // iterations) but it should not introduce and correctness issues.
-    HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
-    HeapWord*   bottom        = curr_region->bottom();
-    HeapWord*   end           = curr_region->end();
-    HeapWord*   limit         = curr_region->next_top_at_mark_start();
-
-    if (verbose_low()) {
-      gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
-                             "[" PTR_FORMAT ", " PTR_FORMAT "), "
-                             "limit = " PTR_FORMAT,
-                             worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
-    }
+    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
+
+    // Above heap_region_containing_raw may return NULL as we always scan claim
+    // until the end of the heap. In this case, just jump to the next region.
+    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

     // Is the gap between reading the finger and doing the CAS too long?
     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
-    if (res == finger) {
+    if (res == finger && curr_region != NULL) {
       // we succeeded
+      HeapWord*   bottom        = curr_region->bottom();
+      HeapWord*   limit         = curr_region->next_top_at_mark_start();
+
+      if (verbose_low()) {
+        gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
+                               "[" PTR_FORMAT ", " PTR_FORMAT "), "
+                               "limit = " PTR_FORMAT,
+                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
+      }

       // notice that _finger == end cannot be guaranteed here since,
       // someone else might have moved the finger even further
@@ -3025,10 +3034,17 @@ ConcurrentMark::claim_region(uint worker_id) {
       }
     } else {
       assert(_finger > finger, "the finger should have moved forward");
       if (verbose_low()) {
-        gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
-                               "global finger = " PTR_FORMAT ", "
-                               "our finger = " PTR_FORMAT,
-                               worker_id, p2i(_finger), p2i(finger));
+        if (curr_region == NULL) {
+          gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
+                                 "global finger = " PTR_FORMAT ", "
+                                 "our finger = " PTR_FORMAT,
+                                 worker_id, p2i(_finger), p2i(finger));
+        } else {
+          gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
+                                 "global finger = " PTR_FORMAT ", "
+                                 "our finger = " PTR_FORMAT,
+                                 worker_id, p2i(_finger), p2i(finger));
+        }
       }

       // read it again
@@ -3143,8 +3159,10 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
     // happens, heap_region_containing() will return the bottom of the
     // corresponding starts humongous region and the check below will
     // not hold any more.
+    // Since we always iterate over all regions, we might get a NULL HeapRegion
+    // here.
     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
-    guarantee(global_finger == global_hr->bottom(),
+    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
               err_msg("global finger: " PTR_FORMAT " region: " HR_FORMAT,
                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
   }
@@ -3157,7 +3175,7 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
     if (task_finger != NULL && task_finger < _heap_end) {
       // See above note on the global finger verification.
       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
-      guarantee(task_finger == task_hr->bottom() ||
+      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                 !task_hr->in_collection_set(),
                 err_msg("task finger: " PTR_FORMAT " region: " HR_FORMAT,
                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
@@ -3565,9 +3583,8 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
 }

 void CMTask::setup_for_region(HeapRegion* hr) {
-  // Separated the asserts so that we know which one fires.
   assert(hr != NULL,
-        "claim_region() should have filtered out continues humongous regions");
+        "claim_region() should have filtered out NULL regions");
   assert(!hr->continuesHumongous(),
         "claim_region() should have filtered out continues humongous regions");
@@ -4674,7 +4691,6 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
   _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
   _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  MemRegion g1_committed = g1h->g1_committed();
   MemRegion g1_reserved = g1h->g1_reserved();
   double now = os::elapsedTime();
@@ -4682,10 +4698,8 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
   _out->cr();
   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
-                 G1PPRL_SUM_ADDR_FORMAT("committed")
                  G1PPRL_SUM_ADDR_FORMAT("reserved")
                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
-                 p2i(g1_committed.start()), p2i(g1_committed.end()),
                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                  HeapRegion::GrainBytes);
   _out->print_cr(G1PPRL_LINE_PREFIX);
src/share/vm/gc_implementation/g1/concurrentMark.hpp

@@ -27,10 +27,12 @@
 #include "classfile/javaClasses.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/shared/gcId.hpp"
 #include "utilities/taskqueue.hpp"

 class G1CollectedHeap;
+class CMBitMap;
 class CMTask;
 typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
@@ -57,7 +59,6 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
   HeapWord* _bmStartWord;      // base address of range covered by map
   size_t    _bmWordSize;       // map size (in #HeapWords covered)
   const int _shifter;          // map to char or bit
-  VirtualSpace _virtual_space; // underlying the bit map
   BitMap    _bm;               // the bit map itself
 public:
@@ -115,42 +116,41 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
   void print_on_error(outputStream* st, const char* prefix) const;

   // debugging
-  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
+  NOT_PRODUCT(bool covers(MemRegion rs) const;)
+};
+
+class CMBitMapMappingChangedListener : public G1MappingChangedListener {
+ private:
+  CMBitMap* _bm;
+ public:
+  CMBitMapMappingChangedListener() : _bm(NULL) {}
+
+  void set_bitmap(CMBitMap* bm) { _bm = bm; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
 };

 class CMBitMap : public CMBitMapRO {
+ private:
+  CMBitMapMappingChangedListener _listener;

  public:
-  // constructor
-  CMBitMap(int shifter) :
-    CMBitMapRO(shifter) {}
+  static size_t compute_size(size_t heap_size);
+  // Returns the amount of bytes on the heap between two marks in the bitmap.
+  static size_t mark_distance();

-  // Allocates the back store for the marking bitmap
-  bool allocate(ReservedSpace heap_rs);
+  CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
+
+  // Initializes the underlying BitMap to cover the given area.
+  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
+
+  // Write marks.
+  inline void mark(HeapWord* addr);
+  inline void clear(HeapWord* addr);
+  inline bool parMark(HeapWord* addr);
+  inline bool parClear(HeapWord* addr);

-  // write marks
-  void mark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.set_bit(heapWordToOffset(addr));
-  }
-  void clear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.clear_bit(heapWordToOffset(addr));
-  }
-  bool parMark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_set_bit(heapWordToOffset(addr));
-  }
-  bool parClear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_clear_bit(heapWordToOffset(addr));
-  }
   void markRange(MemRegion mr);
-  void clearAll();
   void clearRange(MemRegion mr);

   // Starting at the bit corresponding to "addr" (inclusive), find the next
@@ -161,6 +161,9 @@ class CMBitMap : public CMBitMapRO {
   // the run.  If there is no "1" bit at or after "addr", return an empty
   // MemRegion.
   MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
+
+  // Clear the whole mark bitmap.
+  void clearAll();
 };

 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
@@ -680,7 +683,7 @@ public:
     return _task_queues->steal(worker_id, hash_seed, obj);
   }

-  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
+  ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
   ~ConcurrentMark();

   ConcurrentMarkThread* cmThread() { return _cmThread; }
@@ -736,7 +739,8 @@ public:
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();

-  // Return whether the next mark bitmap has no marks set.
+  // Return whether the next mark bitmap has no marks set. To be used for assertions
+  // only. Will not yield to pause requests.
   bool nextMarkBitmapIsClear();

   // These two do the work that needs to be done before and after the
@@ -794,12 +798,6 @@ public:
                              bool verify_thread_buffers,
                              bool verify_fingers) PRODUCT_RETURN;

-  // It is called at the end of an evacuation pause during marking so
-  // that CM is notified of where the new end of the heap is. It
-  // doesn't do anything if concurrent_marking_in_progress() is false,
-  // unless the force parameter is true.
-  void update_g1_committed(bool force = false);
-
   bool isMarked(oop p) const {
     assert(p != NULL && p->is_oop(), "expected an oop");
     HeapWord* addr = (HeapWord*)p;
src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp

@@ -268,6 +268,36 @@ inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
   return iterate(cl, mr);
 }

+#define check_mark(addr) \
+  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \
+         "outside underlying space?"); \
+  assert(G1CollectedHeap::heap()->is_in_exact(addr), \
+         err_msg("Trying to access not available bitmap "PTR_FORMAT \
+                 " corresponding to "PTR_FORMAT" (%u)", \
+                 p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
+
+inline void CMBitMap::mark(HeapWord* addr) {
+  check_mark(addr);
+  _bm.set_bit(heapWordToOffset(addr));
+}
+
+inline void CMBitMap::clear(HeapWord* addr) {
+  check_mark(addr);
+  _bm.clear_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parMark(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_set_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parClear(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_clear_bit(heapWordToOffset(addr));
+}
+
+#undef check_mark
+
 inline void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
src/share/vm/gc_implementation/g1/g1AllocRegion.hpp

@@ -173,7 +173,7 @@ public:
   // Should be called when we want to release the active region which
   // is returned after it's been retired.
-  HeapRegion* release();
+  virtual HeapRegion* release();

 #if G1_ALLOC_REGION_TRACING
   void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
浏览文件 @
f83c3096
...
...
@@ -32,64 +32,37 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
void
G1BlockOffsetSharedArrayMappingChangedListener
::
on_commit
(
uint
start_idx
,
size_t
num_regions
)
{
// Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
// retrieve it here since this would cause firing of several asserts. The code
// executed after commit of a region already needs to do some re-initialization of
// the HeapRegion, so we combine that.
}
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////
G1BlockOffsetSharedArray
::
G1BlockOffsetSharedArray
(
MemRegion
reserved
,
size_t
init_word_size
)
:
_reserved
(
reserved
),
_end
(
NULL
)
{
size_t
size
=
compute_size
(
reserved
.
word_size
());
ReservedSpace
rs
(
ReservedSpace
::
allocation_align_size_up
(
size
));
if
(
!
rs
.
is_reserved
())
{
vm_exit_during_initialization
(
"Could not reserve enough space for heap offset array"
);
}
if
(
!
_vs
.
initialize
(
rs
,
0
))
{
vm_exit_during_initialization
(
"Could not reserve enough space for heap offset array"
);
}
G1BlockOffsetSharedArray
::
G1BlockOffsetSharedArray
(
MemRegion
heap
,
G1RegionToSpaceMapper
*
storage
)
:
_reserved
(),
_end
(
NULL
),
_listener
(),
_offset_array
(
NULL
)
{
_reserved
=
heap
;
_end
=
NULL
;
MemRegion
bot_reserved
=
storage
->
reserved
();
MemTracker
::
record_virtual_memory_type
((
address
)
rs
.
base
(),
mtGC
);
_offset_array
=
(
u_char
*
)
bot_reserved
.
start
();
_end
=
_reserved
.
end
();
storage
->
set_mapping_changed_listener
(
&
_listener
);
_offset_array
=
(
u_char
*
)
_vs
.
low_boundary
();
resize
(
init_word_size
);
if
(
TraceBlockOffsetTable
)
{
gclog_or_tty
->
print_cr
(
"G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: "
);
gclog_or_tty
->
print_cr
(
" "
" rs.base(): "
INTPTR_FORMAT
" rs.size(): "
INTPTR_FORMAT
" rs end(): "
INTPTR_FORMAT
,
rs
.
base
(),
rs
.
size
(),
rs
.
base
()
+
rs
.
size
());
gclog_or_tty
->
print_cr
(
" "
" _vs.low_boundary(): "
INTPTR_FORMAT
" _vs.high_boundary(): "
INTPTR_FORMAT
,
_vs
.
low_boundary
(),
_vs
.
high_boundary
());
}
}
void
G1BlockOffsetSharedArray
::
resize
(
size_t
new_word_size
)
{
assert
(
new_word_size
<=
_reserved
.
word_size
(),
"Resize larger than reserved"
);
size_t
new_size
=
compute_size
(
new_word_size
);
size_t
old_size
=
_vs
.
committed_size
();
size_t
delta
;
char
*
high
=
_vs
.
high
();
_end
=
_reserved
.
start
()
+
new_word_size
;
if
(
new_size
>
old_size
)
{
delta
=
ReservedSpace
::
page_align_size_up
(
new_size
-
old_size
);
assert
(
delta
>
0
,
"just checking"
);
if
(
!
_vs
.
expand_by
(
delta
))
{
// Do better than this for Merlin
vm_exit_out_of_memory
(
delta
,
OOM_MMAP_ERROR
,
"offset table expansion"
);
}
assert
(
_vs
.
high
()
==
high
+
delta
,
"invalid expansion"
);
// Initialization of the contents is left to the
// G1BlockOffsetArray that uses it.
}
else
{
delta
=
ReservedSpace
::
page_align_size_down
(
old_size
-
new_size
);
if
(
delta
==
0
)
return
;
_vs
.
shrink_by
(
delta
);
assert
(
_vs
.
high
()
==
high
-
delta
,
"invalid expansion"
);
bot_reserved
.
start
(),
bot_reserved
.
byte_size
(),
bot_reserved
.
end
());
}
}
...
...
@@ -100,18 +73,7 @@ bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
}

void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
-  check_index(index_for(right - 1), "right address out of range");
-  assert(left < right, "Heap addresses out of order");
-  size_t num_cards = pointer_delta(right, left) >> LogN_words;
-  if (UseMemSetInBOT) {
-    memset(&_offset_array[index_for(left)], offset, num_cards);
-  } else {
-    size_t i = index_for(left);
-    const size_t end = i + num_cards;
-    for (; i < end; i++) {
-      _offset_array[i] = offset;
-    }
-  }
+  set_offset_array(index_for(left), index_for(right - 1), offset);
}
//////////////////////////////////////////////////////////////////////
...
...
@@ -651,6 +613,25 @@ G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
  _next_offset_index = 0;
}

+HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  _next_offset_index = _array->index_for_raw(_bottom);
+  _next_offset_index++;
+  _next_offset_threshold =
+    _array->address_for_index_raw(_next_offset_index);
+  return _next_offset_threshold;
+}
+
+void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  size_t bottom_index = _array->index_for_raw(_bottom);
+  assert(_array->address_for_index_raw(bottom_index) == _bottom,
+         "Precondition of call");
+  _array->set_offset_array_raw(bottom_index, 0);
+}
+
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
...
...
@@ -675,8 +656,7 @@ G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
  assert(new_top <= _end, "_end should have already been updated");

-  // The first BOT entry should have offset 0.
-  zero_bottom_entry();
-  initialize_threshold();
+  reset_bot();

  alloc_block(_bottom, new_top);
}
...
...
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp
...
...
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
...
...
@@ -106,6 +107,11 @@ public:
  inline HeapWord* block_start_const(const void* addr) const;
};

+class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN". An array with an entry
// for each such subregion indicates how far back one must go to find the
...
...
@@ -125,6 +131,7 @@ class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class VMStructs;
private:
+  G1BlockOffsetSharedArrayMappingChangedListener _listener;
+
  // The reserved region covered by the shared array.
  MemRegion _reserved;
...
...
@@ -133,16 +140,8 @@ private:
  // Array for keeping offsets for retrieving object start fast given an
  // address.
-  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

-  void check_index(size_t index, const char* msg) const {
-    assert(index < _vs.committed_size(),
-           err_msg("%s - "
-                   "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
-                   msg, index, _vs.committed_size()));
-  }
-
  void check_offset(size_t offset, const char* msg) const {
    assert(offset <= N_words,
           err_msg("%s - "
...
...
@@ -152,63 +151,33 @@ private:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
-  u_char offset_array(size_t index) const {
-    check_index(index, "index out of range");
-    return _offset_array[index];
-  }
+  inline u_char offset_array(size_t index) const;

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);

-  void set_offset_array(size_t index, u_char offset) {
-    check_index(index, "index out of range");
-    check_offset(offset, "offset too large");
-    _offset_array[index] = offset;
-  }
+  void set_offset_array_raw(size_t index, u_char offset) {
+    _offset_array[index] = offset;
+  }

-  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    _offset_array[index] = (u_char)pointer_delta(high, low);
-  }
+  inline void set_offset_array(size_t index, u_char offset);

-  void set_offset_array(size_t left, size_t right, u_char offset) {
-    check_index(right, "right index out of range");
-    assert(left <= right, "indexes out of order");
-    size_t num_cards = right - left + 1;
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[left], offset, num_cards);
-    } else {
-      size_t i = left;
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        _offset_array[i] = offset;
-      }
-    }
-  }
+  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);

-  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
-  }
+  inline void set_offset_array(size_t left, size_t right, u_char offset);
+
+  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;

  bool is_card_boundary(HeapWord* p) const;

public:
  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.
-  size_t compute_size(size_t mem_region_words) {
-    size_t number_of_slots = (mem_region_words / N_words) + 1;
-    return ReservedSpace::page_align_size_up(number_of_slots);
+  static size_t compute_size(size_t mem_region_words) {
+    size_t number_of_slots = (mem_region_words / N_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }

public:
  enum SomePublicConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
...
...
@@ -222,25 +191,21 @@ public:
// least "init_word_size".) The contents of the initial table are
// undefined; it is the responsibility of the constituent
// G1BlockOffsetTable(s) to initialize cards.
-  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
-
-  // Notes a change in the committed size of the region covered by the
-  // table. The "new_word_size" may not be larger than the size of the
-  // reserved region this table covers.
-  void resize(size_t new_word_size);
-
-  void set_bottom(HeapWord* new_bottom);
-
-  // Updates all the BlockOffsetArray's sharing this shared array to
-  // reflect the current "top"'s of their spaces.
-  void update_offset_arrays();
+  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;
+  inline size_t index_for_raw(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
+  // Variant of address_for_index that does not check the index for validity.
+  inline HeapWord* address_for_index_raw(size_t index) const {
+    return _reserved.start() + (index << LogN_words);
+  }

};
// And here is the G1BlockOffsetTable subtype that uses the array.
...
...
@@ -480,6 +445,14 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
                 blk_start, blk_end);
  }

+  // Variant of zero_bottom_entry that does not check for availability of the
+  // memory first.
+  void zero_bottom_entry_raw();
+  // Variant of initialize_threshold that does not check for availability of the
+  // memory first.
+  HeapWord* initialize_threshold_raw();
+  // Zero out the entry for _bottom (offset will be zero).
+  void zero_bottom_entry();
+
public:
  G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
...
...
@@ -487,8 +460,10 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
  // bottom of the covered region.
  HeapWord* initialize_threshold();

-  // Zero out the entry for _bottom (offset will be zero).
-  void zero_bottom_entry();
+  void reset_bot() {
+    zero_bottom_entry_raw();
+    initialize_threshold_raw();
+  }
// Return the next threshold, the point at which the table should be
// updated.
...
...
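As context for the compute_size() and address_for_index_raw() changes above: the shared array maps each 2^LogN-byte card of the reserved heap to one offset byte. The following is a minimal standalone sketch of that index arithmetic, not part of the patch; all names in it are illustrative only.

// Standalone sketch of the card-index arithmetic used by the shared array
// above (LogN = 9, i.e. 512-byte cards). Illustrative names, not HotSpot's.
#include <cstddef>

namespace bot_sketch {
  const size_t kLogN = 9;   // log2 of the card size in bytes

  // Index of the card covering address p, relative to the reserved base.
  inline size_t index_for_raw(const char* p, const char* reserved_start) {
    return static_cast<size_t>(p - reserved_start) >> kLogN;
  }

  // First address covered by card 'index' (the inverse mapping,
  // mirroring address_for_index_raw()).
  inline const char* address_for_index_raw(size_t index, const char* reserved_start) {
    return reserved_start + (index << kLogN);
  }
}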
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp
...
...
@@ -47,14 +47,69 @@ G1BlockOffsetTable::block_start_const(const void* addr) const {
}
}
#define check_index(index, msg) \
assert((index) < (_reserved.word_size() >> LogN_words), \
err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \
msg, (index), (_reserved.word_size() >> LogN_words))); \
assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)), \
err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT \
" (%u) is not in committed area.", \
(index), \
p2i(address_for_index_raw(index)), \
G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index))));
+u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
+  check_index(index, "index out of range");
+  return _offset_array[index];
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
+  check_index(index, "index out of range");
+  set_offset_array_raw(index, offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
+  check_index(index, "index out of range");
+  assert(high >= low, "addresses out of order");
+  size_t offset = pointer_delta(high, low);
+  check_offset(offset, "offset too large");
+  set_offset_array(index, (u_char)offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
+  check_index(right, "right index out of range");
+  assert(left <= right, "indexes out of order");
+  size_t num_cards = right - left + 1;
+  if (UseMemSetInBOT) {
+    memset(&_offset_array[left], offset, num_cards);
+  } else {
+    size_t i = left;
+    const size_t end = i + num_cards;
+    for (; i < end; i++) {
+      _offset_array[i] = offset;
+    }
+  }
+}
+
+void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
+  check_index(index, "index out of range");
+  assert(high >= low, "addresses out of order");
+  check_offset(pointer_delta(high, low), "offset too large");
+  assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
+}
+
+// Variant of index_for that does not check the index for validity.
+inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
+  return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
+}
+
inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
  char* pc = (char*)p;
  assert(pc >= (char*)_reserved.start() &&
         pc <  (char*)_reserved.end(),
         err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
-  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
-  size_t result = delta >> LogN;
+  size_t result = index_for_raw(p);
  check_index(result, "bad index from address");
  return result;
}
...
...
@@ -62,7 +117,7 @@ inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
  check_index(index, "index out of range");
-  HeapWord* result = _reserved.start() + (index << LogN_words);
+  HeapWord* result = address_for_index_raw(index);
  assert(result >= _reserved.start() && result < _reserved.end(),
         err_msg("bad address from index result " PTR_FORMAT
                 " _reserved.start() " PTR_FORMAT " _reserved.end() "
...
...
@@ -71,6 +126,8 @@ G1BlockOffsetSharedArray::address_for_index(size_t index) const {
  return result;
}

+#undef check_index
+
inline size_t G1BlockOffsetArray::block_size(const HeapWord* p) const {
  return gsp()->block_size(p);
...
src/share/vm/gc_implementation/g1/g1CardCounts.cpp
...
...
@@ -33,31 +33,26 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
+void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _counts->clear_range(mr);
+}
+
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
  if (has_count_table()) {
-    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
-           err_msg("from card num out of range: " SIZE_FORMAT, from_card_num));
    assert(from_card_num < to_card_num,
           err_msg("Wrong order? from: " SIZE_FORMAT ", to: " SIZE_FORMAT,
                   from_card_num, to_card_num));
-    assert(to_card_num <= _committed_max_card_num,
-           err_msg("to card num out of range: "
-                   "to: " SIZE_FORMAT ", "
-                   "max: " SIZE_FORMAT,
-                   to_card_num, _committed_max_card_num));
-
-    to_card_num = MIN2(_committed_max_card_num, to_card_num);
-
    Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
  }
}

G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
-  _g1h(g1h), _card_counts(NULL),
-  _reserved_max_card_num(0), _committed_max_card_num(0),
-  _committed_size(0) {
+  _listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) {
+  _listener.set_cardcounts(this);
}

-void G1CardCounts::initialize() {
+void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
  assert(_g1h->max_capacity() > 0, "initialization order");
  assert(_g1h->capacity() == 0, "initialization order");
...
...
@@ -70,70 +65,9 @@ void G1CardCounts::initialize() {
    _ct_bs = _g1h->g1_barrier_set();
    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());

-    // Allocate/Reserve the counts table
-    size_t reserved_bytes = _g1h->max_capacity();
-    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
-
-    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
-    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
-    if (!rs.is_reserved()) {
-      warning("Could not reserve enough space for the card counts table");
-      guarantee(!has_reserved_count_table(), "should be NULL");
-      return;
-    }
-
-    MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
-
-    _card_counts_storage.initialize(rs, 0);
-    _card_counts = (jubyte*) _card_counts_storage.low();
+    _card_counts = (jubyte*) mapper->reserved().start();
+    _reserved_max_card_num = mapper->reserved().byte_size();
+    mapper->set_mapping_changed_listener(&_listener);
  }
}

-void G1CardCounts::resize(size_t heap_capacity) {
-  // Expand the card counts table to handle a heap with the given capacity.
-
-  if (!has_reserved_count_table()) {
-    // Don't expand if we failed to reserve the card counts table.
-    return;
-  }
-
-  assert(_committed_size ==
-         ReservedSpace::allocation_align_size_up(_committed_size),
-         err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
-
-  // Verify that the committed space for the card counts matches our
-  // committed max card num. Note for some allocation alignments, the
-  // amount of space actually committed for the counts table will be able
-  // to span more cards than the number spanned by the maximum heap.
-  size_t prev_committed_size = _committed_size;
-  size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
-
-  assert(prev_committed_card_num == _committed_max_card_num,
-         err_msg("Card mismatch: "
-                 "prev: " SIZE_FORMAT ", "
-                 "committed: " SIZE_FORMAT ", "
-                 "reserved: " SIZE_FORMAT,
-                 prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
-
-  size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
-  size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
-  size_t new_committed_card_num = committed_to_card_num(new_committed_size);
-
-  if (_committed_max_card_num < new_committed_card_num) {
-    // we need to expand the backing store for the card counts
-    size_t expand_size = new_committed_size - prev_committed_size;
-
-    if (!_card_counts_storage.expand_by(expand_size)) {
-      warning("Card counts table backing store commit failure");
-      return;
-    }
-    assert(_card_counts_storage.committed_size() == new_committed_size,
-           "expansion commit failure");
-
-    _committed_size = new_committed_size;
-    _committed_max_card_num = new_committed_card_num;
-
-    clear_range(prev_committed_card_num, _committed_max_card_num);
-  }
-}
...
...
@@ -149,12 +83,13 @@ uint G1CardCounts::add_card_count(jbyte* card_ptr) {
  uint count = 0;
  if (has_count_table()) {
    size_t card_num = ptr_2_card_num(card_ptr);
-    if (card_num < _committed_max_card_num) {
-      count = (uint) _card_counts[card_num];
-      if (count < G1ConcRSHotCardLimit) {
-        _card_counts[card_num] =
-          (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
-      }
+    assert(card_num < _reserved_max_card_num,
+           err_msg("Card " SIZE_FORMAT " outside of card counts table (max size " SIZE_FORMAT ")",
+                   card_num, _reserved_max_card_num));
+    count = (uint) _card_counts[card_num];
+    if (count < G1ConcRSHotCardLimit) {
+      _card_counts[card_num] =
+        (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
    }
  }
  return count;
...
...
@@ -165,31 +100,23 @@ bool G1CardCounts::is_hot(uint count) {
}
void G1CardCounts::clear_region(HeapRegion* hr) {
-  assert(!hr->isHumongous(), "Should have been cleared");
+  MemRegion mr(hr->bottom(), hr->end());
+  clear_range(mr);
+}
+
+void G1CardCounts::clear_range(MemRegion mr) {
  if (has_count_table()) {
-    HeapWord* bottom = hr->bottom();
-
-    // We use the last address in hr as hr could be the
-    // last region in the heap. In which case trying to find
-    // the card for hr->end() will be an OOB accesss to the
-    // card table.
-    HeapWord* last = hr->end() - 1;
-    assert(_g1h->g1_committed().contains(last),
-           err_msg("last not in committed: "
-                   "last: " PTR_FORMAT ", "
-                   "committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
-                   last,
-                   _g1h->g1_committed().start(),
-                   _g1h->g1_committed().end()));
-
-    const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
-    const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
+    const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
+    // We use the last address in the range as the range could represent the
+    // last region in the heap. In which case trying to find the card will be an
+    // OOB access to the card table.
+    const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());

#ifdef ASSERT
    HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
-    assert(start_addr == hr->bottom(), "alignment");
+    assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
    HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
-    assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
+    assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
#endif // ASSERT
// Clear the counts for the (exclusive) card range.
...
...
@@ -199,14 +126,22 @@ void G1CardCounts::clear_region(HeapRegion* hr) {
  }
}

-void G1CardCounts::clear_all() {
-  assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
-  clear_range((size_t)0, _committed_max_card_num);
-}
-
-G1CardCounts::~G1CardCounts() {
-  if (has_reserved_count_table()) {
-    _card_counts_storage.release();
-  }
-}
+class G1CardCountsClearClosure : public HeapRegionClosure {
+ private:
+  G1CardCounts* _card_counts;
+ public:
+  G1CardCountsClearClosure(G1CardCounts* card_counts) :
+    HeapRegionClosure(), _card_counts(card_counts) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    _card_counts->clear_region(r);
+    return false;
+  }
+};
+
+void G1CardCounts::clear_all() {
+  assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
+  G1CardCountsClearClosure cl(this);
+  _g1h->heap_region_iterate(&cl);
+}
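The rewritten clear_range(MemRegion) above deliberately looks up the card for the last address of the range rather than for its end, so a range that reaches the very top of the heap never indexes one past the card table. A hedged, self-contained sketch of that boundary arithmetic (plain integers standing in for the card-table types, names illustrative only):

// Sketch of the inclusive-last-card computation used by clear_range(MemRegion).
// Assumes 512-byte cards; not HotSpot code.
#include <cstddef>

const size_t kCardShift = 9;   // log2(card size in bytes)

// Cards are then cleared over the half-open range [from_card, last_card + 1).
// Using (end - 1) keeps the lookup in bounds even when 'end' is the first
// byte past the last card of the heap.
inline void card_range_for(size_t start, size_t end,
                           size_t* from_card, size_t* last_card) {
  *from_card = start >> kCardShift;       // card holding the first byte
  *last_card = (end - 1) >> kCardShift;   // card holding the last byte
}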
src/share/vm/gc_implementation/g1/g1CardCounts.hpp
...
...
@@ -25,14 +25,26 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
class CardTableModRefBS;
+class G1CardCounts;
class G1CollectedHeap;
+class G1RegionToSpaceMapper;
class HeapRegion;

+class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
+ private:
+  G1CardCounts* _counts;
+ public:
+  void set_cardcounts(G1CardCounts* counts) { _counts = counts; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
// Table to track the number of times a card has been refined. Once
// a card has been refined a certain number of times, it is
// considered 'hot' and its refinement is delayed by inserting the
...
...
@@ -41,6 +53,8 @@ class HeapRegion;
// is 'drained' during the next evacuation pause.
class G1CardCounts: public CHeapObj<mtGC> {
+  G1CardCountsMappingChangedListener _listener;
+
  G1CollectedHeap* _g1h;
// The table of counts
...
...
@@ -49,27 +63,18 @@ class G1CardCounts: public CHeapObj<mtGC> {
  // Max capacity of the reserved space for the counts table
  size_t _reserved_max_card_num;

-  // Max capacity of the committed space for the counts table
-  size_t _committed_max_card_num;
-
-  // Size of committed space for the counts table
-  size_t _committed_size;
-
  // CardTable bottom.
  const jbyte* _ct_bot;

  // Barrier set
  CardTableModRefBS* _ct_bs;

-  // The virtual memory backing the counts table
-  VirtualSpace _card_counts_storage;
-
  // Returns true if the card counts table has been reserved.
  bool has_reserved_count_table() { return _card_counts != NULL; }

  // Returns true if the card counts table has been reserved and committed.
  bool has_count_table() {
-    return has_reserved_count_table() && _committed_max_card_num > 0;
+    return has_reserved_count_table();
  }

  size_t ptr_2_card_num(const jbyte* card_ptr) {
...
...
@@ -79,37 +84,24 @@ class G1CardCounts: public CHeapObj<mtGC> {
"_ct_bot: "
PTR_FORMAT
,
p2i
(
card_ptr
),
p2i
(
_ct_bot
)));
size_t
card_num
=
pointer_delta
(
card_ptr
,
_ct_bot
,
sizeof
(
jbyte
));
assert
(
card_num
>=
0
&&
card_num
<
_
committ
ed_max_card_num
,
assert
(
card_num
>=
0
&&
card_num
<
_
reserv
ed_max_card_num
,
err_msg
(
"card pointer out of range: "
PTR_FORMAT
,
p2i
(
card_ptr
)));
return
card_num
;
}
jbyte
*
card_num_2_ptr
(
size_t
card_num
)
{
assert
(
card_num
>=
0
&&
card_num
<
_
committ
ed_max_card_num
,
assert
(
card_num
>=
0
&&
card_num
<
_
reserv
ed_max_card_num
,
err_msg
(
"card num out of range: "
SIZE_FORMAT
,
card_num
));
return
(
jbyte
*
)
(
_ct_bot
+
card_num
);
}
// Helper routine.
// Returns the number of cards that can be counted by the given committed
// table size, with a maximum of the number of cards spanned by the max
// capacity of the heap.
size_t
committed_to_card_num
(
size_t
committed_size
)
{
return
MIN2
(
_reserved_max_card_num
,
committed_size
/
sizeof
(
jbyte
));
}
// Clear the counts table for the given (exclusive) index range.
void
clear_range
(
size_t
from_card_num
,
size_t
to_card_num
);
public:
G1CardCounts
(
G1CollectedHeap
*
g1h
);
~
G1CardCounts
();
void
initialize
();
// Resize the committed space for the card counts table in
// response to a resize of the committed space for the heap.
void
resize
(
size_t
heap_capacity
);
void
initialize
(
G1RegionToSpaceMapper
*
mapper
);
// Increments the refinement count for the given card.
// Returns the pre-increment count value.
...
...
@@ -122,8 +114,10 @@ class G1CardCounts: public CHeapObj<mtGC> {
// Clears the card counts for the cards spanned by the region
  void clear_region(HeapRegion* hr);

+  // Clears the card counts for the cards spanned by the MemRegion
+  void clear_range(MemRegion mr);
+
  // Clear the entire card counts table during GC.
-  // Updates the policy stats with the duration.
  void clear_all();
};
...
...
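For reference, the refinement-count scheme that this header and the add_card_count()/is_hot() code above implement can be sketched in isolation as follows. This is a simplified stand-in, not the patch itself: a plain vector replaces the mapper-backed table, and kHotCardLimit stands in for the G1ConcRSHotCardLimit flag.

// Self-contained sketch of the saturating per-card refinement counter.
#include <algorithm>
#include <cstdint>
#include <vector>

class CardCountSketch {
  std::vector<uint8_t> _counts;     // one byte per card, as in G1CardCounts
  const unsigned _hot_limit;        // stand-in for G1ConcRSHotCardLimit
 public:
  CardCountSketch(size_t num_cards, unsigned hot_limit)
    : _counts(num_cards, 0), _hot_limit(hot_limit) {}

  // Returns the pre-increment count and saturates at the hot limit,
  // mirroring the MIN2(...) update in the real code.
  unsigned add_card_count(size_t card_num) {
    unsigned count = _counts[card_num];
    if (count < _hot_limit) {
      _counts[card_num] =
        static_cast<uint8_t>(std::min<unsigned>(count + 1, _hot_limit));
    }
    return count;
  }

  // A card whose previous count already reached the limit is "hot" and its
  // refinement is deferred, as described in the class comment above.
  bool is_hot(unsigned count) const { return count >= _hot_limit; }
};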
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
...
...
@@ -183,6 +183,13 @@ protected:
public:
  OldGCAllocRegion()
  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+
+  // This specialization of release() makes sure that the last card that has been
+  // allocated into has been completely filled by a dummy object.
+  // This avoids races when remembered set scanning wants to update the BOT of the
+  // last card in the retained old gc alloc region, and allocation threads
+  // allocating into that card at the same time.
+  virtual HeapRegion* release();
};
// The G1 STW is alive closure.
...
...
@@ -199,6 +206,13 @@ public:
class RefineCardTableEntryClosure;

+class G1RegionMappingChangedListener : public G1MappingChangedListener {
+ private:
+  void reset_from_card_cache(uint start_idx, size_t num_regions);
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
class G1CollectedHeap : public SharedHeap {
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
...
...
@@ -237,19 +251,9 @@ private:
  static size_t _humongous_object_threshold_in_words;

-  // Storage for the G1 heap.
-  VirtualSpace _g1_storage;
-  MemRegion    _g1_reserved;
-
-  // The part of _g1_storage that is currently committed.
-  MemRegion _g1_committed;
-
  // The master free list. It will satisfy all new region allocations.
  FreeRegionList _free_list;

  // The secondary free list which contains regions that have been
-  // freed up during the cleanup process. This will be appended to the
-  // master free list when appropriate.
+  // freed up during the cleanup process. This will be appended to
+  // the master free list when appropriate.
  FreeRegionList _secondary_free_list;
// It keeps track of the old regions.
...
...
@@ -283,6 +287,9 @@ private:
// after heap shrinking (free_list_only == true).
  void rebuild_region_sets(bool free_list_only);

+  // Callback for region mapping changed events.
+  G1RegionMappingChangedListener _listener;
+
  // The sequence of all heap regions in the heap.
  HeapRegionSeq _hrs;
...
...
@@ -513,14 +520,6 @@ protected:
// humongous object, set is_old to true. If not, to false.
  HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);

-  // Attempt to satisfy a humongous allocation request of the given
-  // size by finding a contiguous set of free regions of num_regions
-  // length and remove them from the master free list. Return the
-  // index of the first region or G1_NULL_HRS_INDEX if the search
-  // was unsuccessful.
-  uint humongous_obj_allocate_find_first(uint num_regions,
-                                         size_t word_size);
-
// Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single
// humongous region.
...
...
@@ -862,11 +861,6 @@ protected:
                               CodeBlobClosure* scan_strong_code,
                               uint worker_i);

-  // Notifies all the necessary spaces that the committed space has
-  // been updated (either expanded or shrunk). It should be called
-  // after _g1_storage is updated.
-  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
-
  // The concurrent marker (and the thread it runs in.)
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmThread;
...
...
@@ -1186,27 +1180,20 @@ public:
  virtual size_t unsafe_max_alloc();

  virtual bool is_maximal_no_gc() const {
-    return _g1_storage.uncommitted_size() == 0;
+    return _hrs.available() == 0;
  }

-  // The total number of regions in the heap.
-  uint n_regions() const { return _hrs.length(); }
+  // The current number of regions in the heap.
+  uint num_regions() const { return _hrs.length(); }

  // The max number of regions in the heap.
  uint max_regions() const { return _hrs.max_length(); }

  // The number of regions that are completely free.
-  uint free_regions() const { return _free_list.length(); }
+  uint num_free_regions() const { return _hrs.num_free_regions(); }

  // The number of regions that are not completely free.
-  uint used_regions() const { return n_regions() - free_regions(); }
-
-  // The number of regions available for "regular" expansion.
-  uint expansion_regions() const { return _expansion_regions; }
-
-  // Factory method for HeapRegion instances. It will return NULL if
-  // the allocation fails.
-  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
+  uint num_used_regions() const { return num_regions() - num_free_regions(); }

  void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
...
...
@@ -1255,7 +1242,7 @@ public:
#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
-    return hr->containing_set() == &_free_list;
+    return _hrs.is_free(hr);
  }
#endif // ASSERT
...
...
@@ -1267,7 +1254,7 @@ public:
  }

  void append_secondary_free_list() {
-    _free_list.add_ordered(&_secondary_free_list);
+    _hrs.insert_list_into_free_list(&_secondary_free_list);
  }

  void append_secondary_free_list_if_not_empty_with_lock() {
...
...
@@ -1313,6 +1300,11 @@ public:
// Returns "TRUE" iff "p" points into the committed areas of the heap.
  virtual bool is_in(const void* p) const;
+#ifdef ASSERT
+  // Returns whether p is in one of the available areas of the heap. Slow but
+  // extensive version.
+  bool is_in_exact(const void* p) const;
+#endif
// Return "TRUE" iff the given object address is within the collection
// set. Slow implementation.
...
...
@@ -1373,25 +1365,19 @@ public:
// Return "TRUE" iff the given object address is in the reserved
// region of g1.
  bool is_in_g1_reserved(const void* p) const {
-    return _g1_reserved.contains(p);
+    return _hrs.reserved().contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap
-  MemRegion g1_reserved() {
-    return _g1_reserved;
-  }
-
-  // Returns a MemRegion that corresponds to the space that has been
-  // committed in the heap
-  MemRegion g1_committed() {
-    return _g1_committed;
+  MemRegion g1_reserved() const {
+    return _hrs.reserved();
  }

  virtual bool is_in_closed_subset(const void* p) const;

-  G1SATBCardTableModRefBS* g1_barrier_set() {
-    return (G1SATBCardTableModRefBS*) barrier_set();
+  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
+    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
  }
// This resets the card table to all zeros. It is used after
...
...
@@ -1425,6 +1411,8 @@ public:
// within the heap.
  inline uint addr_to_region(HeapWord* addr) const;

+  inline HeapWord* bottom_addr_for_region(uint index) const;
+
// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some
// overpartition factor, currently 4). Assumes that this will be called
...
...
@@ -1438,10 +1426,10 @@ public:
// setting the claim value of the second and subsequent regions of the
// chunk.) For now requires that "doHeapRegion" always returns "false",
// i.e., that a closure never attempt to abort a traversal.
-  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
-                                       uint worker,
-                                       uint no_of_par_workers,
-                                       jint claim_value);
+  void heap_region_par_iterate_chunked(HeapRegionClosure* cl,
+                                       uint worker_id,
+                                       uint num_workers,
+                                       jint claim_value) const;

  // It resets all the region claim values to the default.
  void reset_heap_region_claim_values();
...
...
@@ -1466,11 +1454,6 @@ public:
// starting region for iterating over the current collection set.
  HeapRegion* start_cset_region_for_worker(uint worker_i);

-  // This is a convenience method that is used by the
-  // HeapRegionIterator classes to calculate the starting region for
-  // each worker so that they do not all start from the same region.
-  HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
-
  // Iterate over the regions (if any) in the current collection set.
  void collection_set_iterate(HeapRegionClosure* blk);
...
...
@@ -1483,16 +1466,14 @@ public:
// space containing a given address, or else returns NULL.
  virtual Space* space_containing(const void* addr) const;

-  // A G1CollectedHeap will contain some number of heap regions. This
-  // finds the region containing a given address, or else returns NULL.
+  // Returns the HeapRegion that contains addr. addr must not be NULL.
  template <class T>
-  inline HeapRegion* heap_region_containing(const T addr) const;
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;

-  // Like the above, but requires "addr" to be in the heap (to avoid a
-  // null-check), and unlike the above, may return an continuing humongous
-  // region.
+  // Returns the HeapRegion that contains addr. addr must not be NULL.
+  // If addr is within a humongous continues region, it returns its humongous start region.
  template <class T>
-  inline HeapRegion* heap_region_containing_raw(const T addr) const;
+  inline HeapRegion* heap_region_containing(const T addr) const;
// each address in the (reserved) heap is a member of exactly
...
...
@@ -1635,7 +1616,6 @@ public:
// the region to which the object belongs. An object is dead
// iff a) it was not allocated since the last mark and b) it
// is not marked.
bool
is_obj_dead
(
const
oop
obj
,
const
HeapRegion
*
hr
)
const
{
return
!
hr
->
obj_allocated_since_prev_marking
(
obj
)
&&
...
...
@@ -1645,7 +1625,6 @@ public:
// This function returns true when an object has been
// around since the previous marking and hasn't yet
// been marked during this marking.
  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_next_marking(obj) &&
...
...
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
...
...
@@ -47,23 +47,26 @@ inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

+inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
+  return _hrs.reserved().start() + index * HeapRegion::GrainWords;
+}
+
template <class T>
-inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
-  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
-  // hr can be null if addr in perm_gen
-  if (hr != NULL && hr->continuesHumongous()) {
-    hr = hr->humongous_start_region();
-  }
-  return hr;
+inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(addr != NULL, "invariant");
+  assert(is_in_g1_reserved((const void*) addr),
+         err_msg("Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
+                 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
+  return _hrs.addr_to_region((HeapWord*) addr);
}

template <class T>
-inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
-  assert(_g1_reserved.contains((const void*) addr), "invariant");
-  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
-  return res;
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = heap_region_containing_raw(addr);
+  if (hr->continuesHumongous()) {
+    return hr->humongous_start_region();
+  }
+  return hr;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
...
...
@@ -88,10 +91,9 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  return r != NULL && r->in_collection_set();
}

inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     unsigned int* gc_count_before_ret,
                                                     int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");
...
...
@@ -154,8 +156,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
-  assert(containing_hr != NULL && start != NULL && word_size > 0,
-         "pre-condition");
+  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");
...
...
@@ -252,8 +253,7 @@ G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
}
}
inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
...
...
@@ -277,8 +277,10 @@ inline void G1CollectedHeap::reset_evacuation_should_fail() {
#endif // #ifndef PRODUCT
inline bool G1CollectedHeap::is_in_young(const oop obj) {
-  HeapRegion* hr = heap_region_containing(obj);
-  return hr != NULL && hr->is_young();
+  if (obj == NULL) {
+    return false;
+  }
+  return heap_region_containing(obj)->is_young();
}
// We don't need barriers for initializing stores to objects
...
...
@@ -291,21 +293,17 @@ inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
}
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
  }
-  else return is_obj_dead(obj, hr);
+  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
  }
-  else return is_obj_ill(obj, hr);
+  return is_obj_ill(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
...
...
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
...
...
@@ -455,7 +455,7 @@ void G1CollectorPolicy::init() {
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
  update_young_list_target_length();
// We may immediately start allocating regions and placing them on the
...
...
@@ -828,7 +828,7 @@ void G1CollectorPolicy::record_full_collection_end() {
  record_survivor_regions(0, NULL, NULL);

-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
...
...
@@ -1180,7 +1180,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
  update_young_list_target_length();
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
...
...
@@ -1202,7 +1202,7 @@ void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
  _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  _heap_capacity_bytes_before_gc = _g1->capacity();
  _heap_used_bytes_before_gc = _g1->used();
-  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
+  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();

  _eden_capacity_bytes_before_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
...
...
@@ -1617,7 +1617,7 @@ void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  _collectionSetChooser->clear();

-  uint region_num = _g1->n_regions();
+  uint region_num = _g1->num_regions();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    const uint OverpartitionFactor = 4;
    uint WorkUnit;
...
...
@@ -1638,7 +1638,7 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
        MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
             MinWorkUnit);
  }
-  _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
+  _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
                                                          WorkUnit);
  ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                          (int) WorkUnit);
...
...
@@ -1935,7 +1935,7 @@ uint G1CollectorPolicy::calc_max_old_cset_length() {
  // of them are available.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  const size_t region_num = g1h->n_regions();
+  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
// emulate ceiling
...
...
src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
...
...
@@ -33,7 +33,7 @@
G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}

-void G1HotCardCache::initialize() {
+void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
  if (default_use_cache()) {
    _use_cache = true;
...
...
@@ -49,7 +49,7 @@ void G1HotCardCache::initialize() {
    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
    _hot_cache_par_claimed_idx = 0;

-    _card_counts.initialize();
+    _card_counts.initialize(card_counts_storage);
  }
}
...
...
@@ -135,11 +135,8 @@ void G1HotCardCache::drain(uint worker_i,
// above, are discarded prior to re-enabling the cache near the end of the GC.
}
-void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
-  _card_counts.resize(heap_capacity);
-}
-
void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  assert(!hr->isHumongous(), "Should have been cleared");
  _card_counts.clear_region(hr);
}
...
...
src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
...
...
@@ -78,7 +78,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
  G1HotCardCache(G1CollectedHeap* g1h);
  ~G1HotCardCache();

-  void initialize();
+  void initialize(G1RegionToSpaceMapper* card_counts_storage);

  bool use_cache() { return _use_cache; }
...
...
@@ -115,9 +115,6 @@ class G1HotCardCache: public CHeapObj<mtGC> {
  bool hot_cache_is_empty() { return _n_hot == 0; }

-  // Resizes the card counts table to match the given capacity
-  void resize_card_counts(size_t heap_capacity);
-
  // Zeros the values in the card counts table for entire committed heap
  void reset_card_counts();
...
...
src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
...
...
@@ -130,9 +130,7 @@ inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
-    if (hr != NULL) {
-      _cm->grayRoot(obj, obj->size(), _worker_id, hr);
-    }
+    _cm->grayRoot(obj, obj->size(), _worker_id, hr);
  }
}
...
...
@@ -159,57 +157,61 @@ inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
+  if (obj == NULL) {
+    return;
+  }
+
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop
-  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
-    oopDesc* o = obj.obj();
+  oopDesc* o = obj.obj();
#else
-    oopDesc* o = obj;
+  oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
-    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
+  assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
+  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
#endif // ASSERT

  assert(_from != NULL, "from region must be non-NULL");
  assert(_from->is_in_reserved(p), "p is not in from");

  HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && _from != to) {
-    // The _record_refs_into_cset flag is true during the RSet
-    // updating part of an evacuation pause. It is false at all
-    // other times:
-    //  * rebuilding the rembered sets after a full GC
-    //  * during concurrent refinement.
-    //  * updating the remembered sets of regions in the collection
-    //    set in the event of an evacuation failure (when deferred
-    //    updates are enabled).
-
-    if (_record_refs_into_cset && to->in_collection_set()) {
-      // We are recording references that point into the collection
-      // set and this particular reference does exactly that...
-      // If the referenced object has already been forwarded
-      // to itself, we are handling an evacuation failure and
-      // we have already visited/tried to copy this object
-      // there is no need to retry.
-      if (!self_forwarded(obj)) {
-        assert(_push_ref_cl != NULL, "should not be null");
-        // Push the reference in the refs queue of the G1ParScanThreadState
-        // instance for this worker thread.
-        _push_ref_cl->do_oop(p);
-      }
-
-      // Deferred updates to the CSet are either discarded (in the normal case),
-      // or processed (if an evacuation failure occurs) at the end
-      // of the collection.
-      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
-      return;
-    }
+  if (_from == to) {
+    // Normally this closure should only be called with cross-region references.
+    // But since Java threads are manipulating the references concurrently and we
+    // reload the values things may have changed.
+    return;
+  }
+
+  // The _record_refs_into_cset flag is true during the RSet
+  // updating part of an evacuation pause. It is false at all
+  // other times:
+  //  * rebuilding the remembered sets after a full GC
+  //  * during concurrent refinement.
+  //  * updating the remembered sets of regions in the collection
+  //    set in the event of an evacuation failure (when deferred
+  //    updates are enabled).
+
+  if (_record_refs_into_cset && to->in_collection_set()) {
+    // We are recording references that point into the collection
+    // set and this particular reference does exactly that...
+    // If the referenced object has already been forwarded
+    // to itself, we are handling an evacuation failure and
+    // we have already visited/tried to copy this object
+    // there is no need to retry.
+    if (!self_forwarded(obj)) {
+      assert(_push_ref_cl != NULL, "should not be null");
+      // Push the reference in the refs queue of the G1ParScanThreadState
+      // instance for this worker thread.
+      _push_ref_cl->do_oop(p);
+    }
+
+    // Deferred updates to the CSet are either discarded (in the normal case),
+    // or processed (if an evacuation failure occurs) at the end
+    // of the collection.
+    // See G1RemSet::cleanup_after_oops_into_collection_set_do().
+  } else {
// We either don't care about pushing references that point into the
// collection set (i.e. we're not during an evacuation pause) _or_
// the reference doesn't point into the collection set. Either way
...
...
src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp
0 → 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
#include "utilities/bitMap.inline.hpp"
G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
  _high_boundary(NULL), _committed(), _page_size(0), _special(false), _executable(false) {
}

bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
  if (!rs.is_reserved()) {
    return false;  // Allocation failed.
  }
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(page_size > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = _low_boundary + rs.size();

  _special = rs.special();
  _executable = rs.executable();

  _page_size = page_size;

  assert(_committed.size() == 0, "virtual space initialized more than once");
  uintx size_in_bits = rs.size() / page_size;
  _committed.resize(size_in_bits, /* in_resource_area */ false);

  return true;
}

G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
  release();
}

void G1PageBasedVirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary  = NULL;
  _high_boundary = NULL;
  _special       = false;
  _executable    = false;
  _page_size     = 0;
  _committed.resize(0, false);
}

size_t G1PageBasedVirtualSpace::committed_size() const {
  return _committed.count_one_bits() * _page_size;
}

size_t G1PageBasedVirtualSpace::reserved_size() const {
  return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
}

size_t G1PageBasedVirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
  return (addr - _low_boundary) / _page_size;
}

bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
  uintptr_t end = start + size_in_pages;
  return _committed.get_next_zero_offset(start, end) >= end;
}

bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
  uintptr_t end = start + size_in_pages;
  return _committed.get_next_one_offset(start, end) >= end;
}

char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
  return _low_boundary + index * _page_size;
}

size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
  return num * _page_size;
}

MemRegion G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
  // We need to make sure to commit all pages covered by the given area.
  guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");

  if (!_special) {
    os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
                              err_msg("Failed to commit pages from " SIZE_FORMAT " of length " SIZE_FORMAT,
                                      start, size_in_pages));
  }
  _committed.set_range(start, start + size_in_pages);

  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
  return result;
}

MemRegion G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
  guarantee(is_area_committed(start, size_in_pages), "checking");

  if (!_special) {
    os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
  }

  _committed.clear_range(start, start + size_in_pages);

  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
  return result;
}

bool G1PageBasedVirtualSpace::contains(const void* p) const {
  return _low_boundary <= (const char*) p && (const char*) p < _high_boundary;
}

#ifndef PRODUCT
void G1PageBasedVirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                p2i(_low_boundary), p2i(_high_boundary));
}

void G1PageBasedVirtualSpace::print() {
  print_on(tty);
}
#endif
src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp
0 → 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/bitMap.hpp"
// Virtual space management helper for a virtual space with an OS page allocation
// granularity.
// (De-)Allocation requests are always OS page aligned by passing a page index
// and multiples of pages.
// The implementation gives an error when trying to commit or uncommit pages that
// have already been committed or uncommitted.
class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  // Reserved area addresses.
  char* _low_boundary;
  char* _high_boundary;

  // The commit/uncommit granularity in bytes.
  size_t _page_size;

  // Bitmap used for verification of commit/uncommit operations.
  BitMap _committed;

  // Indicates that the entire space has been committed and pinned in memory,
  // os::commit_memory() or os::uncommit_memory() have no function.
  bool _special;

  // Indicates whether the committed space should be executable.
  bool _executable;

  // Returns the index of the page which contains the given address.
  uintptr_t addr_to_page_index(char* addr) const;
  // Returns the address of the given page index.
  char* page_start(uintptr_t index);
  // Returns the byte size of the given number of pages.
  size_t byte_size_for_pages(size_t num);

  // Returns true if the entire area is backed by committed memory.
  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
  // Returns true if the entire area is not backed by committed memory.
  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;

 public:

  // Commit the given area of pages starting at start being size_in_pages large.
  MemRegion commit(uintptr_t start, size_t size_in_pages);

  // Uncommit the given area of pages starting at start being size_in_pages large.
  MemRegion uncommit(uintptr_t start, size_t size_in_pages);

  bool special() const { return _special; }

  // Initialization
  G1PageBasedVirtualSpace();
  bool initialize_with_granularity(ReservedSpace rs, size_t page_size);

  // Destruction
  ~G1PageBasedVirtualSpace();

  // Amount of reserved memory.
  size_t reserved_size() const;
  // Memory used in this virtual space.
  size_t committed_size() const;
  // Memory left to use/expand in this virtual space.
  size_t uncommitted_size() const;

  bool contains(const void* p) const;

  MemRegion reserved() {
    MemRegion x((HeapWord*)_low_boundary, reserved_size() / HeapWordSize);
    return x;
  }

  void release();

  void check_for_contiguity() PRODUCT_RETURN;

  // Debugging
  void print_on(outputStream* out) PRODUCT_RETURN;
  void print();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
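Since every request is expressed as a page index plus a page count, a client of this helper never handles raw addresses. A minimal usage sketch, assuming a ReservedSpace rs obtained elsewhere and a 4 KB commit granularity (the values and variable names are illustrative, not taken from this changeset):

// Illustrative only: drives the page-index based commit/uncommit API.
G1PageBasedVirtualSpace vs;
bool ok = vs.initialize_with_granularity(rs, 4 * K);   // rs and the 4 KB page size are assumptions
guarantee(ok, "could not initialize backing space");

// Commit 8 pages (32 KB) starting at page index 16.
MemRegion committed = vs.commit(16, 8);

// ... use the memory described by 'committed' ...

// Uncommit exactly the same range; committing or uncommitting a page twice
// would trip the verification against the _committed bitmap.
vs.uncommit(16, 8);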
src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp
0 → 100644
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/bitMap.inline.hpp"
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                             size_t commit_granularity,
                                             size_t region_granularity,
                                             MemoryType type) :
  _storage(),
  _commit_granularity(commit_granularity),
  _region_granularity(region_granularity),
  _listener(NULL),
  _commit_map() {
  guarantee(is_power_of_2(commit_granularity), "must be");
  guarantee(is_power_of_2(region_granularity), "must be");
  _storage.initialize_with_granularity(rs, commit_granularity);

  MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
// G1RegionToSpaceMapper implementation where the region granularity is larger than
// or the same as the commit granularity.
// Basically, the space corresponding to one region spans several OS pages.
class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
 private:
  size_t _pages_per_region;

 public:
  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
                                      size_t os_commit_granularity,
                                      size_t alloc_granularity,
                                      size_t commit_factor,
                                      MemoryType type) :
    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {

    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
  }

  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
    _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
    _commit_map.set_range(start_idx, start_idx + num_regions);
    fire_on_commit(start_idx, num_regions);
  }

  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
    _commit_map.clear_range(start_idx, start_idx + num_regions);
  }
};
// G1RegionToSpaceMapper implementation where the region granularity is smaller
// than the commit granularity.
// Basically, the contents of one OS page span several regions.
class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
 private:
  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
   protected:
    virtual uint default_value() const { return 0; }
  };

  size_t _regions_per_page;

  CommitRefcountArray _refcounts;

  uintptr_t region_idx_to_page_idx(uint region) const {
    return region / _regions_per_page;
  }

 public:
  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                       size_t os_commit_granularity,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
                                       MemoryType type) :
    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {

    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
  }

  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region " INTPTR_FORMAT " that is already committed", i));
      uintptr_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);
      if (old_refcount == 0) {
        _storage.commit(idx, 1);
      }
      _refcounts.set_by_index(idx, old_refcount + 1);
      _commit_map.set_bit(i);
      fire_on_commit(i, 1);
    }
  }

  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region " INTPTR_FORMAT " that is not committed", i));
      uintptr_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);
      assert(old_refcount > 0, "must be");
      if (old_refcount == 1) {
        _storage.uncommit(idx, 1);
      }
      _refcounts.set_by_index(idx, old_refcount - 1);
      _commit_map.clear_bit(i);
    }
  }
};
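The refcounting above is easiest to follow with concrete numbers. A small worked example (the granularities are illustrative, not values used by this patch):

// Illustrative numbers: several per-region slices of an auxiliary data
// structure share a single OS page.
size_t os_commit_granularity = 4 * K;   // one OS page
size_t commit_factor         = 1;
size_t alloc_granularity     = 1 * K;   // storage corresponding to one heap region

size_t regions_per_page = (os_commit_granularity * commit_factor) / alloc_granularity; // 4

// Regions 0..3 all map to page 0. The page is committed when the first of
// them is committed (refcount 0 -> 1), and only uncommitted again when the
// last of them is uncommitted (refcount 1 -> 0).
uintptr_t page_for_region_2 = 2 / regions_per_page;   // 0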
void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions) {
  if (_listener != NULL) {
    _listener->on_commit(start_idx, num_regions);
  }
}

G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                            size_t os_commit_granularity,
                                                            size_t region_granularity,
                                                            size_t commit_factor,
                                                            MemoryType type) {
  if (region_granularity >= (os_commit_granularity * commit_factor)) {
    return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
  } else {
    return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
  }
}
src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp
0 → 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC {
 public:
  // Fired after commit of the memory, i.e. the memory this listener is registered
  // for can be accessed.
  virtual void on_commit(uint start_idx, size_t num_regions) = 0;
};
// Maps region based commit/uncommit requests to the underlying page sized virtual
// space.
class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
 private:
  G1MappingChangedListener* _listener;
 protected:
  // Backing storage.
  G1PageBasedVirtualSpace _storage;
  size_t _commit_granularity;
  size_t _region_granularity;
  // Mapping management
  BitMap _commit_map;

  G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);

  void fire_on_commit(uint start_idx, size_t num_regions);
 public:
  MemRegion reserved() { return _storage.reserved(); }

  void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }

  virtual ~G1RegionToSpaceMapper() {
    _commit_map.resize(0, /* in_resource_area */ false);
  }

  bool is_committed(uintptr_t idx) const {
    return _commit_map.at(idx);
  }

  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;

  // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
  // The byte_translation_factor defines how many bytes in a region correspond to
  // a single byte in the data structure this mapper is for.
  // Eg. in the card table, this value corresponds to the size a single card
  // table entry corresponds to.
  static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
                                              size_t os_commit_granularity,
                                              size_t region_granularity,
                                              size_t byte_translation_factor,
                                              MemoryType type);
};

#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP */
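Following the byte_translation_factor comment above, a caller picks the factor according to how much heap a single byte of the mapped data structure describes. A hedged sketch of what call sites could look like (the ReservedSpace variables and the chosen factors are assumptions for illustration, not code from this changeset):

// Illustrative only. heap_rs and cardtable_rs are assumed to have been reserved elsewhere.

// Heap storage: one byte of storage per byte of heap, region-sized granularity.
G1RegionToSpaceMapper* heap_mapper =
  G1RegionToSpaceMapper::create_mapper(heap_rs,
                                       os::vm_page_size(),      // commit granularity
                                       HeapRegion::GrainBytes,  // region granularity
                                       1,                       // byte_translation_factor
                                       mtJavaHeap);

// Card table: one card-table byte covers CardTableModRefBS::card_size heap bytes,
// so several regions' worth of entries can share one committed page.
G1RegionToSpaceMapper* card_mapper =
  G1RegionToSpaceMapper::create_mapper(cardtable_rs,
                                       os::vm_page_size(),
                                       HeapRegion::GrainBytes,
                                       CardTableModRefBS::card_size,
                                       mtGC);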
src/share/vm/gc_implementation/g1/g1RemSet.cpp
...
@@ -211,7 +211,6 @@ public:
 #endif
       HeapRegion* card_region = _g1h->heap_region_containing(card_start);
-      assert(card_region != NULL, "Yielding cards not in the heap?");
       _cards++;

       if (!card_region->is_on_dirty_cards_region_list()) {
...
@@ -406,7 +405,6 @@ public:
     HeapWord* start = _ct_bs->addr_for(card_ptr);
     // And find the region containing it.
     HeapRegion* r = _g1->heap_region_containing(start);
-    assert(r != NULL, "unexpected null");

     // Scan oops in the card looking for references into the collection set
     // Don't use addr_for(card_ptr + 1) which can ask for
...
@@ -556,6 +554,12 @@ G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
 bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                            bool check_for_refs_into_cset) {
+  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
+         err_msg("Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
+                 p2i(card_ptr),
+                 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
+                 _ct_bs->addr_for(card_ptr),
+                 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));

   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
...
@@ -568,11 +572,6 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
   HeapWord* start = _ct_bs->addr_for(card_ptr);
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
-  if (r == NULL) {
-    // Again no need to return that this card contains refs that
-    // point into the collection set.
-    return false;  // Not in the G1 heap (might be in perm, for example.)
-  }

   // Why do we have to check here whether a card is on a young region,
   // given that we dirty young regions and, as a result, the
...
@@ -625,10 +624,6 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
     start = _ct_bs->addr_for(card_ptr);
     r = _g1->heap_region_containing(start);
-    if (r == NULL) {
-      // Not in the G1 heap
-      return false;
-    }

     // Checking whether the region we got back from the cache
     // is young here is inappropriate. The region could have been
...
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp
...
@@ -46,26 +46,28 @@ inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
 template <class T>
 inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
+  if (obj == NULL) {
+    return;
+  }
 #ifdef ASSERT
   // can't do because of races
   // assert(obj == NULL || obj->is_oop(), "expected an oop");

   // Do the safe subset of is_oop
-  if (obj != NULL) {
 #ifdef CHECK_UNHANDLED_OOPS
-    oopDesc* o = obj.obj();
+  oopDesc* o = obj.obj();
 #else
-    oopDesc* o = obj;
+  oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
-    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
+  assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
+  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT

   assert(from == NULL || from->is_in_reserved(p), "p is not in from");

   HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && from != to) {
+  if (from != to) {
     assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
     to->rem_set()->add_reference(p, tid);
   }
...
src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
...
@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
...
@@ -37,7 +38,6 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
   _kind = G1SATBCT;
 }

 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   // Nulls should have been already filtered.
   assert(pre_val->is_oop(true), "Error");
...
@@ -124,13 +124,52 @@ void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
 }
 #endif

+void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _card_table->clear(mr);
+}

 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
   G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
-  _dcqs(JavaThread::dirty_card_queue_set())
+  _dcqs(JavaThread::dirty_card_queue_set()),
+  _listener()
 {
   _kind = G1SATBCTLogging;
+  _listener.set_card_table(this);
 }

+void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
+  mapper->set_mapping_changed_listener(&_listener);
+
+  _byte_map_size = mapper->reserved().byte_size();
+
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  HeapWord* low_bound  = _whole_heap.start();
+  HeapWord* high_bound = _whole_heap.end();
+
+  _cur_covered_regions = 1;
+  _covered[0] = _whole_heap;
+
+  _byte_map = (jbyte*) mapper->reserved().start();
+  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
+  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+
+  if (TraceCardTableModRefBS) {
+    gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
+    gclog_or_tty->print_cr("  " " &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+                           p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
+    gclog_or_tty->print_cr("  " " byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
+  }
+}

 void
...
src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp
...
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP

+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oop.inline.hpp"
...
@@ -33,6 +34,7 @@
 #if INCLUDE_ALL_GCS

 class DirtyCardQueueSet;
+class G1SATBCardTableLoggingModRefBS;

 // This barrier is specialized to use a logging barrier to support
 // snapshot-at-the-beginning marking.
...
@@ -126,18 +128,40 @@ public:
     jbyte val = _byte_map[card_index];
     return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
   }
 };

+class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
+ private:
+  G1SATBCardTableLoggingModRefBS* _card_table;
+ public:
+  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
+
+  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};

 // Adds card-table logging to the post-barrier.
 // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
 class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
+  friend class G1SATBCardTableLoggingModRefBSChangedListener;
  private:
+  G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
  public:
+  static size_t compute_size(size_t mem_region_size_in_words) {
+    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
+  }
+
   G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                  int max_covered_regions);
+
+  virtual void initialize() { }
+  virtual void initialize(G1RegionToSpaceMapper* mapper);
+
+  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }

   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::G1SATBCTLogging ||
            G1SATBCardTableModRefBS::is_a(bsn);
...
@@ -154,8 +178,6 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
   void write_region_work(MemRegion mr)    { invalidate(mr); }
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
 };
...
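compute_size() only translates a heap size into a card-table size. A worked check of the arithmetic, with illustrative values (assuming 8-byte HeapWords and card_size_in_words == 64, i.e. the usual 512-byte cards; not taken from the patch):

// Illustrative only: sizing the card-table backing store for a 1 GB heap.
size_t heap_words = (size_t)1024 * 1024 * 1024 / HeapWordSize;       // 134217728 words
size_t slots      = heap_words / 64;                                 // 2097152 card entries
size_t bytes      = ReservedSpace::allocation_align_size_up(slots);  // rounded up to the reservation alignment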
src/share/vm/gc_implementation/g1/heapRegion.cpp
...
@@ -344,11 +344,6 @@ HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
   return low;
 }

-#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
-#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
-#endif // _MSC_VER

 HeapRegion::HeapRegion(uint hrs_index,
                        G1BlockOffsetSharedArray* sharedOffsetArray,
                        MemRegion mr) :
...
@@ -360,7 +355,7 @@ HeapRegion::HeapRegion(uint hrs_index,
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _young_type(NotYoung), _next_young_region(NULL),
-    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
+    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
 #ifdef ASSERT
     _containing_set(NULL),
 #endif // ASSERT
...
@@ -369,14 +364,20 @@ HeapRegion::HeapRegion(uint hrs_index,
     _predicted_bytes_to_copy(0)
 {
   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
-  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
+  initialize(mr);
+}
+
+void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
+  assert(_rem_set->is_empty(), "Remembered set must be empty");
+
+  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

   _orig_end = mr.end();
   // Note that initialize() will set the start of the unmarked area of the
   // region.
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
   record_top_and_timestamp();
+
+  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 }

 CompactibleSpace* HeapRegion::next_compaction_space() const {
...
@@ -907,7 +908,7 @@ void HeapRegion::verify(VerifyOption vo,
   }

   // If it returns false, verify_for_object() will output the
-  // appropriate messasge.
+  // appropriate message.
   if (do_bot_verify &&
       !g1->is_obj_dead(obj, this) &&
       !_offsets.verify_for_object(p, obj_size)) {
...
@@ -1038,8 +1039,7 @@ void G1OffsetTableContigSpace::clear(bool mangle_space) {
   set_top(bottom());
   set_saved_mark_word(bottom());
   CompactibleSpace::clear(mangle_space);
-  _offsets.zero_bottom_entry();
-  _offsets.initialize_threshold();
+  reset_bot();
 }

 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
...
@@ -1129,9 +1129,11 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
   _gc_time_stamp(0)
 {
   _offsets.set_space(this);
   // false ==> we'll do the clearing if there's clearing to be done.
   CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
 }

 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   CompactibleSpace::initialize(mr, clear_space, mangle_space);
   _top = bottom();
   _offsets.zero_bottom_entry();
   _offsets.initialize_threshold();
   reset_bot();
 }
src/share/vm/gc_implementation/g1/heapRegion.hpp
...
@@ -62,7 +62,7 @@ class nmethod;
   p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())

 // sentinel value for hrs_index
-#define G1_NULL_HRS_INDEX ((uint) -1)
+#define G1_NO_HRS_INDEX ((uint) -1)

 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
...
@@ -146,6 +146,9 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
   HeapWord* top() const { return _top; }

  protected:
   // Reset the G1OffsetTableContigSpace.
   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

   HeapWord** top_addr() { return &_top; }
   // Allocation helpers (return NULL if full).
   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
...
@@ -200,8 +203,7 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
   virtual void print() const;

   void reset_bot() {
-    _offsets.zero_bottom_entry();
-    _offsets.initialize_threshold();
+    _offsets.reset_bot();
   }

   void update_bot_for_object(HeapWord* start, size_t word_size) {
...
@@ -264,7 +266,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
 #ifdef ASSERT
   HeapRegionSetBase* _containing_set;
 #endif // ASSERT
-  bool _pending_removal;

   // For parallel heapRegion traversal.
   jint _claimed;
...
@@ -333,6 +334,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
              G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr);

+  // Initializing the HeapRegion not only resets the data structure, but also
+  // resets the BOT for that heap region.
+  // The default values for clear_space means that we will do the clearing if
+  // there's clearing to be done ourselves. We also always mangle the space.
+  virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
+
   static int LogOfHRGrainBytes;
   static int LogOfHRGrainWords;
...
@@ -553,26 +560,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // to provide a dummy version of it.
 #endif // ASSERT

-  // If we want to remove regions from a list in bulk we can simply tag
-  // them with the pending_removal tag and call the
-  // remove_all_pending() method on the list.
-  bool pending_removal() { return _pending_removal; }
-
-  void set_pending_removal(bool pending_removal) {
-    if (pending_removal) {
-      assert(!_pending_removal && containing_set() != NULL,
-             "can only set pending removal to true if it's false and "
-             "the region belongs to a region set");
-    } else {
-      assert(_pending_removal && containing_set() == NULL,
-             "can only set pending removal to false if it's true and "
-             "the region does not belong to a region set");
-    }
-    _pending_removal = pending_removal;
-  }
-
   HeapRegion* get_next_young_region() { return _next_young_region; }
   void set_next_young_region(HeapRegion* hr) {
     _next_young_region = hr;
...
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
...
@@ -372,17 +372,17 @@ void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
                                                         _max_regions,
                                                         &_static_mem_size);

-  for (uint i = 0; i < n_par_rs; i++) {
-    for (uint j = 0; j < _max_regions; j++) {
-      set(i, j, InvalidCard);
-    }
-  }
+  invalidate(0, _max_regions);
 }

-void FromCardCache::shrink(uint new_num_regions) {
+void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
+  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
+            err_msg("Trying to invalidate beyond maximum region, from %u size " SIZE_FORMAT,
+                    start_idx, new_num_regions));
   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
-    assert(new_num_regions <= _max_regions, "Must be within max.");
-    for (uint j = new_num_regions; j < _max_regions; j++) {
+    uint end_idx = (start_idx + (uint)new_num_regions);
+    assert(end_idx <= _max_regions, "Must be within max.");
+    for (uint j = start_idx; j < end_idx; j++) {
       set(i, j, InvalidCard);
     }
   }
...
@@ -406,12 +406,12 @@ void FromCardCache::clear(uint region_idx) {
   }
 }

-void OtherRegionsTable::init_from_card_cache(uint max_regions) {
+void OtherRegionsTable::initialize(uint max_regions) {
   FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
 }

-void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
-  FromCardCache::shrink(new_num_regions);
+void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
+  FromCardCache::invalidate(start_idx, num_regions);
 }

 void OtherRegionsTable::print_from_card_cache() {
...
@@ -802,7 +802,6 @@ bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
-  if (hr == NULL) return false;
   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
   // Is this region in the coarse map?
   if (_coarse_map.at(hr_ind)) return true;
...
@@ -840,8 +839,8 @@ uint HeapRegionRemSet::num_par_rem_sets() {
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
   : _bosa(bosa),
-    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #" UINT32_FORMAT, hr->hrs_index()), true),
-    _code_roots(),
-    _other_regions(hr, &_m) {
+    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
+    _code_roots(),
+    _other_regions(hr, &_m),
+    _iter_state(Unclaimed), _iter_claimed(0) {
   reset_for_par_iteration();
 }
...
src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
...
@@ -84,7 +84,7 @@ class FromCardCache : public AllStatic {
   static void initialize(uint n_par_rs, uint max_num_regions);

-  static void shrink(uint new_num_regions);
+  static void invalidate(uint start_idx, size_t num_regions);

   static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
...
@@ -213,11 +213,11 @@ public:
   // Declare the heap size (in # of regions) to the OtherRegionsTable.
   // (Uses it to initialize from_card_cache).
-  static void init_from_card_cache(uint max_regions);
+  static void initialize(uint max_regions);

-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  // Make sure any entries for higher regions are invalid.
-  static void shrink_from_card_cache(uint new_num_regions);
+  // Declares that regions between start_idx <= i < start_idx + num_regions are
+  // not in use. Make sure that any entries for these regions are invalid.
+  static void invalidate(uint start_idx, size_t num_regions);

   static void print_from_card_cache();
...
@@ -404,12 +404,11 @@ public:
   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   // (Uses it to initialize from_card_cache).
   static void init_heap(uint max_regions) {
-    OtherRegionsTable::init_from_card_cache(max_regions);
+    OtherRegionsTable::initialize(max_regions);
   }

-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  static void shrink_heap(uint new_n_regs) {
-    OtherRegionsTable::shrink_from_card_cache(new_n_regs);
+  static void invalidate(uint start_idx, uint num_regions) {
+    OtherRegionsTable::invalidate(start_idx, num_regions);
   }

 #ifndef PRODUCT
...
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp
...
@@ -27,28 +27,32 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"

-inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
-  assert(addr < heap_end(),
-         err_msg("addr: " PTR_FORMAT " end: " PTR_FORMAT, p2i(addr), p2i(heap_end())));
-  assert(addr >= heap_bottom(),
-         err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
+inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
   HeapRegion* hr = _regions.get_by_address(addr);
   assert(hr != NULL, "invariant");
   return hr;
 }

+inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
+  if (addr != NULL && addr < heap_end()) {
+    assert(addr >= heap_bottom(),
+           err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
+    return addr_to_region_unsafe(addr);
+  }
+  return NULL;
+}
+
 inline HeapRegion* HeapRegionSeq::at(uint index) const {
-  assert(index < length(), "pre-condition");
+  assert(is_available(index), "pre-condition");
   HeapRegion* hr = _regions.get_by_index(index);
   assert(hr != NULL, "sanity");
   assert(hr->hrs_index() == index, "sanity");
   return hr;
 }

 inline void HeapRegionSeq::insert_into_free_list(HeapRegion* hr) {
   _free_list.add_ordered(hr);
 }

 inline void HeapRegionSeq::allocate_free_regions_starting_at(uint first, uint num_regions) {
   _free_list.remove_starting_at(at(first), num_regions);
 }
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
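The split between the checked and the unchecked lookup is easiest to see from a caller's point of view. A hedged sketch of a hypothetical caller (not code from this changeset):

// Hypothetical caller, for illustration only.
HeapRegion* lookup(HeapRegionSeq* seq, HeapWord* addr, bool addr_known_in_heap) {
  if (addr_known_in_heap) {
    // Skips the range test; only valid for addresses inside the reserved heap.
    return seq->addr_to_region_unsafe(addr);
  }
  // Tolerates NULL and out-of-heap addresses by returning NULL instead of asserting.
  return seq->addr_to_region(addr);
}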
src/share/vm/gc_implementation/g1/heapRegionSet.cpp
src/share/vm/gc_implementation/g1/heapRegionSet.hpp
src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp
src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
...
@@ -43,10 +43,9 @@
   nonstatic_field(G1HeapRegionTable, _shift_by,           uint)                 \
                                                                                 \
   nonstatic_field(HeapRegionSeq,     _regions,            G1HeapRegionTable)    \
-  nonstatic_field(HeapRegionSeq,     _committed_length,   uint)                 \
+  nonstatic_field(HeapRegionSeq,     _num_committed,      uint)                 \
                                                                                 \
   nonstatic_field(G1CollectedHeap,   _hrs,                HeapRegionSeq)        \
-  nonstatic_field(G1CollectedHeap,   _g1_committed,       MemRegion)            \
   nonstatic_field(G1CollectedHeap,   _summary_bytes_used, size_t)               \
   nonstatic_field(G1CollectedHeap,   _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap,   _old_set,            HeapRegionSetBase)    \
...
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
...
@@ -78,6 +78,7 @@ jint ParallelScavengeHeap::initialize() {
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

   CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+  barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
   if (_barrier_set == NULL) {
...
src/share/vm/memory/cardTableModRefBS.cpp
src/share/vm/memory/cardTableModRefBS.hpp
...
@@ -96,12 +96,12 @@ class CardTableModRefBS: public ModRefBarrierSet {
   // The declaration order of these const fields is important; see the
   // constructor before changing.
   const MemRegion _whole_heap;       // the region covered by the card table
-  const size_t    _guard_index;      // index of very last element in the card
+  size_t          _guard_index;      // index of very last element in the card
                                      // table; it is set to a guard value
                                      // (last_card) and should never be modified
-  const size_t    _last_valid_index; // index of the last valid element
+  size_t          _last_valid_index; // index of the last valid element
   const size_t    _page_size;        // page size used when mapping _byte_map
-  const size_t    _byte_map_size;    // in bytes
+  size_t          _byte_map_size;    // in bytes
   jbyte*          _byte_map;         // the card marking array

   int _cur_covered_regions;
...
@@ -123,7 +123,12 @@ class CardTableModRefBS: public ModRefBarrierSet {
  protected:
   // Initialization utilities; covered_words is the size of the covered region
   // in, um, words.
-  inline size_t cards_required(size_t covered_words);
+  inline size_t cards_required(size_t covered_words) {
+    // Add one for a guard card, used to detect errors.
+    const size_t words = align_size_up(covered_words, card_size_in_words);
+    return words / card_size_in_words + 1;
+  }
+
   inline size_t compute_byte_map_size();

   // Finds and return the index of the region, if any, to which the given
...
@@ -137,7 +142,7 @@ class CardTableModRefBS: public ModRefBarrierSet {
   int find_covering_region_containing(HeapWord* addr);

   // Resize one of the regions covered by the remembered set.
-  void resize_covered_region(MemRegion new_region);
+  virtual void resize_covered_region(MemRegion new_region);

   // Returns the leftmost end of a committed region corresponding to a
   // covered region before covered region "ind", or else "NULL" if "ind" is
...
@@ -282,6 +287,8 @@ public:
   CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
   ~CardTableModRefBS();

+  virtual void initialize();
+
   // *** Barrier set functions.

   bool has_write_ref_pre_barrier() { return false; }
...
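A quick sanity check of the now-inlined cards_required() arithmetic, with illustrative values (assuming 8-byte HeapWords, so card_size_in_words == 64 for 512-byte cards; not values from the patch):

// Illustrative arithmetic only.
size_t covered_words = 131072;                                     // 1 MB of heap in words
size_t words = align_size_up(covered_words, (size_t)64);          // 131072, already card-aligned
size_t cards = words / 64 + 1;                                     // 2049 entries, including the guard card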
src/share/vm/memory/cardTableRS.cpp
...
@@ -53,6 +53,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap,
 #else
   _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
 #endif
+  _ct_bs->initialize();
   set_bs(_ct_bs);
   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
                                            mtGC, 0, AllocFailStrategy::RETURN_NULL);
...
src/share/vm/opto/c2_globals.hpp
src/share/vm/opto/callGenerator.cpp
src/share/vm/opto/callGenerator.hpp
src/share/vm/opto/callnode.cpp
...
@@ -1089,6 +1089,7 @@ const Type *SafePointNode::Value( PhaseTransform *phase ) const {
 #ifndef PRODUCT
 void SafePointNode::dump_spec(outputStream *st) const {
   st->print(" SafePoint ");
+  _replaced_nodes.dump(st);
 }
 #endif
...
src/share/vm/opto/callnode.hpp
src/share/vm/opto/compile.cpp
src/share/vm/opto/compile.hpp
src/share/vm/opto/doCall.cpp
src/share/vm/opto/graphKit.cpp
src/share/vm/opto/graphKit.hpp
src/share/vm/opto/ifnode.cpp
src/share/vm/opto/library_call.cpp
src/share/vm/opto/node.cpp
src/share/vm/opto/parse.hpp
src/share/vm/opto/parse1.cpp
src/share/vm/opto/replacednodes.cpp
0 → 100644
src/share/vm/opto/replacednodes.hpp
0 → 100644
src/share/vm/prims/jni.cpp
src/share/vm/prims/whitebox.cpp
src/share/vm/runtime/arguments.cpp
src/share/vm/runtime/globals.hpp
src/share/vm/utilities/growableArray.hpp
test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java
test/gc/class_unloading/TestG1ClassUnloadingHWM.java
test/gc/g1/TestEagerReclaimHumongousRegions2.java