openanolis / dragonwell11: commit 9c664186
Commit 9c664186, authored 13 years ago by trims

Merge of parents 13c02735 and 088aa568. No related merge requests.

Showing 61 changed files with 1401 additions and 834 deletions (+1401, -834)
hotspot/make/cscope.make  +45 -66
hotspot/make/linux/Makefile  +1 -1
hotspot/make/linux/makefiles/cscope.make  +0 -160
hotspot/make/solaris/Makefile  +1 -1
hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp  +1 -1
hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp  +2 -2
hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp  +18 -18
hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp  +8 -9
hotspot/src/cpu/x86/vm/vm_version_x86.cpp  +15 -2
hotspot/src/os/linux/vm/globals_linux.hpp  +13 -7
hotspot/src/os/linux/vm/os_linux.cpp  +108 -11
hotspot/src/os/linux/vm/os_linux.hpp  +3 -0
hotspot/src/os/solaris/vm/os_solaris.cpp  +17 -1
hotspot/src/share/vm/c1/c1_Runtime1.cpp  +13 -1
hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp  +1 -8
hotspot/src/share/vm/ci/ciEnv.cpp  +2 -2
hotspot/src/share/vm/ci/ciMethod.hpp  +21 -4
hotspot/src/share/vm/ci/ciObject.cpp  +10 -0
hotspot/src/share/vm/classfile/javaClasses.cpp  +13 -3
hotspot/src/share/vm/classfile/javaClasses.hpp  +2 -2
hotspot/src/share/vm/compiler/compileBroker.cpp  +11 -11
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp  +12 -1
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp  +2 -3
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  +48 -5
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp  +17 -0
hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp  +178 -0
hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp  +203 -0
hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp  +5 -0
hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp  +41 -38
hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp  +5 -2
hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp  +4 -3
hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp  +66 -0
hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp  +87 -0
hotspot/src/share/vm/memory/cardTableModRefBS.cpp  +18 -14
hotspot/src/share/vm/memory/cardTableModRefBS.hpp  +26 -18
hotspot/src/share/vm/memory/cardTableRS.cpp  +124 -93
hotspot/src/share/vm/memory/cardTableRS.hpp  +18 -1
hotspot/src/share/vm/memory/genCollectedHeap.hpp  +3 -3
hotspot/src/share/vm/memory/genOopClosures.hpp  +2 -2
hotspot/src/share/vm/memory/sharedHeap.cpp  +3 -8
hotspot/src/share/vm/memory/sharedHeap.hpp  +4 -6
hotspot/src/share/vm/oops/cpCacheOop.cpp  +11 -9
hotspot/src/share/vm/opto/escape.cpp  +7 -4
hotspot/src/share/vm/opto/graphKit.cpp  +24 -15
hotspot/src/share/vm/opto/graphKit.hpp  +2 -4
hotspot/src/share/vm/opto/library_call.cpp  +3 -7
hotspot/src/share/vm/opto/loopopts.cpp  +3 -6
hotspot/src/share/vm/opto/macro.cpp  +10 -3
hotspot/src/share/vm/opto/memnode.cpp  +27 -31
hotspot/src/share/vm/opto/stringopts.cpp  +4 -4
hotspot/src/share/vm/prims/methodHandleWalk.cpp  +2 -4
hotspot/src/share/vm/prims/methodHandles.cpp  +101 -110
hotspot/src/share/vm/prims/methodHandles.hpp  +8 -8
hotspot/src/share/vm/runtime/globals.hpp  +3 -0
hotspot/src/share/vm/runtime/javaCalls.cpp  +1 -1
hotspot/src/share/vm/runtime/sharedRuntime.cpp  +4 -4
hotspot/src/share/vm/runtime/vmThread.cpp  +3 -1
hotspot/src/share/vm/services/g1MemoryPool.cpp  +10 -27
hotspot/src/share/vm/services/g1MemoryPool.hpp  +3 -85
hotspot/src/share/vm/utilities/exceptions.cpp  +3 -3
hotspot/src/share/vm/utilities/exceptions.hpp  +1 -1
hotspot/make/solaris/makefiles/cscope.make → hotspot/make/cscope.make
@@ -22,29 +22,23 @@
#
#

# The cscope.out file is made in the current directory and spans the entire
# source tree.
#
# Things to note:
#   1. We use relative names for cscope.
#   2. We *don't* remove the old cscope.out file, because cscope is smart
#      enough to only build what has changed.  It can be confused, however,
#      if files are renamed or removed, so it may be necessary to manually
#      remove cscope.out if a lot of reorganization has occurred.
#
# The cscope.out file is generated in the current directory.  The old cscope.out
# file is *not* removed because cscope is smart enough to only build what has
# changed.  cscope can be confused if files are renamed or removed, so it may be
# necessary to remove cscope.out (gmake cscope.clean) if a lot of reorganization
# has occurred.

include $(GAMMADIR)/make/scm.make

NAWK          = /usr/xpg4/bin/awk
RM            = rm -f
HG            = hg
CS_TOP        = ../..
CS_TOP        = $(GAMMADIR)

CSDIRS        = $(CS_TOP)/src $(CS_TOP)/make
CSINCS        = $(CSDIRS:%=-I%)

CSCOPE        = cscope
CSCOPE_OUT    = cscope.out
CSCOPE_FLAGS  = -b

# Allow .java files to be added from the environment (CSCLASSES=yes).

@@ -61,25 +55,22 @@ ifndef CSHEADERS
RMCCHEADERS   = -o -name CClassHeaders
endif

# Use CS_GENERATED=x to include auto-generated files in the make directories.
ifdef CS_GENERATED
CS_ADD_GENERATED    = -o -name '*.incl'
else
CS_PRUNE_GENERATED  = -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?'
endif
# Ignore build products.
CS_PRUNE_GENERATED  = -o -name '${OSNAME}_*_core' -o \
                      -name '${OSNAME}_*_compiler?'

# OS-specific files for other systems are excluded by default.  Use CS_OS=yes
# to include platform-specific files for other platforms.
ifndef CS_OS
CS_OS         = linux macos solaris win32
CS_PRUNE_OS   = $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS}))
# O/S-specific files for all systems are included by default.  Set CS_OS to a
# space-separated list of identifiers to include only those systems.
ifdef CS_OS
CS_PRUNE_OS   = $(patsubst %,-o -name '*%*', \
                  $(filter-out ${CS_OS},linux macos solaris windows))
endif

# Processor-specific files for other processors are excluded by default.  Use
# CS_CPU=x to include platform-specific files for other platforms.
ifndef CS_CPU
CS_CPU        = i486 sparc amd64 ia64
CS_PRUNE_CPU  = $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU}))
# CPU-specific files for all processors are included by default.  Set CS_CPU
# space-separated list identifiers to include only those CPUs.
ifdef CS_CPU
CS_PRUNE_CPU  = $(patsubst %,-o -name '*%*', \
                  $(filter-out ${CS_CPU},arm ppc sparc x86 zero))
endif

# What files should we include?  A simple rule might be just those files under

@@ -95,10 +86,14 @@ CS_PRUNE_STD = $(SCM_DIRS) \
                -o -name '*demo' \
                -o -name pkgarchive

# Placeholder for user-defined excludes.
CS_PRUNE_EX   =

CS_PRUNE      = $(CS_PRUNE_STD) \
                $(CS_PRUNE_OS) \
                $(CS_PRUNE_CPU) \
                $(CS_PRUNE_GENERATED) \
                $(CS_PRUNE_EX) \
                $(RMCCHEADERS)

# File names to include.

@@ -114,49 +109,33 @@ CSFILENAMES = -name '*.[ch]pp' \
                -o -name '*.ad' \
                $(ADDCLASSES)

.PHONY:    cscope cscope.clean cscope.scratch TAGS.clean FORCE
.PRECIOUS: cscope.out

cscope cscope.out: cscope.files FORCE
	$(CSCOPE) $(CSCOPE_FLAGS)
cscope $(CSCOPE_OUT): cscope.files FORCE
	$(CSCOPE) -f $(CSCOPE_OUT) $(CSCOPE_FLAGS)

# The .raw file is reordered here in an attempt to make cscope display the most
# relevant files first.
cscope.files: .cscope.files.raw
	echo "$(CSINCS)" > $@
	-egrep -v "\.java|\/make\/" $< >> $@
	-fgrep ".java"  $< >> $@
	-fgrep "/make/" $< >> $@
cscope.clean:
	$(QUIETLY) $(RM) $(CSCOPE_OUT) cscope.files

.cscope.files.raw: .nametable.files
	-find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
	    -type f \( $(CSFILENAMES) \) -print > $@
cscope.scratch: cscope.clean cscope

cscope.clean: nametable.clean
	-$(RM) cscope.out cscope.files .cscope.files.raw
# The raw list is reordered so cscope displays the most relevant files first.
cscope.files:
	$(QUIETLY) \
	raw=cscope.$$$$; \
	find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
	    -type f \( $(CSFILENAMES) \) -print > $$raw; \
	{ \
	echo "$(CSINCS)"; \
	egrep -v "\.java|/make/" $$raw; \
	fgrep ".java" $$raw; \
	fgrep "/make/" $$raw; \
	} > $@; \
	rm -f $$raw

TAGS: cscope.files FORCE
	egrep -v '^-|^$$' $< | etags --members -

TAGS.clean: nametable.clean
	-$(RM) TAGS

# .nametable.files and .nametable.files.tmp are used to determine if any files
# were added to/deleted from/renamed in the workspace.  If not, then there's
# normally no need to rebuild the cscope database. To force a rebuild of
# the cscope database: gmake nametable.clean.
.nametable.files: .nametable.files.tmp
	( cmp -s $@ $< ) || ( cp $< $@ )
	-$(RM) $<

# `hg status' is slightly faster than `hg fstatus'. Both are
# quite a bit slower on an NFS mounted file system, so this is
# really geared towards repos on local file systems.
.nametable.files.tmp:
	-$(HG) fstatus -acmn > $@

nametable.clean:
	-$(RM) .nametable.files .nametable.files.tmp

FORCE:

.PHONY: cscope cscope.clean TAGS.clean nametable.clean FORCE

TAGS.clean:
	$(RM) TAGS
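For orientation, the consolidated makefile is driven through the targets it defines above: running gmake cscope from a build directory that includes this file builds or incrementally refreshes cscope.out, gmake cscope.scratch discards the old database first, gmake TAGS produces an etags file from the same file list, and environment variables such as CSCLASSES=yes or CS_CPU="sparc x86" widen the indexed file set. This reading of the targets is inferred from the rules shown in the diff, not from separate documentation.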
hotspot/make/linux/Makefile
@@ -359,7 +359,7 @@ clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark:
clean:  clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_docs

include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
include $(GAMMADIR)/make/cscope.make

#-------------------------------------------------------------------------------
hotspot/make/linux/makefiles/cscope.make (deleted, 100644 → 0)
#
# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#

#
# The cscope.out file is made in the current directory and spans the entire
# source tree.
#
# Things to note:
#   1. We use relative names for cscope.
#   2. We *don't* remove the old cscope.out file, because cscope is smart
#      enough to only build what has changed.  It can be confused, however,
#      if files are renamed or removed, so it may be necessary to manually
#      remove cscope.out if a lot of reorganization has occurred.
#

include $(GAMMADIR)/make/scm.make

NAWK          = awk
RM            = rm -f
HG            = hg
CS_TOP        = ../..

CSDIRS        = $(CS_TOP)/src $(CS_TOP)/build
CSINCS        = $(CSDIRS:%=-I%)

CSCOPE        = cscope
CSCOPE_FLAGS  = -b

# Allow .java files to be added from the environment (CSCLASSES=yes).
ifdef CSCLASSES
ADDCLASSES    = -o -name '*.java'
endif

# Adding CClassHeaders also pushes the file count of a full workspace up about
# 200 files (these files also don't exist in a new workspace, and thus will
# cause the recreation of the database as they get created, which might seem
# a little confusing).  Thus allow these files to be added from the environment
# (CSHEADERS=yes).
ifndef CSHEADERS
RMCCHEADERS   = -o -name CClassHeaders
endif

# Use CS_GENERATED=x to include auto-generated files in the build directories.
ifdef CS_GENERATED
CS_ADD_GENERATED    = -o -name '*.incl'
else
CS_PRUNE_GENERATED  = -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?'
endif

# OS-specific files for other systems are excluded by default.  Use CS_OS=yes
# to include platform-specific files for other platforms.
ifndef CS_OS
CS_OS         = linux macos solaris win32
CS_PRUNE_OS   = $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS}))
endif

# Processor-specific files for other processors are excluded by default.  Use
# CS_CPU=x to include platform-specific files for other platforms.
ifndef CS_CPU
CS_CPU        = i486 sparc amd64 ia64
CS_PRUNE_CPU  = $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU}))
endif

# What files should we include?  A simple rule might be just those files under
# SCCS control, however this would miss files we create like the opcodes and
# CClassHeaders.  The following attempts to find everything that is *useful*.
# (.del files are created by sccsrm, demo directories contain many .java files
# that probably aren't useful for development, and the pkgarchive may contain
# duplicates of files within the source hierarchy).

# Directories to exclude.
CS_PRUNE_STD  = $(SCM_DIRS) \
                -o -name '.del-*' \
                -o -name '*demo' \
                -o -name pkgarchive

CS_PRUNE      = $(CS_PRUNE_STD) \
                $(CS_PRUNE_OS) \
                $(CS_PRUNE_CPU) \
                $(CS_PRUNE_GENERATED) \
                $(RMCCHEADERS)

# File names to include.
CSFILENAMES   = -name '*.[ch]pp' \
                -o -name '*.[Ccshlxy]' \
                $(CS_ADD_GENERATED) \
                -o -name '*.il' \
                -o -name '*.cc' \
                -o -name '*[Mm]akefile*' \
                -o -name '*.gmk' \
                -o -name '*.make' \
                -o -name '*.ad' \
                $(ADDCLASSES)

.PRECIOUS: cscope.out

cscope cscope.out: cscope.files FORCE
	$(CSCOPE) $(CSCOPE_FLAGS)

# The .raw file is reordered here in an attempt to make cscope display the most
# relevant files first.
cscope.files: .cscope.files.raw
	echo "$(CSINCS)" > $@
	-egrep -v "\.java|\/make\/" $< >> $@
	-fgrep ".java"  $< >> $@
	-fgrep "/make/" $< >> $@

.cscope.files.raw: .nametable.files
	-find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
	    -type f \( $(CSFILENAMES) \) -print > $@

cscope.clean: nametable.clean
	-$(RM) cscope.out cscope.files .cscope.files.raw

TAGS: cscope.files FORCE
	egrep -v '^-|^$$' $< | etags --members -

TAGS.clean: nametable.clean
	-$(RM) TAGS

# .nametable.files and .nametable.files.tmp are used to determine if any files
# were added to/deleted from/renamed in the workspace.  If not, then there's
# normally no need to rebuild the cscope database. To force a rebuild of
# the cscope database: gmake nametable.clean.
.nametable.files: .nametable.files.tmp
	( cmp -s $@ $< ) || ( cp $< $@ )
	-$(RM) $<

# `hg status' is slightly faster than `hg fstatus'. Both are
# quite a bit slower on an NFS mounted file system, so this is
# really geared towards repos on local file systems.
.nametable.files.tmp:
	-$(HG) fstatus -acmn > $@

nametable.clean:
	-$(RM) .nametable.files .nametable.files.tmp

FORCE:

.PHONY: cscope cscope.clean TAGS.clean nametable.clean FORCE
hotspot/make/solaris/Makefile
@@ -296,7 +296,7 @@ clean_compiler1 clean_compiler2 clean_core clean_kernel:
clean:  clean_compiler2 clean_compiler1 clean_core clean_docs clean_kernel

include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
include $(GAMMADIR)/make/cscope.make

#-------------------------------------------------------------------------------
hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp
@@ -486,7 +486,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
  if (ek == _invokespecial_mh) {
    // Must load & check the first argument before entering the target method.
    __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
    __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
    __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
    __ null_check(G3_method_handle);
    __ verify_oop(G3_method_handle);
  }
hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
@@ -3293,8 +3293,6 @@ void TemplateTable::invokedynamic(int byte_no) {
                  /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  __ verify_oop(G5_callsite);

  // profile this call
  __ profile_call(O4);

@@ -3307,8 +3305,10 @@ void TemplateTable::invokedynamic(int byte_no) {
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address

  __ verify_oop(G5_callsite);
  __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
  __ null_check(G3_method_handle);
  __ verify_oop(G3_method_handle);

  // Adjust Rret first so Llast_SP can be same as Rret
  __ add(Rret, -frame::pc_return_offset, O7);
hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp
@@ -422,7 +422,7 @@ void TemplateTable::fast_aldc(bool wide) {
  Label L_done, L_throw_exception;
  const Register con_klass_temp = rcx;  // same as Rcache
  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
  __ load_klass(con_klass_temp, rax);
  __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
  __ jcc(Assembler::notEqual, L_done);
  __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);

@@ -432,7 +432,7 @@ void TemplateTable::fast_aldc(bool wide) {
  // Load the exception from the system-array which wraps it:
  __ bind(L_throw_exception);
  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));

  __ bind(L_done);

@@ -946,9 +946,9 @@ void TemplateTable::aastore() {
  __ jcc(Assembler::zero, is_null);

  // Move subklass into EBX
  __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
  __ load_klass(rbx, rax);
  // Move superklass into EAX
  __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array+index*wordSize+12 into a single register.  Frees ECX.
  __ lea(rdx, element_address);

@@ -2001,7 +2001,7 @@ void TemplateTable::_return(TosState state) {
  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ movptr(rax, aaddress(0));
    __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
    __ load_klass(rdi, rax);
    __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
    __ testl(rdi, JVM_ACC_HAS_FINALIZER);
    Label skip_register_finalizer;

@@ -2948,7 +2948,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
  // get receiver klass
  __ null_check(recv, oopDesc::klass_offset_in_bytes());
  // Keep recv in rcx for callee expects it there
  __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
  __ load_klass(rax, recv);
  __ verify_oop(rax);

  // profile this call

@@ -3028,7 +3028,7 @@ void TemplateTable::invokeinterface(int byte_no) {
  // Get receiver klass into rdx - also a null check
  __ restore_locals();  // restore rdi
  __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
  __ load_klass(rdx, rcx);
  __ verify_oop(rdx);

  // profile this call

@@ -3083,6 +3083,7 @@ void TemplateTable::invokeinterface(int byte_no) {
void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_oop, "use this argument");

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.

@@ -3095,7 +3096,6 @@ void TemplateTable::invokedynamic(int byte_no) {
    return;
  }

  assert(byte_no == f1_oop, "use this argument");
  prepare_invoke(rax, rbx, byte_no);

  // rax: CallSite object (f1)

@@ -3106,14 +3106,14 @@ void TemplateTable::invokedynamic(int byte_no) {
  Register rax_callsite      = rax;
  Register rcx_method_handle = rcx;

  if (ProfileInterpreter) {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    // profile this call
    __ profile_call(rsi);
  }
  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rsi);

  __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
  __ verify_oop(rax_callsite);
  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
  __ null_check(rcx_method_handle);
  __ verify_oop(rcx_method_handle);
  __ prepare_to_jump_from_interpreted();
  __ jump_to_method_handle_entry(rcx_method_handle, rdx);
}

@@ -3258,7 +3258,7 @@ void TemplateTable::_new() {
              (int32_t)markOopDesc::prototype());  // header
    __ pop(rcx);  // get saved klass back in the register.
  }
  __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx);  // klass
  __ store_klass(rax, rcx);  // klass

  {
    SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);

@@ -3333,7 +3333,7 @@ void TemplateTable::checkcast() {
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
  __ bind(resolved);

  __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
  __ load_klass(rbx, rdx);

  // Generate subtype check.  Blows ECX.  Resets EDI.  Object in EDX.
  // Superklass in EAX.  Subklass in EBX.

@@ -3376,12 +3376,12 @@ void TemplateTable::instanceof() {
  __ push(atos);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ pop_ptr(rdx);
  __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
  __ load_klass(rdx, rdx);
  __ jmp(resolved);

  // Get superklass in EAX and subklass in EDX
  __ bind(quicked);
  __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
  __ load_klass(rdx, rax);
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));

  __ bind(resolved);
hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
@@ -436,7 +436,7 @@ void TemplateTable::fast_aldc(bool wide) {
  Label L_done, L_throw_exception;
  const Register con_klass_temp = rcx;  // same as cache
  const Register array_klass_temp = rdx;  // same as index
  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
  __ load_klass(con_klass_temp, rax);
  __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
  __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
  __ jcc(Assembler::notEqual, L_done);

@@ -447,7 +447,7 @@ void TemplateTable::fast_aldc(bool wide) {
  // Load the exception from the system-array which wraps it:
  __ bind(L_throw_exception);
  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));

  __ bind(L_done);

@@ -3137,7 +3137,6 @@ void TemplateTable::invokedynamic(int byte_no) {
    return;
  }

  assert(byte_no == f1_oop, "use this argument");
  prepare_invoke(rax, rbx, byte_no);

  // rax: CallSite object (f1)

@@ -3148,14 +3147,14 @@ void TemplateTable::invokedynamic(int byte_no) {
  Register rax_callsite      = rax;
  Register rcx_method_handle = rcx;

  if (ProfileInterpreter) {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    // profile this call
    __ profile_call(r13);
  }
  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(r13);

  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
  __ verify_oop(rax_callsite);
  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
  __ null_check(rcx_method_handle);
  __ verify_oop(rcx_method_handle);
  __ prepare_to_jump_from_interpreted();
  __ jump_to_method_handle_entry(rcx_method_handle, rdx);
}
hotspot/src/cpu/x86/vm/vm_version_x86.cpp
@@ -441,12 +441,25 @@ void VM_Version::get_processor_features() {
      }
    }

    // On family 21 processors default is no sw prefetch
    if (cpu_family() == 21) {
    // some defaults for AMD family 15h
    if (cpu_family() == 0x15) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true;
      }
      if (FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy) {
        UseUnalignedLoadStores = true;
      }
    }
  }

  if (is_intel()) { // Intel cpus specific settings
hotspot/src/os/linux/vm/globals_linux.hpp
@@ -29,13 +29,19 @@
// Defines Linux specific flags. They are not available on other platforms.
//
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
  product(bool, UseOprofile, false,                                    \
          "enable support for Oprofile profiler")                      \
                                                                       \
  product(bool, UseLinuxPosixThreadCPUClocks, true,                    \
          "enable fast Linux Posix clocks where available")
// NB: The default value of UseLinuxPosixThreadCPUClocks may be
// overridden in Arguments::parse_each_vm_init_arg.
  product(bool, UseOprofile, false,                                    \
          "enable support for Oprofile profiler")                      \
                                                                       \
  product(bool, UseLinuxPosixThreadCPUClocks, true,                    \
          "enable fast Linux Posix clocks where available")            \
  /* NB: The default value of UseLinuxPosixThreadCPUClocks may be      \
     overridden in Arguments::parse_each_vm_init_arg. */               \
                                                                       \
                                                                       \
  product(bool, UseHugeTLBFS, false,                                   \
          "Use MAP_HUGETLB for large pages")                           \
                                                                       \
  product(bool, UseSHM, false,                                         \
          "Use SYSV shared memory for large pages")

//
// Defines Linux-specific default values. The flags are available on all
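The two new flags are product flags, so once this change is in a build they can be toggled on the java command line together with the existing large-page switch, for example -XX:+UseLargePages -XX:+UseHugeTLBFS or -XX:+UseLargePages -XX:+UseSHM. As the os_linux.cpp change below shows, leaving both at their defaults makes the VM try HugeTLBFS first and fall back to SysV shared memory. These example invocations are illustrative; the exact behaviour depends on the rest of this patch and on the kernel's huge-page configuration.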
hotspot/src/os/linux/vm/os_linux.cpp
@@ -2465,16 +2465,40 @@ bool os::commit_memory(char* addr, size_t size, bool exec) {
  return res != (uintptr_t) MAP_FAILED;
}

// Define MAP_HUGETLB here so we can build HotSpot on old systems.
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000
#endif

// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                       bool exec) {
  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
    uintptr_t res =
      (uintptr_t) ::mmap(addr, size, prot,
                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
                         -1, 0);
    return res != (uintptr_t) MAP_FAILED;
  }

  return commit_memory(addr, size, exec);
}

void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
    // be supported or the memory may already be backed by huge pages.
    ::madvise(addr, bytes, MADV_HUGEPAGE);
  }
}

void os::free_memory(char *addr, size_t bytes) {
  ::mmap(addr, bytes, PROT_READ|PROT_WRITE,
         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
  ::madvise(addr, bytes, MADV_DONTNEED);
}

void os::numa_make_global(char *addr, size_t bytes) {

@@ -2812,6 +2836,43 @@ bool os::unguard_memory(char* addr, size_t size) {
  return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
}

bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
  bool result = false;
  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
                 -1, 0);

  if (p != (void *) -1) {
    // We don't know if this really is a huge page or not.
    FILE *fp = fopen("/proc/self/maps", "r");
    if (fp) {
      while (!feof(fp)) {
        char chars[257];
        long x = 0;
        if (fgets(chars, sizeof(chars), fp)) {
          if (sscanf(chars, "%lx-%*lx", &x) == 1
              && x == (long)p) {
            if (strstr(chars, "hugepage")) {
              result = true;
              break;
            }
          }
        }
      }
      fclose(fp);
    }
    munmap(p, page_size);
    if (result)
      return true;
  }

  if (warn) {
    warning("HugeTLBFS is not supported by the operating system.");
  }

  return result;
}

/*
 * Set the coredump_filter bits to include largepages in core dump (bit 6)
 *

@@ -2854,7 +2915,16 @@ static void set_coredump_filter(void) {
static size_t _large_page_size = 0;

bool os::large_page_init() {
  if (!UseLargePages) return false;
  if (!UseLargePages) {
    UseHugeTLBFS = false;
    UseSHM = false;
    return false;
  }

  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
    // Our user has not expressed a preference, so we'll try both.
    UseHugeTLBFS = UseSHM = true;
  }

  if (LargePageSizeInBytes) {
    _large_page_size = LargePageSizeInBytes;

@@ -2899,6 +2969,9 @@ bool os::large_page_init() {
    }
  }

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);

  const size_t default_page_size = (size_t)Linux::page_size();
  if (_large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;

@@ -2906,6 +2979,14 @@ bool os::large_page_init() {
    _page_sizes[2] = 0;
  }

  UseHugeTLBFS = UseHugeTLBFS &&
                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);

  if (UseHugeTLBFS)
    UseSHM = false;

  UseLargePages = UseHugeTLBFS || UseSHM;

  set_coredump_filter();

  // Large page support is available on 2.6 or newer kernel, some vendors

@@ -2922,7 +3003,7 @@ bool os::large_page_init() {
char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
  // "exec" is passed in but not used.  Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages, "only for large pages");
  assert(UseLargePages && UseSHM, "only for SHM large pages");

  key_t key = IPC_PRIVATE;
  char *addr;

@@ -2989,16 +3070,15 @@ size_t os::large_page_size() {
  return _large_page_size;
}

// Linux does not support anonymous mmap with large page memory. The only way
// to reserve large page memory without file backing is through SysV shared
// memory API. The entire memory region is committed and pinned upfront.
// Hopefully this will change in the future...
// HugeTLBFS allows application to commit large page memory on demand;
// with SysV SHM the entire memory region must be allocated as shared
// memory.
bool os::can_commit_large_page_memory() {
  return false;
  return UseHugeTLBFS;
}

bool os::can_execute_large_page_memory() {
  return false;
  return UseHugeTLBFS;
}

// Reserve memory at an arbitrary address, only if that area is

@@ -4090,6 +4170,23 @@ jint os::init_2(void)
      UseNUMA = false;
    }
  }

  // With SHM large pages we cannot uncommit a page, so there's not way
  // we can make the adaptive lgrp chunk resizing work. If the user specified
  // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
  // disable adaptive resizing.
  if (UseNUMA && UseLargePages && UseSHM) {
    if (!FLAG_IS_DEFAULT(UseNUMA)) {
      if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
        UseLargePages = false;
      } else {
        warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
        UseAdaptiveSizePolicy = false;
        UseAdaptiveNUMAChunkSizing = false;
      }
    } else {
      UseNUMA = false;
    }
  }

  if (!UseNUMA && ForceNUMA) {
    UseNUMA = true;
  }
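The new commit path depends on the kernel accepting MAP_HUGETLB and on a configured huge-page pool; when the flagged mmap fails, HotSpot simply falls back to the ordinary commit_memory(). As a rough standalone illustration of that probe-and-fall-back pattern (not HotSpot code; the 2 MB huge page size is an assumption), a minimal C++ sketch might look like this:

// Illustrative sketch only: map one region with MAP_HUGETLB the way the new
// os::commit_memory() does, and fall back to regular pages if that fails.
#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>

#ifndef MAP_HUGETLB            // same fallback definition as in os_linux.cpp
#define MAP_HUGETLB 0x40000
#endif

int main() {
  const size_t size = 2 * 1024 * 1024;   // assumed huge page size
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (p == MAP_FAILED) {
    // No huge pages available (e.g. empty hugetlbfs pool): use regular pages.
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    printf("fell back to regular pages at %p\n", p);
  } else {
    printf("got a MAP_HUGETLB mapping at %p\n", p);
  }
  if (p != MAP_FAILED) {
    munmap(p, size);
  }
  return 0;
}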
hotspot/src/os/linux/vm/os_linux.hpp
@@ -86,6 +86,9 @@ class Linux {
  static void rebuild_cpu_to_node_map();
  static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }

  static bool hugetlbfs_sanity_check(bool warn, size_t page_size);

 public:
  static void init_thread_fpu_state();
  static int  get_fpu_control_word();
hotspot/src/os/solaris/vm/os_solaris.cpp
@@ -2826,7 +2826,9 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  Solaris::set_mpss_range(addr, bytes, alignment_hint);
  if (UseLargePages && UseMPSS) {
    Solaris::set_mpss_range(addr, bytes, alignment_hint);
  }
}

// Tell the OS to make the range local to the first-touching LWP

@@ -5044,6 +5046,20 @@ jint os::init_2(void) {
      UseNUMA = false;
    }
  }

  // ISM is not compatible with the NUMA allocator - it always allocates
  // pages round-robin across the lgroups.
  if (UseNUMA && UseLargePages && UseISM) {
    if (!FLAG_IS_DEFAULT(UseNUMA)) {
      if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
        UseLargePages = false;
      } else {
        warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
        UseNUMA = false;
      }
    } else {
      UseNUMA = false;
    }
  }

  if (!UseNUMA && ForceNUMA) {
    UseNUMA = true;
  }
hotspot/src/share/vm/c1/c1_Runtime1.cpp
@@ -1026,9 +1026,21 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
          // first replace the tail, then the call
#ifdef ARM
          if (stub_id == Runtime1::load_klass_patching_id && !VM_Version::supports_movw()) {
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            oop* oop_addr = NULL;
            assert(nm != NULL, "invalid nmethod_pc");
            RelocIterator oops(nm, copy_buff, copy_buff + 1);
            while (oops.next()) {
              if (oops.type() == relocInfo::oop_type) {
                oop_Relocation* r = oops.oop_reloc();
                oop_addr = r->oop_addr();
                break;
              }
            }
            assert(oop_addr != NULL, "oop relocation must exist");
            copy_buff -= *byte_count;
            NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
            n_copy2->set_data((intx) (load_klass()), instr_pc);
            n_copy2->set_pc_relative_offset((address)oop_addr, instr_pc);
          }
#endif
hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp
@@ -232,14 +232,7 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*
  }

  // compute size of arguments
  int arg_size = target->arg_size();
  if (code == Bytecodes::_invokedynamic) {
    assert(!target->is_static(), "receiver explicit in method");
    arg_size--;  // implicit, not really on stack
  }
  if (!target->is_loaded() && code == Bytecodes::_invokestatic) {
    arg_size--;
  }
  int arg_size = target->invoke_arg_size(code);
  int arg_base = MAX2(state._stack_height - arg_size, 0);

  // direct recursive calls are skipped if they can be bound statically without introducing
hotspot/src/share/vm/ci/ciEnv.cpp
@@ -756,7 +756,7 @@ ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
  assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");

  bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc);
  if (is_resolved && (oop) cpool->cache()->secondary_entry_at(index)->f1() == NULL)
  if (is_resolved && cpool->cache()->secondary_entry_at(index)->is_f1_null())
    // FIXME: code generation could allow for null (unlinked) call site
    is_resolved = false;

@@ -770,7 +770,7 @@ ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
  // Get the invoker methodOop from the constant pool.
  oop f1_value = cpool->cache()->main_entry_at(index)->f1();
  methodOop signature_invoker = methodOop(f1_value);
  methodOop signature_invoker = (methodOop) f1_value;
  assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
         "correct result from LinkResolver::resolve_invokedynamic");
hotspot/src/share/vm/ci/ciMethod.hpp
@@ -127,7 +127,24 @@ class ciMethod : public ciObject {
  ciSignature* signature() const                 { return _signature; }
  ciType*      return_type() const               { return _signature->return_type(); }
  int          arg_size_no_receiver() const      { return _signature->size(); }
  int          arg_size() const                  { return _signature->size() + (_flags.is_static() ? 0 : 1); }
  // Can only be used on loaded ciMethods
  int          arg_size() const                  {
    check_is_loaded();
    return _signature->size() + (_flags.is_static() ? 0 : 1);
  }
  // Report the number of elements on stack when invoking this method.
  // This is different than the regular arg_size because invokdynamic
  // has an implicit receiver.
  int invoke_arg_size(Bytecodes::Code code) const {
    int arg_size = _signature->size();
    // Add a receiver argument, maybe:
    if (code != Bytecodes::_invokestatic &&
        code != Bytecodes::_invokedynamic) {
      arg_size++;
    }
    return arg_size;
  }

  // Method code and related information.
  address code()                                 { if (_code == NULL) load_code(); return _code; }

@@ -276,9 +293,9 @@ class ciMethod : public ciObject {
  void print_short_name(outputStream* st = tty);

  methodOop get_method_handle_target() {
    klassOop receiver_limit_oop = NULL;
    int flags = 0;
    return MethodHandles::decode_method(get_oop(), receiver_limit_oop, flags);
    KlassHandle receiver_limit; int flags = 0;
    methodHandle m = MethodHandles::decode_method(get_oop(), receiver_limit, flags);
    return m();
  }
};
hotspot/src/share/vm/ci/ciObject.cpp
@@ -194,6 +194,16 @@ bool ciObject::can_be_constant() {
// ciObject::should_be_constant()
bool ciObject::should_be_constant() {
  if (ScavengeRootsInCode >= 2)  return true;  // force everybody to be a constant
  if (!JavaObjectsInPerm && !is_null_object()) {
    // We want Strings and Classes to be embeddable by default since
    // they used to be in the perm world.  Not all Strings used to be
    // embeddable but there's no easy way to distinguish the interned
    // from the regulars ones so just treat them all that way.
    ciEnv* env = CURRENT_ENV;
    if (klass() == env->String_klass() || klass() == env->Class_klass()) {
      return true;
    }
  }
  return handle() == NULL || !is_scavengable();
}
hotspot/src/share/vm/classfile/javaClasses.cpp
@@ -1357,7 +1357,7 @@ class BacktraceBuilder: public StackObj {
};


void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) {
void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS) {
  if (!StackTraceInThrowable) return;
  ResourceMark rm(THREAD);

@@ -1374,6 +1374,16 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) {
  JavaThread* thread = (JavaThread*)THREAD;
  BacktraceBuilder bt(CHECK);

  // If there is no Java frame just return the method that was being called
  // with bci 0
  if (!thread->has_last_Java_frame()) {
    if (max_depth >= 1 && method() != NULL) {
      bt.push(method(), 0, CHECK);
      set_backtrace(throwable(), bt.backtrace());
    }
    return;
  }

  // Instead of using vframe directly, this version of fill_in_stack_trace
  // basically handles everything by hand. This significantly improved the
  // speed of this method call up to 28.5% on Solaris sparc. 27.1% on Windows.

@@ -1477,7 +1487,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) {
  set_backtrace(throwable(), bt.backtrace());
}

void java_lang_Throwable::fill_in_stack_trace(Handle throwable) {
void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle method) {
  // No-op if stack trace is disabled
  if (!StackTraceInThrowable) {
    return;

@@ -1491,7 +1501,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable) {
  PRESERVE_EXCEPTION_MARK;

  JavaThread* thread = JavaThread::active();
  fill_in_stack_trace(throwable, thread);
  fill_in_stack_trace(throwable, method, thread);
  // ignore exceptions thrown during stack trace filling
  CLEAR_PENDING_EXCEPTION;
}
hotspot/src/share/vm/classfile/javaClasses.hpp
@@ -440,8 +440,8 @@ class java_lang_Throwable: AllStatic {
  static void fill_in_stack_trace_of_preallocated_backtrace(Handle throwable);

  // Fill in current stack trace, can cause GC
  static void fill_in_stack_trace(Handle throwable, TRAPS);
  static void fill_in_stack_trace(Handle throwable);
  static void fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS);
  static void fill_in_stack_trace(Handle throwable, methodHandle method = methodHandle());

  // Programmatic access to stack trace
  static oop  get_stack_trace_element(oop throwable, int index, TRAPS);
  static int  get_stack_trace_depth(oop throwable, TRAPS);
hotspot/src/share/vm/compiler/compileBroker.cpp
@@ -976,6 +976,15 @@ void CompileBroker::compile_method_base(methodHandle method,
    return;
  }

  // If the requesting thread is holding the pending list lock
  // then we just return. We can't risk blocking while holding
  // the pending list lock or a 3-way deadlock may occur
  // between the reference handler thread, a GC (instigated
  // by a compiler thread), and compiled method registration.
  if (instanceRefKlass::owns_pending_list_lock(JavaThread::current())) {
    return;
  }

  // Outputs from the following MutexLocker block:
  CompileTask* task     = NULL;
  bool         blocking = false;

@@ -1304,17 +1313,8 @@ uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
// Should the current thread be blocked until this compilation request
// has been fulfilled?
bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
  if (!BackgroundCompilation) {
    Symbol* class_name = method->method_holder()->klass_part()->name();
    if (class_name->starts_with("java/lang/ref/Reference", 23)) {
      // The reference handler thread can dead lock with the GC if compilation is blocking,
      // so we avoid blocking compiles for anything in the java.lang.ref.Reference class,
      // including inner classes such as ReferenceHandler.
      return false;
    }
    return true;
  }
  return false;
  assert(!instanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
  return !BackgroundCompilation;
}
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -1963,10 +1963,21 @@ CompactibleFreeListSpace::gc_epilogue() {
// Iteration support, mostly delegated from a CMS generation

void CompactibleFreeListSpace::save_marks() {
  // mark the "end" of the used space at the time of this call;
  assert(Thread::current()->is_VM_thread(),
         "Global variable should only be set when single-threaded");
  // Mark the "end" of the used space at the time of this call;
  // note, however, that promoted objects from this point
  // on are tracked in the _promoInfo below.
  set_saved_mark_word(unallocated_block());
#ifdef ASSERT
  // Check the sanity of save_marks() etc.
  MemRegion ur    = used_region();
  MemRegion urasm = used_region_at_save_marks();
  assert(ur.contains(urasm),
         err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
                 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
                 ur.start(), ur.end(), urasm.start(), urasm.end()));
#endif
  // inform allocator that promotions should be tracked.
  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  _promoInfo.startTrackingPromotions();
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -3189,10 +3189,9 @@ bool ConcurrentMarkSweepGeneration::is_too_full() const {
}

void CMSCollector::setup_cms_unloading_and_verification_state() {
  const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
  const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                             || VerifyBeforeExit;
  const int  rso           = SharedHeap::SO_Symbols | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
  const int  rso           = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  if (should_unload_classes()) {   // Should unload classes this cycle
    remove_root_scanning_option(rso);  // Shrink the root set appropriately
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1161,6 +1161,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                PrintGC, true, gclog_or_tty);
    TraceCollectorStats tcs(g1mm()->full_collection_counters());
    TraceMemoryManagerStats tms(true /* fullGC */);

    double start = os::elapsedTime();

@@ -1339,6 +1340,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
  g1mm()->update_counters();

  return true;
}

@@ -1971,6 +1973,10 @@ jint G1CollectedHeap::initialize() {
  init_mutator_alloc_region();

  // Do create of the monitoring and management support so that
  // values in the heap have been properly initialized.
  _g1mm = new G1MonitoringSupport(this, &_g1_storage);

  return JNI_OK;
}

@@ -2113,6 +2119,28 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}

#ifndef PRODUCT
void G1CollectedHeap::allocate_dummy_regions() {
  // Let's fill up most of the region
  size_t word_size = HeapRegion::GrainWords - 1024;
  // And as a result the region we'll allocate will be humongous.
  guarantee(isHumongous(word_size), "sanity");

  for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
    // Let's use the existing mechanism for the allocation
    HeapWord* dummy_obj = humongous_obj_allocate(word_size);
    if (dummy_obj != NULL) {
      MemRegion mr(dummy_obj, word_size);
      CollectedHeap::fill_with_object(mr);
    } else {
      // If we can't allocate once, we probably cannot allocate
      // again. Let's get out of the loop.
      break;
    }
  }
}
#endif // !PRODUCT

void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);

@@ -2777,17 +2805,26 @@ void G1CollectedHeap::verify(bool allow_dirty,
                             bool silent,
                             bool use_prev_marking) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    if (!silent) { gclog_or_tty->print("roots "); }
    if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
    VerifyRootsClosure rootsCl(use_prev_marking);
    CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
    process_strong_roots(true,  // activate StrongRootsScope
                         false,
                         SharedHeap::SO_AllClasses,
    // We apply the relevant closures to all the oops in the
    // system dictionary, the string table and the code cache.
    const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
    process_strong_roots(true,  // activate StrongRootsScope
                         true,  // we set "collecting perm gen" to true,
                               // so we don't reset the dirty cards in the perm gen.
                         SharedHeap::ScanningOption(so),  // roots scanning options
                         &rootsCl,
                         &blobsCl,
                         &rootsCl);
    // Since we used "collecting_perm_gen" == true above, we will not have
    // checked the refs from perm into the G1-collected heap. We check those
    // references explicitly below. Whether the relevant cards are dirty
    // is checked further below in the rem set verification.
    if (!silent) { gclog_or_tty->print("Permgen roots "); }
    perm_gen()->oop_iterate(&rootsCl);
    bool failures = rootsCl.failures();
    rem_set()->invalidate(perm_gen()->used_region(), false);
    if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
    verify_region_sets();
    if (!silent) { gclog_or_tty->print("HeapRegions "); }

@@ -3164,6 +3201,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);

    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
    TraceMemoryManagerStats tms(false /* fullGC */);

    // If the secondary_free_list is not empty, append it to the

@@ -3338,6 +3376,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
        doConcurrentMark();
      }

      allocate_dummy_regions();

#if YOUNG_LIST_VERBOSE
      gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
      _young_list->print();

@@ -3401,6 +3441,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
  g1mm()->update_counters();

  if (G1SummarizeRSetStats &&
      (G1SummarizeRSetStatsPeriod > 0) &&
      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {

@@ -5314,6 +5356,7 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
    if (new_alloc_region != NULL) {
      g1_policy()->update_region_num(true /* next_is_young */);
      set_region_short_lived_locked(new_alloc_region);
      g1mm()->update_eden_counters();
      return new_alloc_region;
    }
  }
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -28,7 +28,9 @@
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"

@@ -57,6 +59,7 @@ class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class GenerationCounters;

typedef OverflowTaskQueue<StarTask>         RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;

@@ -236,6 +239,9 @@ private:
  // current collection.
  HeapRegion* _gc_alloc_region_list;

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  // Determines PLAB size for a particular allocation purpose.
  static size_t desired_plab_sz(GCAllocPurpose purpose);

@@ -298,6 +304,14 @@ private:
  // started is maintained in _total_full_collections in CollectedHeap.
  volatile unsigned int _full_collections_completed;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions that will be allocated by
  // this method will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;

  // These are macros so that, if the assert fires, we get the correct
  // line number, file, etc.

@@ -542,6 +556,9 @@ protected:
  HeapWord* expand_and_allocate(size_t word_size);

public:

  G1MonitoringSupport* g1mm() { return _g1mm; }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp (new file, 0 → 100644)
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
                                         VirtualSpace* g1_storage_addr) :
  _g1h(g1h),
  _incremental_collection_counters(NULL),
  _full_collection_counters(NULL),
  _non_young_collection_counters(NULL),
  _old_space_counters(NULL),
  _young_collection_counters(NULL),
  _eden_counters(NULL),
  _from_counters(NULL),
  _to_counters(NULL),
  _g1_storage_addr(g1_storage_addr)
{
  // Counters for GC collections
  //
  //  name "collector.0".  In a generational collector this would be the
  // young generation collection.
  _incremental_collection_counters =
    new CollectorCounters("G1 incremental collections", 0);
  //  name "collector.1".  In a generational collector this would be the
  // old generation collection.
  _full_collection_counters =
    new CollectorCounters("G1 stop-the-world full collections", 1);

  // timer sampling for all counters supporting sampling only update the
  // used value.  See the take_sample() method.  G1 requires both used and
  // capacity updated so sampling is not currently used.  It might
  // be sufficient to update all counters in take_sample() even though
  // take_sample() only returns "used".  When sampling was used, there
  // were some anomalous values emitted which may have been the consequence
  // of not updating all values simultaneously (i.e., see the calculation done
  // in eden_space_used(), is it possible that the values used to
  // calculate either eden_used or survivor_used are being updated by
  // the collector when the sample is being done?).
  const bool sampled = false;

  // "Generation" and "Space" counters.
  //
  //  name "generation.1" This is logically the old generation in
  // generational GC terms.  The "1, 1" parameters are for
  // the n-th generation (=1) with 1 space.
  // Counters are created from minCapacity, maxCapacity, and capacity
  _non_young_collection_counters =
    new GenerationCounters("whole heap", 1, 1, _g1_storage_addr);

  //  name "generation.1.space.0"
  // Counters are created from maxCapacity, capacity, initCapacity,
  // and used.
  _old_space_counters = new HSpaceCounters("space", 0,
    _g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters);

  //  Young collection set
  //  name "generation.0".  This is logically the young generation.
  //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
  // See _non_young_collection_counters for additional counters
  _young_collection_counters = new GenerationCounters("young", 0, 3, NULL);

  // Replace "max_heap_byte_size() with maximum young gen size for
  // g1Collectedheap
  //  name "generation.0.space.0"
  // See _old_space_counters for additional counters
  _eden_counters = new HSpaceCounters("eden", 0,
    _g1h->max_capacity(), eden_space_committed(),
    _young_collection_counters);

  //  name "generation.0.space.1"
  // See _old_space_counters for additional counters
  // Set the arguments to indicate that this survivor space is not used.
  _from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0,
    _young_collection_counters);

  //  name "generation.0.space.2"
  // See _old_space_counters for additional counters
  _to_counters = new HSpaceCounters("s1", 2,
    _g1h->max_capacity(), survivor_space_committed(),
    _young_collection_counters);
}
size_t G1MonitoringSupport::overall_committed() {
  return g1h()->capacity();
}

size_t G1MonitoringSupport::overall_used() {
  return g1h()->used_unlocked();
}

size_t G1MonitoringSupport::eden_space_committed() {
  return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes);
}

size_t G1MonitoringSupport::eden_space_used() {
  size_t young_list_length = g1h()->young_list()->length();
  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
  size_t survivor_used = survivor_space_used();
  eden_used = subtract_up_to_zero(eden_used, survivor_used);
  return eden_used;
}

size_t G1MonitoringSupport::survivor_space_committed() {
  return MAX2(survivor_space_used(), (size_t) HeapRegion::GrainBytes);
}

size_t G1MonitoringSupport::survivor_space_used() {
  size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions();
  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
  return survivor_used;
}

size_t G1MonitoringSupport::old_space_committed() {
  size_t committed = overall_committed();
  size_t eden_committed = eden_space_committed();
  size_t survivor_committed = survivor_space_committed();
  committed = subtract_up_to_zero(committed, eden_committed);
  committed = subtract_up_to_zero(committed, survivor_committed);
  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
  return committed;
}

// See the comment near the top of g1MonitoringSupport.hpp for
// an explanation of these calculations for "used" and "capacity".
size_t G1MonitoringSupport::old_space_used() {
  size_t used = overall_used();
  size_t eden_used = eden_space_used();
  size_t survivor_used = survivor_space_used();
  used = subtract_up_to_zero(used, eden_used);
  used = subtract_up_to_zero(used, survivor_used);
  return used;
}

void G1MonitoringSupport::update_counters() {
  if (UsePerfData) {
    eden_counters()->update_capacity(eden_space_committed());
    eden_counters()->update_used(eden_space_used());
    to_counters()->update_capacity(survivor_space_committed());
    to_counters()->update_used(survivor_space_used());
    old_space_counters()->update_capacity(old_space_committed());
    old_space_counters()->update_used(old_space_used());
    non_young_collection_counters()->update_all();
  }
}

void G1MonitoringSupport::update_eden_counters() {
  if (UsePerfData) {
    eden_counters()->update_capacity(eden_space_committed());
    eden_counters()->update_used(eden_space_used());
  }
}
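To make the pool arithmetic above concrete, here is a small standalone sketch (not part of the commit; the region size, region counts, and overall usage are made-up illustrative values) that mirrors eden_space_used(), survivor_space_used(), and old_space_used() and prints the figures that would end up in the corresponding jstat space counters:

#include <cstddef>
#include <cstdio>

// Same clamped subtraction as G1MonitoringSupport::subtract_up_to_zero().
static size_t subtract_up_to_zero(size_t x, size_t y) {
  return (x > y) ? (x - y) : 0;
}

int main() {
  // Illustrative stand-ins for the real inputs:
  const size_t region_size   = 1024 * 1024;      // HeapRegion::GrainBytes
  const size_t young_regions = 10;               // young_list()->length()
  const size_t survivor_num  = 2;                // recorded_survivor_regions()
  const size_t overall_used  = 48 * region_size; // used_unlocked()

  size_t survivor_used = survivor_num * region_size;                   // 2 MB
  size_t eden_used     = subtract_up_to_zero(young_regions * region_size,
                                             survivor_used);           // 8 MB
  size_t old_used      = subtract_up_to_zero(
                             subtract_up_to_zero(overall_used, eden_used),
                             survivor_used);                           // 38 MB

  printf("eden=%zuM survivor=%zuM old=%zuM\n",
         eden_used >> 20, survivor_used >> 20, old_used >> 20);
  return 0;
}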
hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp
0 → 100644
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
#include "gc_implementation/shared/hSpaceCounters.hpp"
class G1CollectedHeap;
class G1SpaceMonitoringSupport;
// Class for monitoring logical spaces in G1.
// G1 defines a set of regions as a young
// collection (analogous to a young generation).
// The young collection is a logical generation
// with no fixed chunk (see space.hpp) reflecting
// the address space for the generation. In addition
// to the young collection there is its complement
// the non-young collection that is simply the regions
// not in the young collection. The non-young collection
// is treated here as a logical old generation only
// because the monitoring tools expect a generational
// heap. The monitoring tools expect that a Space
// (see space.hpp) exists that describes the
// address space of the young collection and the non-young
// collection, and such a view is provided here.
//
// This class provides interfaces to access
// the value of variables for the young collection
// that include the "capacity" and "used" of the
// young collection along with constant values
// for the minimum and maximum capacities for
// the logical spaces. Similarly for the non-young
// collection.
//
// Also provided are counters for G1 concurrent collections
// and stop-the-world full heap collections.
//
// Below is a description of how "used" and "capactiy"
// (or committed) is calculated for the logical spaces.
//
// 1) The used space calculation for a pool is not necessarily
// independent of the others. We can easily get from G1 the overall
// used space in the entire heap, the number of regions in the young
// generation (includes both eden and survivors), and the number of
// survivor regions. So, from that we calculate:
//
// survivor_used = survivor_num * region_size
// eden_used = young_region_num * region_size - survivor_used
// old_gen_used = overall_used - eden_used - survivor_used
//
// Note that survivor_used and eden_used are upper bounds. To get the
// actual value we would have to iterate over the regions and add up
// ->used(). But that'd be expensive. So, we'll accept some lack of
// accuracy for those two. But, we have to be careful when calculating
// old_gen_used, in case we subtract from overall_used more than the
// actual number and our result goes negative.
//
// 2) Calculating the used space is straightforward, as described
// above. However, how do we calculate the committed space, given that
// we allocate space for the eden, survivor, and old gen out of the
// same pool of regions? One way to do this is to use the used value
// as also the committed value for the eden and survivor spaces and
// then calculate the old gen committed space as follows:
//
// old_gen_committed = overall_committed - eden_committed - survivor_committed
//
// Maybe a better way to do that would be to calculate used for eden
// and survivor as a sum of ->used() over their regions and then
// calculate committed as region_num * region_size (i.e., what we use
// to calculate the used space now). This is something to consider
// in the future.
//
// 3) Another decision that is again not straightforward is what is
// the max size that each memory pool can grow to. One way to do this
// would be to use the committed size for the max for the eden and
// survivors and calculate the old gen max as follows (basically, it's
// a similar pattern to what we use for the committed space, as
// described above):
//
// old_gen_max = overall_max - eden_max - survivor_max
//
// Unfortunately, the above makes the max of each pool fluctuate over
// time and, even though this is allowed according to the spec, it
// broke several assumptions in the M&M framework (there were cases
// where used would reach a value greater than max). So, for max we
// use -1, which means "undefined" according to the spec.
//
// 4) Now, there is a very subtle issue with all the above. The
// framework will call get_memory_usage() on the three pools
// asynchronously. As a result, each call might get a different value
// for, say, survivor_num which will yield inconsistent values for
// eden_used, survivor_used, and old_gen_used (as survivor_num is used
// in the calculation of all three). This would normally be
// ok. However, it's possible that this might cause the sum of
// eden_used, survivor_used, and old_gen_used to go over the max heap
// size and this seems to sometimes cause JConsole (and maybe other
// clients) to get confused. There is not really an easy / clean
// solution to this problem, due to the asynchronous nature of the
// framework.
class G1MonitoringSupport : public CHeapObj {
  G1CollectedHeap* _g1h;
  VirtualSpace* _g1_storage_addr;

  // jstat performance counters
  //  incremental collections both fully and partially young
  CollectorCounters*   _incremental_collection_counters;
  //  full stop-the-world collections
  CollectorCounters*   _full_collection_counters;
  //  young collection set counters.  The _eden_counters,
  // _from_counters, and _to_counters are associated with
  // this "generational" counter.
  GenerationCounters*  _young_collection_counters;
  //  non-young collection set counters. The _old_space_counters
  // below are associated with this "generational" counter.
  GenerationCounters*  _non_young_collection_counters;
  // Counters for the capacity and used for
  //   the whole heap
  HSpaceCounters*      _old_space_counters;
  //   the young collection
  HSpaceCounters*      _eden_counters;
  //   the survivor collection (only one, _to_counters, is actively used)
  HSpaceCounters*      _from_counters;
  HSpaceCounters*      _to_counters;

  // It returns x - y if x > y, 0 otherwise.
  // As described in the comment above, some of the inputs to the
  // calculations we have to do are obtained concurrently and hence
  // may be inconsistent with each other. So, this provides a
  // defensive way of performing the subtraction and avoids the value
  // going negative (which would mean a very large result, given that
  // the parameters are size_t).
  static size_t subtract_up_to_zero(size_t x, size_t y) {
    if (x > y) {
      return x - y;
    } else {
      return 0;
    }
  }
 public:
  G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);

  G1CollectedHeap* g1h() { return _g1h; }
  VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }

  // Performance Counter accessors
  void update_counters();
  void update_eden_counters();

  CollectorCounters* incremental_collection_counters() {
    return _incremental_collection_counters;
  }
  CollectorCounters* full_collection_counters() {
    return _full_collection_counters;
  }
  GenerationCounters* non_young_collection_counters() {
    return _non_young_collection_counters;
  }
  HSpaceCounters*      old_space_counters() { return _old_space_counters; }
  HSpaceCounters*      eden_counters() { return _eden_counters; }
  HSpaceCounters*      from_counters() { return _from_counters; }
  HSpaceCounters*      to_counters() { return _to_counters; }

  // Monitoring support used by
  //   MemoryService
  //   jstat counters
  size_t overall_committed();
  size_t overall_used();

  size_t eden_space_committed();
  size_t eden_space_used();

  size_t survivor_space_committed();
  size_t survivor_space_used();

  size_t old_space_committed();
  size_t old_space_used();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
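One subtlety worth spelling out: the inputs are size_t, so if a stale snapshot makes the subtrahend larger than the minuend, a plain x - y wraps around to an enormous unsigned value rather than going negative. A minimal illustration of why the clamped helper is used (not part of the commit):

#include <cstddef>
#include <cstdio>

static size_t subtract_up_to_zero(size_t x, size_t y) {
  return (x > y) ? (x - y) : 0;
}

int main() {
  // An inconsistent snapshot: the survivor estimate momentarily exceeds
  // the young-list estimate it is subtracted from.
  size_t eden_estimate = 4, survivor_estimate = 6;

  printf("raw     : %zu\n", eden_estimate - survivor_estimate);               // wraps to SIZE_MAX - 1
  printf("clamped : %zu\n", subtract_up_to_zero(eden_estimate, survivor_estimate)); // 0
  return 0;
}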
hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -300,6 +300,11 @@
   develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                  \
           "Artificial delay during concurrent region freeing")             \
                                                                            \
+  develop(uintx, G1DummyRegionsPerGC, 0,                                   \
+          "The number of dummy regions G1 will allocate at the end of "    \
+          "each evacuation pause in order to artificially fill up the "    \
+          "heap and stress the marking implementation.")                   \
+                                                                           \
   develop(bool, ReduceInitialCardMarksForG1, false,                        \
           "When ReduceInitialCardMarks is true, this flag setting "        \
           " controls whether G1 allows the RICM optimization")             \
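Because G1DummyRegionsPerGC is declared with develop(...), it can only be changed on debug (non-product) builds of the VM. On such a build, something like `-XX:+UseG1GC -XX:G1DummyRegionsPerGC=32` would, per the flag description and the allocate_dummy_regions() hook added above, pad each evacuation pause with 32 dead regions to force more frequent marking cycles. The flag value here is an illustrative example, not something taken from this commit.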
hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp
@@ -33,44 +33,43 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/virtualspace.hpp"

The former par_non_clean_card_iterate_work() is renamed and reworked by this hunk; the resulting method is:

void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                                             DirtyCardToOopClosure* dcto_cl,
                                                             ClearNoncleanCardWrapper* cl,
                                                             int n_threads) {
  assert(n_threads > 0, "Error: expected n_threads > 0");
  assert((n_threads == 1 && ParallelGCThreads == 0) ||
         n_threads <= (int)ParallelGCThreads,
         "# worker threads != # requested!");
  // Make sure the LNC array is valid for the space.
  jbyte**   lowest_non_clean;
  uintptr_t lowest_non_clean_base_chunk_index;
  size_t    lowest_non_clean_chunk_size;
  get_LNC_array_for_space(sp, lowest_non_clean,
                          lowest_non_clean_base_chunk_index,
                          lowest_non_clean_chunk_size);

  int n_strides = n_threads * StridesPerThread;
  SequentialSubTasksDone* pst = sp->par_seq_tasks();
  pst->set_n_threads(n_threads);
  pst->set_n_tasks(n_strides);

  int stride = 0;
  while (!pst->is_task_claimed(/* reference */ stride)) {
    process_stride(sp, mr, stride, n_strides, dcto_cl, cl,
                   lowest_non_clean,
                   lowest_non_clean_base_chunk_index,
                   lowest_non_clean_chunk_size);
  }
  if (pst->all_tasks_completed()) {
    // Clear lowest_non_clean array for next time.
    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
    uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
             "Bounds error");
      lowest_non_clean[ind] = NULL;
    }
  }
}
@@ -81,7 +80,7 @@ process_stride(Space* sp,
                MemRegion used,
                jint stride, int n_strides,
                DirtyCardToOopClosure* dcto_cl,
-               MemRegionClosure* cl,
+               ClearNoncleanCardWrapper* cl,
                jbyte** lowest_non_clean,
                uintptr_t lowest_non_clean_base_chunk_index,
                size_t    lowest_non_clean_chunk_size) {
@@ -127,7 +126,11 @@ process_stride(Space* sp,
                      lowest_non_clean_base_chunk_index,
                      lowest_non_clean_chunk_size);

-    non_clean_card_iterate_work(chunk_mr, cl);
+    // We do not call the non_clean_card_iterate_serial() version because
+    // we want to clear the cards, and the ClearNoncleanCardWrapper closure
+    // itself does the work of finding contiguous dirty ranges of cards to
+    // process (and clear).
+    cl->do_MemRegion(chunk_mr);

     // Find the next chunk of the stride.
     chunk_card_start += CardsPerStrideChunk * n_strides;
hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,15 +51,18 @@ GenerationCounters::GenerationCounters(const char* name,

    cname = PerfDataManager::counter_name(_name_space, "minCapacity");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
                                     _virtual_space == NULL ? 0 :
                                     _virtual_space->committed_size(), CHECK);

    cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
                                     _virtual_space == NULL ? 0 :
                                     _virtual_space->reserved_size(), CHECK);

    cname = PerfDataManager::counter_name(_name_space, "capacity");
    _current_size = PerfDataManager::create_variable(SUN_GC, cname,
                                     PerfData::U_Bytes,
                                     _virtual_space == NULL ? 0 :
                                     _virtual_space->committed_size(), CHECK);
  }
}
hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,10 +61,11 @@ class GenerationCounters: public CHeapObj {
   }

   virtual void update_all() {
-    _current_size->set_value(_virtual_space->committed_size());
+    _current_size->set_value(_virtual_space == NULL ? 0 :
+                             _virtual_space->committed_size());
   }

   const char* name_space() const        { return _name_space; }
 };

 #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GENERATIONCOUNTERS_HPP
hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp
0 → 100644
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "memory/generation.hpp"
#include "memory/resourceArea.hpp"
HSpaceCounters::HSpaceCounters(const char* name,
                               int ordinal,
                               size_t max_size,
                               size_t initial_capacity,
                               GenerationCounters* gc) {

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cns =
      PerfDataManager::name_space(gc->name_space(), "space", ordinal);

    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
    strcpy(_name_space, cns);

    const char* cname = PerfDataManager::counter_name(_name_space, "name");
    PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);

    cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
                                     (jlong)max_size, CHECK);

    cname = PerfDataManager::counter_name(_name_space, "capacity");
    _capacity = PerfDataManager::create_variable(SUN_GC, cname,
                                                 PerfData::U_Bytes,
                                                 initial_capacity, CHECK);

    cname = PerfDataManager::counter_name(_name_space, "used");
    _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
                                             (jlong) 0, CHECK);

    cname = PerfDataManager::counter_name(_name_space, "initCapacity");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
                                     initial_capacity, CHECK);
  }
}
hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp
0 → 100644
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
#ifndef SERIALGC
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/generation.hpp"
#include "runtime/perfData.hpp"
#endif
// HSpaceCounters is a holder class for performance counters
// that track collections (logical spaces) in a heap.
class HeapSpaceUsedHelper;
class G1SpaceMonitoringSupport;

class HSpaceCounters: public CHeapObj {
  friend class VMStructs;

 private:
  PerfVariable*     _capacity;
  PerfVariable*     _used;

  // Constant PerfData types don't need to retain a reference.
  // However, it's a good idea to document them here.

  char*             _name_space;

 public:

  HSpaceCounters(const char* name, int ordinal, size_t max_size,
                 size_t initial_capacity, GenerationCounters* gc);

  ~HSpaceCounters() {
    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
  }

  inline void update_capacity(size_t v) {
    _capacity->set_value(v);
  }

  inline void update_used(size_t v) {
    _used->set_value(v);
  }

  debug_only(
    // for security reasons, we do not allow arbitrary reads from
    // the counters as they may live in shared memory.
    jlong used() {
      return _used->get_value();
    }
    jlong capacity() {
      return _used->get_value();
    }
  )

  inline void update_all(size_t capacity, size_t used) {
    update_capacity(capacity);
    update_used(used);
  }

  const char* name_space() const        { return _name_space; }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
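The class is a thin holder around two PerfVariables plus a name-space string, so the expected call pattern is easiest to see in a standalone mock (illustrative only; the real counters live in the JVM's shared perf memory, and the names here are simplified stand-ins):

#include <cstddef>
#include <cstdio>

// Standalone mock of the HSpaceCounters interface above: PerfVariable is
// replaced by a plain field so the update pattern can be shown in isolation.
class MockHSpaceCounters {
  const char* _name;
  int         _ordinal;
  size_t      _capacity;
  size_t      _used;
 public:
  MockHSpaceCounters(const char* name, int ordinal)
    : _name(name), _ordinal(ordinal), _capacity(0), _used(0) {}

  void update_capacity(size_t v) { _capacity = v; }
  void update_used(size_t v)     { _used = v; }

  // Mirrors HSpaceCounters::update_all(capacity, used).
  void update_all(size_t capacity, size_t used) {
    update_capacity(capacity);
    update_used(used);
    printf("space.%d (%s): capacity=%zu used=%zu\n",
           _ordinal, _name, _capacity, _used);
  }
};

int main() {
  MockHSpaceCounters eden("eden", 0);                    // analogous to _eden_counters
  eden.update_all(8u * 1024 * 1024, 5u * 1024 * 1024);   // publish after a pause
  return 0;
}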
hotspot/src/share/vm/memory/cardTableModRefBS.cpp
@@ -456,31 +456,35 @@ bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
 }

-void CardTableModRefBS::non_clean_card_iterate(Space* sp,
-                                               MemRegion mr,
-                                               DirtyCardToOopClosure* dcto_cl,
-                                               MemRegionClosure* cl) {
+void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
+                                                                 MemRegion mr,
+                                                                 DirtyCardToOopClosure* dcto_cl,
+                                                                 ClearNoncleanCardWrapper* cl) {
   if (!mr.is_empty()) {
     int n_threads = SharedHeap::heap()->n_par_threads();
     if (n_threads > 0) {
 #ifndef SERIALGC
-      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, n_threads);
+      non_clean_card_iterate_parallel_work(sp, mr, dcto_cl, cl, n_threads);
 #else  // SERIALGC
       fatal("Parallel gc not supported here.");
 #endif // SERIALGC
     } else {
-      non_clean_card_iterate_work(mr, cl);
+      // We do not call the non_clean_card_iterate_serial() version below because
+      // we want to clear the cards (which non_clean_card_iterate_serial() does not
+      // do for us), and the ClearNoncleanCardWrapper closure itself does the work
+      // of finding contiguous dirty ranges of cards to process (and clear).
+      cl->do_MemRegion(mr);
     }
   }
 }

-// NOTE: For this to work correctly, it is important that
-// we look for non-clean cards below (so as to catch those
-// marked precleaned), rather than look explicitly for dirty
-// cards (and miss those marked precleaned). In that sense,
-// the name precleaned is currently somewhat of a misnomer.
-void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
-                                                    MemRegionClosure* cl) {
+// The iterator itself is not MT-aware, but
+// MT-aware callers and closures can use this to
+// accomplish dirty card iteration in parallel. The
+// iterator itself does not clear the dirty cards, or
+// change their values in any manner.
+void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
+                                                      MemRegionClosure* cl) {
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (mri.word_size() > 0) {
@@ -661,7 +665,7 @@ public:

 void CardTableModRefBS::verify_clean_region(MemRegion mr) {
   GuaranteeNotModClosure blk(this);
-  non_clean_card_iterate_work(mr, &blk);
+  non_clean_card_iterate_serial(mr, &blk);
 }

 // To verify a MemRegion is entirely dirty this closure is passed to
hotspot/src/share/vm/memory/cardTableModRefBS.hpp
@@ -44,6 +44,7 @@
 class Generation;
 class OopsInGenClosure;
 class DirtyCardToOopClosure;
+class ClearNoncleanCardWrapper;

 class CardTableModRefBS: public ModRefBarrierSet {
   // Some classes get to look at some private stuff.
@@ -165,22 +166,28 @@ class CardTableModRefBS: public ModRefBarrierSet {
   // Iterate over the portion of the card-table which covers the given
   // region mr in the given space and apply cl to any dirty sub-regions
-  // of mr. cl and dcto_cl must either be the same closure or cl must
-  // wrap dcto_cl. Both are required - neither may be NULL. Also, dcto_cl
-  // may be modified. Note that this function will operate in a parallel
-  // mode if worker threads are available.
-  void non_clean_card_iterate(Space* sp, MemRegion mr,
-                              DirtyCardToOopClosure* dcto_cl,
-                              MemRegionClosure* cl);
-
-  // Utility function used to implement the other versions below.
-  void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl);
-
-  void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
-                                       DirtyCardToOopClosure* dcto_cl,
-                                       MemRegionClosure* cl,
-                                       int n_threads);
+  // of mr. Dirty cards are _not_ cleared by the iterator method itself,
+  // but closures may arrange to do so on their own should they so wish.
+  void non_clean_card_iterate_serial(MemRegion mr, MemRegionClosure* cl);
+
+  // A variant of the above that will operate in a parallel mode if
+  // worker threads are available, and clear the dirty cards as it
+  // processes them.
+  // ClearNoncleanCardWrapper cl must wrap the DirtyCardToOopClosure dcto_cl,
+  // which may itself be modified by the method.
+  void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
+                                                DirtyCardToOopClosure* dcto_cl,
+                                                ClearNoncleanCardWrapper* cl);
+
+ private:
+  // Work method used to implement non_clean_card_iterate_possibly_parallel()
+  // above in the parallel case.
+  void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
+                                            DirtyCardToOopClosure* dcto_cl,
+                                            ClearNoncleanCardWrapper* cl,
+                                            int n_threads);
+
+ protected:
   // Dirty the bytes corresponding to "mr" (not all of which must be
   // covered.)
   void dirty_MemRegion(MemRegion mr);
@@ -237,7 +244,7 @@ class CardTableModRefBS: public ModRefBarrierSet {
                       MemRegion used,
                       jint stride, int n_strides,
                       DirtyCardToOopClosure* dcto_cl,
-                      MemRegionClosure* cl,
+                      ClearNoncleanCardWrapper* cl,
                       jbyte** lowest_non_clean,
                       uintptr_t lowest_non_clean_base_chunk_index,
                       size_t    lowest_non_clean_chunk_size);
@@ -409,14 +416,14 @@ public:
   // marking, where a dirty card may cause scanning, and summarization
   // marking, of objects that extend onto subsequent cards.)
   void mod_card_iterate(MemRegionClosure* cl) {
-    non_clean_card_iterate_work(_whole_heap, cl);
+    non_clean_card_iterate_serial(_whole_heap, cl);
   }

   // Like the "mod_cards_iterate" above, except only invokes the closure
   // for cards within the MemRegion "mr" (which is required to be
   // card-aligned and sized.)
   void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) {
-    non_clean_card_iterate_work(mr, cl);
+    non_clean_card_iterate_serial(mr, cl);
   }

   static uintx ct_max_alignment_constraint();
@@ -493,4 +500,5 @@ public:
   void set_CTRS(CardTableRS* rs) { _rs = rs; }
 };

 #endif // SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP
hotspot/src/share/vm/memory/cardTableRS.cpp
@@ -105,107 +105,111 @@ void CardTableRS::younger_refs_iterate(Generation* g,
   g->younger_refs_iterate(blk);
 }

This hunk pulls the locally-defined ClearNoncleanCardWrapper out into the class declared in cardTableRS.hpp and splits its card-clearing logic into serial and parallel work methods; the resulting code is:

inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
  if (_is_par) {
    return clear_card_parallel(entry);
  } else {
    return clear_card_serial(entry);
  }
}

inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
  while (true) {
    // In the parallel case, we may have to do this several times.
    jbyte entry_val = *entry;
    assert(entry_val != CardTableRS::clean_card_val(),
           "We shouldn't be looking at clean cards, and this should "
           "be the only place they get cleaned.");
    if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
        || _ct->is_prev_youngergen_card_val(entry_val)) {
      jbyte res =
        Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
      if (res == entry_val) {
        break;
      } else {
        assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
               "The CAS above should only fail if another thread did "
               "a GC write barrier.");
      }
    } else if (entry_val ==
               CardTableRS::cur_youngergen_and_prev_nonclean_card) {
      // Parallelism shouldn't matter in this case.  Only the thread
      // assigned to scan the card should change this value.
      *entry = _ct->cur_youngergen_card_val();
      break;
    } else {
      assert(entry_val == _ct->cur_youngergen_card_val(),
             "Should be the only possibility.");
      // In this case, the card was clean before, and become
      // cur_youngergen only because of processing of a promoted object.
      // We don't have to look at the card.
      return false;
    }
  }
  return true;
}

inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
  jbyte entry_val = *entry;
  assert(entry_val != CardTableRS::clean_card_val(),
         "We shouldn't be looking at clean cards, and this should "
         "be the only place they get cleaned.");
  assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
         "This should be possible in the sequential case.");
  *entry = CardTableRS::clean_card_val();
  return true;
}

ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
  MemRegionClosure* dirty_card_closure, CardTableRS* ct) :
    _dirty_card_closure(dirty_card_closure), _ct(ct) {
  _is_par = (SharedHeap::heap()->n_par_threads() > 0);
}

void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
  assert(mr.word_size() > 0, "Error");
  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
  // mr.end() may not necessarily be card aligned.
  jbyte* cur_entry = _ct->byte_for(mr.last());
  const jbyte* limit = _ct->byte_for(mr.start());
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  while (cur_entry >= limit) {
    HeapWord* cur_hw = _ct->addr_for(cur_entry);
    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
      // Continue the dirty range by opening the
      // dirty window one card to the left.
      start_of_non_clean = cur_hw;
    } else {
      // We hit a "clean" card; process any non-empty
      // "dirty" range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mrd);
      }
      // Reset the dirty window, while continuing to look
      // for the next dirty card that will start a
      // new dirty window.
      end_of_non_clean = cur_hw;
      start_of_non_clean = cur_hw;
    }
    // Note that "cur_entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    cur_entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
  if (start_of_non_clean < end_of_non_clean) {
    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mrd);
  }
}
// clean (by dirty->clean before) ==> cur_younger_gen
// dirty ==> cur_youngergen_and_prev_nonclean_card
// precleaned ==> cur_youngergen_and_prev_nonclean_card
@@ -246,8 +250,35 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                  cl->gen_boundary());
   ClearNoncleanCardWrapper clear_cl(dcto_cl, this);

-  _ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
-                                 dcto_cl, &clear_cl);
+  const MemRegion urasm = sp->used_region_at_save_marks();
+#ifdef ASSERT
+  // Convert the assertion check to a warning if we are running
+  // CMS+ParNew until related bug is fixed.
+  MemRegion ur    = sp->used_region();
+  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
+         err_msg("Did you forget to call save_marks()? "
+                 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
+                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
+                 urasm.start(), urasm.end(), ur.start(), ur.end()));
+  // In the case of CMS+ParNew, issue a warning
+  if (!ur.contains(urasm)) {
+    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
+    warning("CMS+ParNew: Did you forget to call save_marks()? "
+            "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
+            "[" PTR_FORMAT ", " PTR_FORMAT ")",
+             urasm.start(), urasm.end(), ur.start(), ur.end());
+    MemRegion ur2 = sp->used_region();
+    MemRegion urasm2 = sp->used_region_at_save_marks();
+    if (!ur.equals(ur2)) {
+      warning("CMS+ParNew: Flickering used_region()!!");
+    }
+    if (!urasm.equals(urasm2)) {
+      warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
+    }
+  }
+#endif
+  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, dcto_cl, &clear_cl);
 }

 void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
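The heart of the rewritten do_MemRegion() above is a right-to-left scan that coalesces consecutive non-clean cards into one window before handing it to the wrapped closure. Here is a standalone sketch of that scanning pattern over a plain byte array (illustrative only; the card values and the "process" step are simplified stand-ins, not HotSpot code):

#include <cstdio>

// Simplified stand-ins for the card-table values used above.
const char kClean = 0;
const char kDirty = 1;

// Process cards[lo..hi] right to left, clearing dirty cards and reporting each
// maximal contiguous dirty range exactly once, mirroring the window logic in
// ClearNoncleanCardWrapper::do_MemRegion().
static void scan_backwards(char* cards, int lo, int hi) {
  int end_of_non_clean   = hi + 1;   // exclusive right end of the current window
  int start_of_non_clean = end_of_non_clean;
  for (int cur = hi; cur >= lo; cur--) {
    if (cards[cur] == kDirty) {
      cards[cur] = kClean;           // "clear_card"
      start_of_non_clean = cur;      // grow the window one card to the left
    } else {
      if (start_of_non_clean < end_of_non_clean) {
        printf("process [%d, %d)\n", start_of_non_clean, end_of_non_clean);
      }
      end_of_non_clean = cur;        // reset the window
      start_of_non_clean = cur;
    }
  }
  if (start_of_non_clean < end_of_non_clean) {   // window co-initial with the region
    printf("process [%d, %d)\n", start_of_non_clean, end_of_non_clean);
  }
}

int main() {
  char cards[] = { kDirty, kDirty, kClean, kDirty, kClean, kDirty, kDirty };
  scan_backwards(cards, 0, 6);   // prints [5,7), [3,4), [0,2)
  return 0;
}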
hotspot/src/share/vm/memory/cardTableRS.hpp
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -166,4 +166,21 @@ public:
 };

+class ClearNoncleanCardWrapper: public MemRegionClosure {
+  MemRegionClosure* _dirty_card_closure;
+  CardTableRS* _ct;
+  bool _is_par;
+private:
+  // Clears the given card, return true if the corresponding card should be
+  // processed.
+  inline bool clear_card(jbyte* entry);
+  // Work methods called by the clear_card()
+  inline bool clear_card_serial(jbyte* entry);
+  inline bool clear_card_parallel(jbyte* entry);
+
+public:
+  ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure, CardTableRS* ct);
+  void do_MemRegion(MemRegion mr);
+};
+
 #endif // SHARE_VM_MEMORY_CARDTABLERS_HPP
hotspot/src/share/vm/memory/genCollectedHeap.hpp
@@ -427,13 +427,13 @@ public:
   // explicitly mark reachable objects in younger generations, to avoid
   // excess storage retention.)  If "collecting_perm_gen" is false, then
   // roots that may only contain references to permGen objects are not
-  // scanned. The "so" argument determines which of the roots
+  // scanned; instead, the older_gens closure is applied to all outgoing
+  // references in the perm gen.  The "so" argument determines which of the roots
   // the closure is applied to:
   // "SO_None" does none;
   // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   // "SO_SystemClasses" to all the "system" classes and loaders;
-  // "SO_Symbols_and_Strings" applies the closure to all entries in
-  // SymbolsTable and StringTable.
+  // "SO_Strings" applies the closure to all entries in the StringTable.
   void gen_process_strong_roots(int level,
                                 bool younger_gens_as_roots,
                                 // The remaining arguments are in an order
hotspot/src/share/vm/memory/genOopClosures.hpp
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -175,7 +175,7 @@ class VerifyOopClosure: public OopClosure {
  protected:
   template <class T> inline void do_oop_work(T* p) {
     oop obj = oopDesc::load_decode_heap_oop(p);
-    guarantee(obj->is_oop_or_null(), "invalid oop");
+    guarantee(obj->is_oop_or_null(), err_msg("invalid oop: " INTPTR_FORMAT, obj));
   }
  public:
   virtual void do_oop(oop* p);
hotspot/src/share/vm/memory/sharedHeap.cpp
@@ -46,7 +46,6 @@ enum SH_process_strong_roots_tasks {
   SH_PS_Management_oops_do,
   SH_PS_SystemDictionary_oops_do,
   SH_PS_jvmti_oops_do,
-  SH_PS_SymbolTable_oops_do,
   SH_PS_StringTable_oops_do,
   SH_PS_CodeCache_oops_do,
   // Leave this one last.
@@ -161,13 +160,9 @@ void SharedHeap::process_strong_roots(bool activate_scope,
   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
     if (so & SO_AllClasses) {
       SystemDictionary::oops_do(roots);
-    } else
-      if (so & SO_SystemClasses) {
-        SystemDictionary::always_strong_oops_do(roots);
-      }
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_SymbolTable_oops_do)) {
+    } else if (so & SO_SystemClasses) {
+      SystemDictionary::always_strong_oops_do(roots);
+    }
   }

   if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
hotspot/src/share/vm/memory/sharedHeap.hpp
@@ -192,9 +192,8 @@ public:
     SO_None                = 0x0,
     SO_AllClasses          = 0x1,
     SO_SystemClasses       = 0x2,
-    SO_Symbols             = 0x4,
-    SO_Strings             = 0x8,
-    SO_CodeCache           = 0x10
+    SO_Strings             = 0x4,
+    SO_CodeCache           = 0x8
   };

   FlexibleWorkGang* workers() const  { return _workers; }
@@ -208,14 +207,13 @@ public:
   // Invoke the "do_oop" method the closure "roots" on all root locations.
   // If "collecting_perm_gen" is false, then roots that may only contain
-  // references to permGen objects are not scanned.  If true, the
-  // "perm_gen" closure is applied to all older-to-younger refs in the
+  // references to permGen objects are not scanned; instead, in that case,
+  // the "perm_blk" closure is applied to all outgoing refs in the
   // permanent generation.  The "so" argument determines which of roots
   // the closure is applied to:
   // "SO_None" does none;
   // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   // "SO_SystemClasses" to all the "system" classes and loaders;
-  // "SO_Symbols" applies the closure to all entries in SymbolsTable;
   // "SO_Strings" applies the closure to all entries in StringTable;
   // "SO_CodeCache" applies the closure to all elements of the CodeCache.
   void process_strong_roots(bool activate_scope,
hotspot/src/share/vm/oops/cpCacheOop.cpp
@@ -104,7 +104,7 @@ void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
   void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
   bool success = (result == NULL);
   if (success) {
-    update_barrier_set(f1_addr, f1);
+    update_barrier_set((void*) f1_addr, f1);
   }
 }
@@ -275,21 +275,23 @@ int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
   return (int) bsm_cache_index;
 }

-void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) {
+void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
+                                              methodHandle signature_invoker) {
   assert(is_secondary_entry(), "");
+  // NOTE: it's important that all other values are set before f1 is
+  // set since some users short circuit on f1 being set
+  // (i.e. non-null) and that may result in uninitialized values for
+  // other racing threads (e.g. flags).
   int param_size = signature_invoker->size_of_parameters();
   assert(param_size >= 1, "method argument size must include MH.this");
   param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
-  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
-    // racing threads might be trying to install their own favorites
-    set_f1(call_site());
-  }
   bool is_final = true;
   assert(signature_invoker->is_final_method(), "is_final");
-  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
+  int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size;
+  assert(_flags == 0 || _flags == flags, "flags should be the same");
+  set_flags(flags);
   // do not do set_bytecode on a secondary CP cache entry
   //set_bytecode_1(Bytecodes::_invokedynamic);
+  set_f1_if_null_atomic(call_site());  // This must be the last one to set (see NOTE above)!
 }
hotspot/src/share/vm/opto/escape.cpp
@@ -1437,7 +1437,10 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
   // Update the memory inputs of MemNodes with the value we computed
   // in Phase 2 and move stores memory users to corresponding memory slices.
-#ifdef ASSERT
+
+  // Disable memory split verification code until the fix for 6984348.
+  // Currently it produces false negative results since it does not cover all cases.
+#if 0 // ifdef ASSERT
   visited.Reset();
   Node_Stack old_mems(arena, _compile->unique() >> 2);
 #endif
@@ -1447,7 +1450,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
     Node* n = ptnode_adr(i)->_node;
     assert(n != NULL, "sanity");
     if (n->is_Mem()) {
-#ifdef ASSERT
+#if 0 // ifdef ASSERT
       Node* old_mem = n->in(MemNode::Memory);
       if (!visited.test_set(old_mem->_idx)) {
         old_mems.push(old_mem, old_mem->outcnt());
@@ -1469,13 +1472,13 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
       }
     }
   }
-#ifdef ASSERT
+#if 0 // ifdef ASSERT
   // Verify that memory was split correctly
   while (old_mems.is_nonempty()) {
     Node* old_mem = old_mems.node();
     uint  old_cnt = old_mems.index();
     old_mems.pop();
-    assert(old_cnt = old_mem->outcnt(), "old mem could be lost");
+    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
   }
 #endif
 }
hotspot/src/share/vm/opto/graphKit.cpp
@@ -1033,14 +1033,10 @@ bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
     iter.reset_to_bci(bci());
     iter.next();
     ciMethod* method = iter.get_method(ignore);
-    inputs = method->arg_size_no_receiver();
-    // Add a receiver argument, maybe:
-    if (code != Bytecodes::_invokestatic &&
-        code != Bytecodes::_invokedynamic)
-      inputs += 1;
     // (Do not use ciMethod::arg_size(), because
     // it might be an unloaded method, which doesn't
     // know whether it is static or not.)
+    inputs = method->invoke_arg_size(code);
     int size = method->return_type()->size();
     depth = size - inputs;
   }
@@ -2957,8 +2953,7 @@ static void hook_memory_on_init(GraphKit& kit, int alias_idx,
 //---------------------------set_output_for_allocation-------------------------
 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
-                                          const TypeOopPtr* oop_type,
-                                          bool raw_mem_only) {
+                                          const TypeOopPtr* oop_type) {
   int rawidx = Compile::AliasIdxRaw;
   alloc->set_req( TypeFunc::FramePtr, frameptr() );
   add_safepoint_edges(alloc);
@@ -2982,7 +2977,7 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
                                                  rawoop)->as_Initialize();
   assert(alloc->initialization() == init,  "2-way macro link must work");
   assert(init ->allocation()     == alloc, "2-way macro link must work");
-  if (ReduceFieldZeroing && !raw_mem_only) {
+  {
     // Extract memory strands which may participate in the new object's
     // initialization, and source them from the new InitializeNode.
     // This will allow us to observe initializations when they occur,
@@ -3043,11 +3038,9 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
 // the type to a constant.
 // The optional arguments are for specialized use by intrinsics:
 //  - If 'extra_slow_test' if not null is an extra condition for the slow-path.
-//  - If 'raw_mem_only', do not cast the result to an oop.
 //  - If 'return_size_val', report the the total object size to the caller.
 Node* GraphKit::new_instance(Node* klass_node,
                              Node* extra_slow_test,
-                             bool raw_mem_only, // affect only raw memory
                              Node* *return_size_val) {
   // Compute size in doublewords
   // The size is always an integral number of doublewords, represented
@@ -3118,7 +3111,7 @@ Node* GraphKit::new_instance(Node* klass_node,
                                          size, klass_node,
                                          initial_slow_test);

-  return set_output_for_allocation(alloc, oop_type, raw_mem_only);
+  return set_output_for_allocation(alloc, oop_type);
 }

 //-------------------------------new_array-------------------------------------
@@ -3128,7 +3121,6 @@ Node* GraphKit::new_instance(Node* klass_node,
 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                           Node* length,         // number of array elements
                           int   nargs,          // number of arguments to push back for uncommon trap
-                          bool raw_mem_only,    // affect only raw memory
                           Node* *return_size_val) {
   jint  layout_con = Klass::_lh_neutral_value;
   Node* layout_val = get_layout_helper(klass_node, layout_con);
@@ -3273,7 +3265,7 @@ Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
   }

-  Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
+  Node* javaoop = set_output_for_allocation(alloc, ary_type);

   // Cast length on remaining path to be as narrow as possible
   if (map()->find_edge(length) >= 0) {
@@ -3462,9 +3454,22 @@ void GraphKit::write_barrier_post(Node* oop_store,
   // Get the alias_index for raw card-mark memory
   int adr_type = Compile::AliasIdxRaw;
-  // Smash zero into card
-  Node*   zero = __ ConI(0);
+  Node*   zero = __ ConI(0); // Dirty card value
   BasicType bt = T_BYTE;
+
+  if (UseCondCardMark) {
+    // The classic GC reference write barrier is typically implemented
+    // as a store into the global card mark table.  Unfortunately
+    // unconditional stores can result in false sharing and excessive
+    // coherence traffic as well as false transactional aborts.
+    // UseCondCardMark enables MP "polite" conditional card mark
+    // stores.  In theory we could relax the load from ctrl() to
+    // no_ctrl, but that doesn't buy much latitude.
+    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
+    __ if_then(card_val, BoolTest::ne, zero);
+  }
+
+  // Smash zero into card
   if( !UseConcMarkSweepGC ) {
     __ store(__ ctrl(), card_adr, zero, bt, adr_type);
   } else {
@@ -3472,6 +3477,10 @@ void GraphKit::write_barrier_post(Node* oop_store,
     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
   }

+  if (UseCondCardMark) {
+    __ end_if();
+  }
+
   // Final sync IdealKit and GraphKit.
   final_sync(ideal);
 }
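In plain C++ terms, the barrier this IdealKit code emits behaves like the sketch below: an unconditional dirty store normally, and with UseCondCardMark a load-test-store that skips the write when the card is already dirty, which reduces cache-line ping-pong between writers. The sketch is illustrative only; the card size and table addressing are made-up simplifications, not the VM's actual layout.

#include <cstdint>

const int    kCardShift = 9;   // illustrative: 512-byte cards
const int8_t kDirty     = 0;   // "zero" is the dirty value, as in the code above

// Unconditional post-barrier: always smash zero into the card.
static void card_mark(int8_t* card_table, uintptr_t field_addr) {
  card_table[field_addr >> kCardShift] = kDirty;
}

// UseCondCardMark variant: only store when the card is not already dirty,
// trading an extra load for fewer writes to shared cache lines.
static void cond_card_mark(int8_t* card_table, uintptr_t field_addr) {
  int8_t* card = &card_table[field_addr >> kCardShift];
  if (*card != kDirty) {
    *card = kDirty;
  }
}

int main() {
  int8_t table[16] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};  // all clean
  cond_card_mark(table, 5 * 512);  // dirties card 5 (it was clean)
  card_mark(table, 5 * 512);       // stores unconditionally
  return 0;
}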
hotspot/src/share/vm/opto/graphKit.hpp
@@ -773,15 +773,13 @@ class GraphKit : public Phase {
   // implementation of object creation
   Node* set_output_for_allocation(AllocateNode* alloc,
-                                  const TypeOopPtr* oop_type,
-                                  bool raw_mem_only);
+                                  const TypeOopPtr* oop_type);
   Node* get_layout_helper(Node* klass_node, jint& constant_value);
   Node* new_instance(Node* klass_node,
                      Node* slow_test = NULL,
-                     bool raw_mem_only = false,
                      Node* *return_size_val = NULL);
   Node* new_array(Node* klass_node, Node* count_val, int nargs,
-                  bool raw_mem_only = false,
                   Node* *return_size_val = NULL);

   // Handy for making control flow
   IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
hotspot/src/share/vm/opto/library_call.cpp
@@ -3527,8 +3527,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
     Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
     Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);

-    const bool raw_mem_only = true;
-    newcopy = new_array(klass_node, length, 0, raw_mem_only);
+    newcopy = new_array(klass_node, length, 0);

     // Generate a direct call to the right arraycopy function(s).
     // We know the copy is disjoint but we might not know if the
@@ -4325,8 +4324,6 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
     int raw_adr_idx = Compile::AliasIdxRaw;
-    const bool raw_mem_only = true;
-

     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
     if (array_ctl != NULL) {
@@ -4335,8 +4332,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
     set_control(array_ctl);
     Node* obj_length = load_array_length(obj);
     Node* obj_size  = NULL;
-    Node* alloc_obj = new_array(obj_klass, obj_length, 0, raw_mem_only, &obj_size);
+    Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);

     if (!use_ReduceInitialCardMarks()) {
       // If it is an oop array, it requires very special treatment,
...
...
@@ -4408,7 +4404,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
     // It's an instance, and it passed the slow-path tests.
     PreserveJVMState pjvms(this);
     Node* obj_size  = NULL;
-    Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
+    Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size);

     copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
...
...
hotspot/src/share/vm/opto/loopopts.cpp
...
...
@@ -2262,6 +2262,9 @@ bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& p
 //                   stmt1
 //                     |
 //                     v
+//               loop predicate
+//                     |
+//                     v
 //                  stmt2 clone
 //                     |
 //                     v
...
...
@@ -2272,9 +2275,6 @@ bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& p
 //                  :  false true
 //                  :  |      |
 //                  :  |      v
-//                  :  | loop predicate
-//                  :  |      |
-//                  :  |      v
 //                  :  |   newloop<-----+
 //                  :  |       |        |
 //                  :  |  stmt3 clone   |
...
...
@@ -2330,7 +2330,6 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
     }
   }

-  Node *entry = head->in(LoopNode::EntryControl);
   int dd = dom_depth(head);

   // Step 1: find cut point
...
...
@@ -2627,8 +2626,6 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
   // Backedge of the surviving new_head (the clone) is original last_peel
   _igvn.hash_delete(new_head_clone);
-  Node* new_entry = move_loop_predicates(entry, new_head_clone->in(LoopNode::EntryControl));
-  new_head_clone->set_req(LoopNode::EntryControl, new_entry);
   new_head_clone->set_req(LoopNode::LoopBackControl, last_peel);
   _igvn._worklist.push(new_head_clone);
...
...
hotspot/src/share/vm/opto/macro.cpp
...
...
@@ -221,9 +221,16 @@ void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
     Node *shift = p2x->unique_out();
     Node *addp = shift->unique_out();
     for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
-      Node *st = addp->last_out(j);
-      assert(st->is_Store(), "store required");
-      _igvn.replace_node(st, st->in(MemNode::Memory));
+      Node *mem = addp->last_out(j);
+      if (UseCondCardMark && mem->is_Load()) {
+        assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
+        // The load is checking if the card has been written so
+        // replace it with zero to fold the test.
+        _igvn.replace_node(mem, intcon(0));
+        continue;
+      }
+      assert(mem->is_Store(), "store required");
+      _igvn.replace_node(mem, mem->in(MemNode::Memory));
     }
   } else {
     // G1 pre/post barriers
...
...
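The change to eliminate_card_mark() above keeps card-mark elimination working for the conditional form: once an allocation is scalar-replaced, the card load is replaced with the constant 0, so the not-equal guard becomes statically false and the dirtying store is removed as dead code. A trivial, non-HotSpot illustration of why that fold is sound (plain C++, not C2 IR):

#include <cstdio>

// Illustration only; in C2 the fold happens on the ideal graph, not in source.
static void eliminated_card_mark() {
  const int card_val = 0;    // stands in for _igvn.replace_node(mem, intcon(0))
  if (card_val != 0) {       // the BoolTest::ne guard emitted for UseCondCardMark
    std::puts("dirty card"); // statically unreachable, folded away as dead code
  }
}

int main() { eliminated_card_mark(); return 0; }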
hotspot/src/share/vm/opto/memnode.cpp
...
...
@@ -1259,15 +1259,18 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
     return NULL; // Wait stable graph
   }
   uint cnt = mem->req();
   for (uint i = 1; i < cnt; i++) {
     Node* rc = region->in(i);
     if (rc == NULL || phase->type(rc) == Type::TOP)
       return NULL; // Wait stable graph
     Node* in = mem->in(i);
     if (in == NULL) {
       return NULL; // Wait stable graph
     }
   }
   // Check for loop invariant.
   if (cnt == 3) {
     for (uint i = 1; i < cnt; i++) {
       Node* in = mem->in(i);
       Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
       if (m == mem) {
...
...
@@ -1281,38 +1284,37 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
   // Do nothing here if Identity will find a value
   // (to avoid infinite chain of value phis generation).
   if (!phase->eqv(this, this->Identity(phase)))
     return NULL;

   // Skip the split if the region dominates some control edge of the address.
-  if (cnt == 3 && !MemNode::all_controls_dominate(address, region))
+  if (!MemNode::all_controls_dominate(address, region))
     return NULL;

   const Type* this_type = this->bottom_type();
   int this_index  = phase->C->get_alias_index(addr_t);
   int this_offset = addr_t->offset();
   int this_iid    = addr_t->is_oopptr()->instance_id();
-  int wins = 0;
   PhaseIterGVN* igvn = phase->is_IterGVN();
   Node* phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
   for (uint i = 1; i < region->req(); i++) {
     Node* x;
     Node* the_clone = NULL;
     if (region->in(i) == phase->C->top()) {
       x = phase->C->top();      // Dead path?  Use a dead data op
     } else {
       x = this->clone();        // Else clone up the data op
       the_clone = x;            // Remember for possible deletion.
       // Alter data node to use pre-phi inputs
       if (this->in(0) == region) {
         x->set_req(0, region->in(i));
       } else {
         x->set_req(0, NULL);
       }
       for (uint j = 1; j < this->req(); j++) {
         Node* in = this->in(j);
         if (in->is_Phi() && in->in(0) == region)
           x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
       }
     }
     // Check for a 'win' on some paths
...
...
@@ -1321,12 +1323,11 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
     bool singleton = t->singleton();

     // See comments in PhaseIdealLoop::split_thru_phi().
     if (singleton && t == Type::TOP) {
       singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
     }

     if (singleton) {
-      wins++;
       x = igvn->makecon(t);
     } else {
       // We now call Identity to try to simplify the cloned node.
...
...
@@ -1340,13 +1341,11 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
       // igvn->type(x) is set to x->Value() already.
       x->raise_bottom_type(t);
       Node* y = x->Identity(igvn);
       if (y != x) {
-        wins++;
         x = y;
       } else {
         y = igvn->hash_find(x);
         if (y) {
-          wins++;
           x = y;
         } else {
           // Else x is a new node we are keeping
...
...
@@ -1360,13 +1359,9 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
       igvn->remove_dead_node(the_clone);
     phi->set_req(i, x);
   }
-  if (wins > 0) {
-    // Record Phi
-    igvn->register_new_node_with_optimizer(phi);
-    return phi;
-  }
-  igvn->remove_dead_node(phi);
-  return NULL;
+  // Record Phi
+  igvn->register_new_node_with_optimizer(phi);
+  return phi;
 }

 //------------------------------Ideal------------------------------------------
...
...
@@ -1677,14 +1672,15 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
   // If we are loading from a freshly-allocated object, produce a zero,
   // if the load is provably beyond the header of the object.
   // (Also allow a variable load from a fresh array to produce zero.)
-  if (ReduceFieldZeroing) {
+  const TypeOopPtr* tinst = tp->isa_oopptr();
+  bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
+  if (ReduceFieldZeroing || is_instance) {
     Node* value = can_see_stored_value(mem, phase);
     if (value != NULL && value->is_Con())
       return value->bottom_type();
   }

-  const TypeOopPtr* tinst = tp->isa_oopptr();
-  if (tinst != NULL && tinst->is_known_instance_field()) {
+  if (is_instance) {
     // If we have an instance type and our memory input is the
     // programs's initial memory state, there is no matching store,
     // so just return a zero of the appropriate type
...
...
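The split_through_phi() rewrite above drops the old "wins" counting and always registers the new value Phi. For readers who have not seen the transformation before, here is a source-level sketch of what splitting a load through a Phi means; this is illustrative C++ with made-up names (Obj, f), not C2 IR:

#include <cstdio>

struct Obj { int f; };

// Before: the object is selected first (a "Phi" of two known instances),
// then the field is loaded below the merge point.
int load_below_merge(bool c, Obj* a, Obj* b) {
  Obj* p = c ? a : b;        // merge point
  return p->f;               // single load after the merge
}

// After split_through_phi: the load is cloned into each predecessor and the
// resulting values are merged instead.
int merge_of_loads(bool c, Obj* a, Obj* b) {
  return c ? a->f : b->f;    // value "Phi" of the two cloned loads
}

int main() {
  Obj a{1}, b{2};
  std::printf("%d %d\n", load_below_merge(false, &a, &b), merge_of_loads(false, &a, &b));
  return 0;
}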
hotspot/src/share/vm/opto/stringopts.cpp
...
...
@@ -1172,16 +1172,16 @@ void PhaseStringOpts::int_getChars(GraphKit& kit, Node* arg, Node* char_array, N
 Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start) {
   Node* string = str;
-  Node* offset = kit.make_load(NULL,
+  Node* offset = kit.make_load(kit.control(),
                                kit.basic_plus_adr(string, string, java_lang_String::offset_offset_in_bytes()),
                                TypeInt::INT, T_INT, offset_field_idx);
-  Node* count = kit.make_load(NULL,
+  Node* count = kit.make_load(kit.control(),
                               kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()),
                               TypeInt::INT, T_INT, count_field_idx);
   const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                   TypeAry::make(TypeInt::CHAR, TypeInt::POS),
                                                   ciTypeArrayKlass::make(T_CHAR), true, 0);
-  Node* value = kit.make_load(NULL,
+  Node* value = kit.make_load(kit.control(),
                               kit.basic_plus_adr(string, string, java_lang_String::value_offset_in_bytes()),
                               value_type, T_OBJECT, value_field_idx);
...
...
@@ -1342,7 +1342,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
         }
         //         Node* offset = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, offset_offset),
         //                                      TypeInt::INT, T_INT, offset_field_idx);
-        Node* count = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
+        Node* count = kit.make_load(kit.control(), kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
                                     TypeInt::INT, T_INT, count_field_idx);
         length = __ AddI(length, count);
         string_sizes->init_req(argi, NULL);
...
...
hotspot/src/share/vm/prims/methodHandleWalk.cpp
...
...
@@ -82,10 +82,8 @@ void MethodHandleChain::set_method_handle(Handle mh, TRAPS) {
 void MethodHandleChain::set_last_method(oop target, TRAPS) {
   _is_last = true;
-  klassOop receiver_limit_oop = NULL;
-  int flags = 0;
-  methodOop m = MethodHandles::decode_method(target, receiver_limit_oop, flags);
-  _last_method = methodHandle(THREAD, m);
+  KlassHandle receiver_limit; int flags = 0;
+  _last_method = MethodHandles::decode_method(target, receiver_limit, flags);
   if ((flags & MethodHandles::_dmf_has_receiver) == 0)
     _last_invoke = Bytecodes::_invokestatic;
   else if ((flags & MethodHandles::_dmf_does_dispatch) == 0)
...
...
hotspot/src/share/vm/prims/methodHandles.cpp
(diff collapsed, not shown)
hotspot/src/share/vm/prims/methodHandles.hpp
...
...
@@ -265,13 +265,13 @@ class MethodHandles: AllStatic {
   static inline address from_interpreted_entry(EntryKind ek);

   // helpers for decode_method.
   static methodOop    decode_methodOop(methodOop m, int& decode_flags_result);
-  static methodOop    decode_vmtarget(oop vmtarget, int vmindex, oop mtype, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop    decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop    decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop    decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop    decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop    decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_vmtarget(oop vmtarget, int vmindex, oop mtype, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);

   // Find out how many stack slots an mh pushes or pops.
   // The result is *not* reported as a multiple of stack_move_unit();
...
...
@@ -317,7 +317,7 @@ class MethodHandles: AllStatic {
     _dmf_adapter_lsb    = 0x20,
     _DMF_ADAPTER_MASK   = (_dmf_adapter_lsb << CONV_OP_LIMIT) - _dmf_adapter_lsb
   };
-  static methodOop    decode_method(oop x, klassOop&    receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result);

   enum {
     // format of query to getConstant:
     GC_JVM_PUSH_LIMIT = 0,
...
...
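The signature changes in this header (methodOop/klassOop out-parameters becoming methodHandle/KlassHandle) follow HotSpot's rule that raw oops should not be held across code that may reach a safepoint, because a moving collector can relocate the object; a handle adds an indirection that the VM keeps up to date. The sketch below only illustrates that general idea with hypothetical types, not HotSpot's own classes:

#include <cstdio>

// Hypothetical illustration (not HotSpot code) of raw pointer vs. handle.
struct Method { int id; };

struct MethodRef {                  // plays the role of methodHandle
  Method** slot;                    // the collector rewrites *slot on relocation
  Method* operator->() const { return *slot; }
  bool not_null() const { return slot != nullptr && *slot != nullptr; }
};

int main() {
  Method old_location{42};
  Method* slot = &old_location;     // root the "collector" knows about
  MethodRef ref{&slot};

  Method new_location = old_location;
  slot = &new_location;             // simulated relocation: only the slot is updated

  std::printf("%d\n", ref->id);     // still valid: reads through the updated slot
  return 0;
}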
hotspot/src/share/vm/runtime/globals.hpp
...
...
@@ -620,6 +620,9 @@ class CommandLineFlags {
product(bool, UseSSE42Intrinsics, false, \
"SSE4.2 versions of intrinsics") \
\
product(bool, UseCondCardMark, false, \
"Check for already marked card before updating card table") \
\
develop(bool, TraceCallFixup, false, \
"traces all call fixups") \
\
...
...
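Since UseCondCardMark is a product flag that defaults to false, it has to be enabled explicitly on the command line; for example (the jar name is only a placeholder):

    java -XX:+UseCondCardMark -jar app.jar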
hotspot/src/share/vm/runtime/javaCalls.cpp
...
...
@@ -389,7 +389,7 @@ void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArgument
   // to Java
   if (!os::stack_shadow_pages_available(THREAD, method)) {
     // Throw stack overflow exception with preinitialized exception.
-    Exceptions::throw_stack_overflow_exception(THREAD, __FILE__, __LINE__);
+    Exceptions::throw_stack_overflow_exception(THREAD, __FILE__, __LINE__, method);
     return;
   } else {
     // Touch pages checked if the OS needs them to be touched to be mapped.
...
...
hotspot/src/share/vm/runtime/sharedRuntime.cpp
...
...
@@ -1721,14 +1721,14 @@ char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread,
       targetArity = ArgumentCount(target->signature()).size();
     }
   }
-  klassOop kignore; int dmf_flags = 0;
-  methodOop actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
+  KlassHandle kignore; int dmf_flags = 0;
+  methodHandle actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
   if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver |
                      MethodHandles::_dmf_does_dispatch |
                      MethodHandles::_dmf_from_interface)) != 0)
-    actual_method = NULL;  // MH does extra binds, drops, etc.
+    actual_method = methodHandle();  // MH does extra binds, drops, etc.
   bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0);
-  if (actual_method != NULL) {
+  if (actual_method.not_null()) {
     mhName = actual_method->signature()->as_C_string();
     mhArity = ArgumentCount(actual_method->signature()).size();
     if (!actual_method->is_static())  mhArity += 1;
...
...
hotspot/src/share/vm/runtime/vmThread.cpp
...
...
@@ -291,7 +291,9 @@ void VMThread::run() {
       // Among other things, this ensures that Eden top is correct.
       Universe::heap()->prepare_for_verify();
       os::check_heap();
-      Universe::verify(true, true); // Silent verification to not polute normal output
+      // Silent verification so as not to pollute normal output,
+      // unless we really asked for it.
+      Universe::verify(true, !(PrintGCDetails || Verbose));
     }

     CompileBroker::set_should_block();
...
...
hotspot/src/share/vm/services/g1MemoryPool.cpp
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -34,10 +34,10 @@ G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
                                      size_t init_size,
                                      bool support_usage_threshold) :
   _g1h(g1h), CollectedMemoryPool(name,
                                  MemoryPool::Heap, init_size, undefined_max(), support_usage_threshold) {
   assert(UseG1GC, "sanity");
 }
...
...
@@ -48,44 +48,27 @@ size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
-  size_t young_list_length = g1h->young_list()->length();
-  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
-  size_t survivor_used = survivor_space_used(g1h);
-  eden_used = subtract_up_to_zero(eden_used, survivor_used);
-  return eden_used;
+  return g1h->g1mm()->eden_space_used();
 }

 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
-  return MAX2(survivor_space_used(g1h), (size_t) HeapRegion::GrainBytes);
+  return g1h->g1mm()->survivor_space_committed();
 }

 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
-  size_t survivor_num = g1h->g1_policy()->recorded_survivor_regions();
-  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
-  return survivor_used;
+  return g1h->g1mm()->survivor_space_used();
 }

 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
-  size_t committed = overall_committed(g1h);
-  size_t eden_committed = eden_space_committed(g1h);
-  size_t survivor_committed = survivor_space_committed(g1h);
-  committed = subtract_up_to_zero(committed, eden_committed);
-  committed = subtract_up_to_zero(committed, survivor_committed);
-  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
-  return committed;
+  return g1h->g1mm()->old_space_committed();
 }

 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
-  size_t used = overall_used(g1h);
-  size_t eden_used = eden_space_used(g1h);
-  size_t survivor_used = survivor_space_used(g1h);
-  used = subtract_up_to_zero(used, eden_used);
-  used = subtract_up_to_zero(used, survivor_used);
-  return used;
+  return g1h->g1mm()->old_space_used();
 }

 G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
...
...
hotspot/src/share/vm/services/g1MemoryPool.hpp
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -46,68 +46,9 @@ class G1CollectedHeap;
// get, as this does affect the performance and behavior of G1. Which
// is why we introduce the three memory pools implemented here.
//
// The above approach inroduces a couple of challenging issues in the
// implementation of the three memory pools:
// See comments in g1MonitoringSupport.hpp for additional details
// on this model.
//
// 1) The used space calculation for a pool is not necessarily
// independent of the others. We can easily get from G1 the overall
// used space in the entire heap, the number of regions in the young
// generation (includes both eden and survivors), and the number of
// survivor regions. So, from that we calculate:
//
// survivor_used = survivor_num * region_size
// eden_used = young_region_num * region_size - survivor_used
// old_gen_used = overall_used - eden_used - survivor_used
//
// Note that survivor_used and eden_used are upper bounds. To get the
// actual value we would have to iterate over the regions and add up
// ->used(). But that'd be expensive. So, we'll accept some lack of
// accuracy for those two. But, we have to be careful when calculating
// old_gen_used, in case we subtract from overall_used more then the
// actual number and our result goes negative.
//
// 2) Calculating the used space is straightforward, as described
// above. However, how do we calculate the committed space, given that
// we allocate space for the eden, survivor, and old gen out of the
// same pool of regions? One way to do this is to use the used value
// as also the committed value for the eden and survivor spaces and
// then calculate the old gen committed space as follows:
//
// old_gen_committed = overall_committed - eden_committed - survivor_committed
//
// Maybe a better way to do that would be to calculate used for eden
// and survivor as a sum of ->used() over their regions and then
// calculate committed as region_num * region_size (i.e., what we use
// to calculate the used space now). This is something to consider
// in the future.
//
// 3) Another decision that is again not straightforward is what is
// the max size that each memory pool can grow to. One way to do this
// would be to use the committed size for the max for the eden and
// survivors and calculate the old gen max as follows (basically, it's
// a similar pattern to what we use for the committed space, as
// described above):
//
// old_gen_max = overall_max - eden_max - survivor_max
//
// Unfortunately, the above makes the max of each pool fluctuate over
// time and, even though this is allowed according to the spec, it
// broke several assumptions in the M&M framework (there were cases
// where used would reach a value greater than max). So, for max we
// use -1, which means "undefined" according to the spec.
//
// 4) Now, there is a very subtle issue with all the above. The
// framework will call get_memory_usage() on the three pools
// asynchronously. As a result, each call might get a different value
// for, say, survivor_num which will yield inconsistent values for
// eden_used, survivor_used, and old_gen_used (as survivor_num is used
// in the calculation of all three). This would normally be
// ok. However, it's possible that this might cause the sum of
// eden_used, survivor_used, and old_gen_used to go over the max heap
// size and this seems to sometimes cause JConsole (and maybe other
// clients) to get confused. There's not a really an easy / clean
// solution to this problem, due to the asynchrounous nature of the
// framework.
// This class is shared by the three G1 memory pool classes
...
...
@@ -116,22 +57,6 @@ class G1CollectedHeap;
 // (see comment above), we put the calculations in this class so that
 // we can easily share them among the subclasses.
 class G1MemoryPoolSuper : public CollectedMemoryPool {
-private:
-  // It returns x - y if x > y, 0 otherwise.
-  // As described in the comment above, some of the inputs to the
-  // calculations we have to do are obtained concurrently and hence
-  // may be inconsistent with each other. So, this provides a
-  // defensive way of performing the subtraction and avoids the value
-  // going negative (which would mean a very large result, given that
-  // the parameter are size_t).
-  static size_t subtract_up_to_zero(size_t x, size_t y) {
-    if (x > y) {
-      return x - y;
-    } else {
-      return 0;
-    }
-  }
-
 protected:
   G1CollectedHeap* _g1h;
...
...
@@ -148,13 +73,6 @@ protected:
     return (size_t) -1;
   }

-  static size_t overall_committed(G1CollectedHeap* g1h) {
-    return g1h->capacity();
-  }
-
-  static size_t overall_used(G1CollectedHeap* g1h) {
-    return g1h->used_unlocked();
-  }
-
   static size_t eden_space_committed(G1CollectedHeap* g1h);
   static size_t eden_space_used(G1CollectedHeap* g1h);
...
...
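The sizing model that used to live in this header (upper-bound eden/survivor estimates plus a defensive subtraction so that concurrently sampled inputs can never drive a size_t negative) is easy to restate in a few lines. The sketch below recomputes the removed formulas; the region counts and sizes are made-up example inputs, not values read from G1:

#include <cstddef>
#include <cstdio>

// Defensive subtraction from the removed comment: x - y if x > y, otherwise 0.
static size_t subtract_up_to_zero(size_t x, size_t y) {
  return (x > y) ? x - y : 0;
}

int main() {
  const size_t region_size      = 1024 * 1024;      // example 1 MB regions
  const size_t young_region_num = 12;               // eden + survivor regions
  const size_t survivor_num     = 2;
  const size_t overall_used     = 64 * region_size; // example heap usage

  size_t survivor_used = survivor_num * region_size;
  size_t eden_used     = subtract_up_to_zero(young_region_num * region_size, survivor_used);
  size_t old_gen_used  = subtract_up_to_zero(subtract_up_to_zero(overall_used, eden_used),
                                             survivor_used);

  std::printf("eden=%zu survivor=%zu old=%zu\n", eden_used, survivor_used, old_gen_used);
  return 0;
}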
hotspot/src/share/vm/utilities/exceptions.cpp
...
...
@@ -207,7 +207,7 @@ void Exceptions::_throw_args(Thread* thread, const char* file, int line, Symbol*
 }

-void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file, int line) {
+void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file, int line, methodHandle method) {
   Handle exception;
   if (!THREAD->has_pending_exception()) {
     klassOop k = SystemDictionary::StackOverflowError_klass();
...
...
@@ -215,13 +215,13 @@ void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file
     exception = Handle(THREAD, e);  // fill_in_stack trace does gc
     assert(instanceKlass::cast(k)->is_initialized(), "need to increase min_stack_allowed calculation");
     if (StackTraceInThrowable) {
-      java_lang_Throwable::fill_in_stack_trace(exception);
+      java_lang_Throwable::fill_in_stack_trace(exception, method());
     }
   } else {
     // if prior exception, throw that one instead
     exception = Handle(THREAD, THREAD->pending_exception());
   }
-  _throw_oop(THREAD, file, line, exception());
+  _throw(THREAD, file, line, exception);
 }

 void Exceptions::fthrow(Thread* thread, const char* file, int line, Symbol* h_name, const char* format, ...) {
...
...
hotspot/src/share/vm/utilities/exceptions.hpp
...
...
@@ -144,7 +144,7 @@ class Exceptions {
                          const char* message,
                          ExceptionMsgToUtf8Mode to_utf8_safe = safe_to_utf8);

-  static void throw_stack_overflow_exception(Thread* thread, const char* file, int line);
+  static void throw_stack_overflow_exception(Thread* thread, const char* file, int line, methodHandle method);

   // for AbortVMOnException flag
   NOT_PRODUCT(static void debug_check_abort(Handle exception, const char* message = NULL);)
...
...