openanolis / dragonwell8_hotspot · Commit fd7608b0
Commit fd7608b0, authored Oct 09, 2008 by tonyp
Merge commit; parents: d732eca9, b3b6893b
Showing 10 changed files with 196 additions and 12 deletions (+196, -12)
Changed files:
  src/os/linux/vm/globals_linux.hpp                             +1    -0
  src/os/solaris/vm/globals_solaris.hpp                         +1    -0
  src/os/windows/vm/globals_windows.hpp                         +1    -0
  src/os/windows/vm/os_windows.cpp                              +113  -5
  src/os/windows/vm/os_windows.hpp                              +4    -0
  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp         +59   -5
  src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp         +3    -0
  src/share/vm/gc_implementation/g1/heapRegion.hpp              +2    -1
  src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp    +3    -1
  src/share/vm/runtime/globals.hpp                              +9    -0
src/os/linux/vm/globals_linux.hpp
@@ -38,5 +38,6 @@
 // platforms, but they may have different default values on other platforms.
 //
 define_pd_global(bool, UseLargePages,       false);
+define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
 define_pd_global(bool, UseThreadPriorities, true) ;
src/os/solaris/vm/globals_solaris.hpp
@@ -44,5 +44,6 @@
 // platforms, but they may have different default values on other platforms.
 //
 define_pd_global(bool, UseLargePages,       true);
+define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
 define_pd_global(bool, UseThreadPriorities, false);
src/os/windows/vm/globals_windows.hpp
@@ -37,5 +37,6 @@
 // platforms, but they may have different default values on other platforms.
 //
 define_pd_global(bool, UseLargePages,       false);
+define_pd_global(bool, UseLargePagesIndividualAllocation, true);
 define_pd_global(bool, UseOSErrorReporting, false);  // for now.
 define_pd_global(bool, UseThreadPriorities, true) ;
src/os/windows/vm/os_windows.cpp
@@ -2593,9 +2593,104 @@ bool os::can_execute_large_page_memory() {
 }
 
 char* os::reserve_memory_special(size_t bytes) {
-  DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
-  char* res = (char*)VirtualAlloc(NULL, bytes, flag, PAGE_EXECUTE_READWRITE);
-  return res;
+
+  if (UseLargePagesIndividualAllocation) {
+    if (TracePageSizes && Verbose) {
+       tty->print_cr("Reserving large pages individually.");
+    }
+    char * p_buf;
+    // first reserve enough address space in advance since we want to be
+    // able to break a single contiguous virtual address range into multiple
+    // large page commits but WS2003 does not allow reserving large page space
+    // so we just use 4K pages for reserve, this gives us a legal contiguous
+    // address space. then we will deallocate that reservation, and re alloc
+    // using large pages
+    const size_t size_of_reserve = bytes + _large_page_size;
+    if (bytes > size_of_reserve) {
+      // Overflowed.
+      warning("Individually allocated large pages failed, "
+        "use -XX:-UseLargePagesIndividualAllocation to turn off");
+      return NULL;
+    }
+    p_buf = (char *) VirtualAlloc(NULL,
+                                  size_of_reserve,  // size of Reserve
+                                  MEM_RESERVE,
+                                  PAGE_EXECUTE_READWRITE);
+    // If reservation failed, return NULL
+    if (p_buf == NULL) return NULL;
+
+    release_memory(p_buf, bytes + _large_page_size);
+    // round up to page boundary.  If the size_of_reserve did not
+    // overflow and the reservation did not fail, this align up
+    // should not overflow.
+    p_buf = (char *) align_size_up((size_t)p_buf, _large_page_size);
+
+    // now go through and allocate one page at a time until all bytes are
+    // allocated
+    size_t  bytes_remaining = align_size_up(bytes, _large_page_size);
+    // An overflow of align_size_up() would have been caught above
+    // in the calculation of size_of_reserve.
+    char * next_alloc_addr = p_buf;
+
+#ifdef ASSERT
+    // Variable for the failure injection
+    long ran_num = os::random();
+    size_t fail_after = ran_num % bytes;
+#endif
+
+    while (bytes_remaining) {
+      size_t bytes_to_rq = MIN2(bytes_remaining, _large_page_size);
+      // Note allocate and commit
+      char * p_new;
+
+#ifdef ASSERT
+      bool inject_error = LargePagesIndividualAllocationInjectError &&
+          (bytes_remaining <= fail_after);
+#else
+      const bool inject_error = false;
+#endif
+
+      if (inject_error) {
+        p_new = NULL;
+      } else {
+        p_new = (char *) VirtualAlloc(next_alloc_addr,
+                                      bytes_to_rq,
+                                      MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
+                                      PAGE_EXECUTE_READWRITE);
+      }
+
+      if (p_new == NULL) {
+        // Free any allocated pages
+        if (next_alloc_addr > p_buf) {
+          // Some memory was committed so release it.
+          size_t bytes_to_release = bytes - bytes_remaining;
+          release_memory(p_buf, bytes_to_release);
+        }
+#ifdef ASSERT
+        if (UseLargePagesIndividualAllocation &&
+            LargePagesIndividualAllocationInjectError) {
+          if (TracePageSizes && Verbose) {
+             tty->print_cr("Reserving large pages individually failed.");
+          }
+        }
+#endif
+        return NULL;
+      }
+      bytes_remaining -= bytes_to_rq;
+      next_alloc_addr += bytes_to_rq;
+    }
+
+    return p_buf;
+
+  } else {
+    // normal policy just allocate it all at once
+    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
+    char * res = (char *)VirtualAlloc(NULL,
+                                      bytes,
+                                      flag,
+                                      PAGE_EXECUTE_READWRITE);
+    return res;
+  }
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
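The hunk above leans on HotSpot-internal helpers (release_memory, align_size_up, MIN2, the _large_page_size field). For orientation, here is a rough standalone sketch of the same reserve-then-recommit pattern using only the Win32 API. The function name and structure are mine, it assumes the process already holds the "Lock pages in memory" privilege that MEM_LARGE_PAGES requires, and it glosses over the retries a production implementation would want (another thread could grab the address range between the release and the per-page commits).

// Standalone illustration only, not HotSpot code.
#include <windows.h>

static char* reserve_large_pages_individually(size_t bytes) {
  const size_t large_page = GetLargePageMinimum();   // e.g. 2 MB; power of two
  if (large_page == 0) return NULL;                  // large pages unsupported

  // 1. Reserve a normal (4K-granularity) range big enough to carve out a
  //    large-page-aligned sub-range, then release it so the address space is
  //    known to be free and contiguous.
  const size_t to_reserve = bytes + large_page;
  if (bytes > to_reserve) return NULL;               // size overflowed
  char* p = (char*)VirtualAlloc(NULL, to_reserve, MEM_RESERVE, PAGE_READWRITE);
  if (p == NULL) return NULL;
  VirtualFree(p, 0, MEM_RELEASE);

  // 2. Round the start up to a large-page boundary (large_page is a power of two).
  char* base = (char*)(((size_t)p + large_page - 1) & ~(large_page - 1));

  // 3. Commit the range one large page at a time at fixed addresses.
  size_t remaining = (bytes + large_page - 1) & ~(large_page - 1);
  char* next = base;
  while (remaining > 0) {
    void* q = VirtualAlloc(next, large_page,
                           MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                           PAGE_READWRITE);
    if (q == NULL) {
      // Partial failure: each page came from its own VirtualAlloc call,
      // so release the already-committed pages one by one.
      for (char* r = base; r < next; r += large_page)
        VirtualFree(r, 0, MEM_RELEASE);
      return NULL;
    }
    remaining -= large_page;
    next += large_page;
  }
  return base;
}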
@@ -2983,6 +3078,7 @@ size_t os::win32::_default_stack_size = 0;
 volatile intx os::win32::_os_thread_count    = 0;
 bool   os::win32::_is_nt              = false;
+bool   os::win32::_is_windows_2003    = false;
 
 void os::win32::initialize_system_info() {
@@ -3005,7 +3101,15 @@ void os::win32::initialize_system_info() {
   GetVersionEx(&oi);
   switch(oi.dwPlatformId) {
     case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
-    case VER_PLATFORM_WIN32_NT:      _is_nt = true;  break;
+    case VER_PLATFORM_WIN32_NT:
+      _is_nt = true;
+      {
+        int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
+        if (os_vers == 5002) {
+          _is_windows_2003 = true;
+        }
+      }
+      break;
     default: fatal("Unknown platform");
   }
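For reference, oi.dwMajorVersion * 1000 + oi.dwMinorVersion encodes Windows NT 5.2 as 5002; that is the version reported by Windows Server 2003 (Windows XP Professional x64 reports the same), which is why this check is what drives _is_windows_2003 and, below, the ergonomic default for UseLargePagesIndividualAllocation.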
@@ -3103,9 +3207,13 @@ void os::init(void) {
     NoYieldsInMicrolock = true;
   }
 #endif
+  // This may be overridden later when argument processing is done.
+  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
+    os::win32::is_windows_2003());
+
   // Initialize main_process and main_thread
   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
-  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
+  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
     fatal("DuplicateHandle failed\n");
   }
src/os/windows/vm/os_windows.hpp
@@ -34,6 +34,7 @@ class win32 {
   static julong _physical_memory;
   static size_t _default_stack_size;
   static bool   _is_nt;
+  static bool   _is_windows_2003;
 
  public:
   // Windows-specific interface:
@@ -60,6 +61,9 @@ class win32 {
   // Tells whether the platform is NT or Windown95
   static bool is_nt() { return _is_nt; }
 
+  // Tells whether the platform is Windows 2003
+  static bool is_windows_2003() { return _is_windows_2003; }
+
   // Returns the byte size of a virtual memory page
   static int vm_page_size() { return _vm_page_size; }
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1789,6 +1789,20 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
   }
 }
 
+class ResetClaimValuesClosure: public HeapRegionClosure {
+public:
+  bool doHeapRegion(HeapRegion* r) {
+    r->set_claim_value(HeapRegion::InitialClaimValue);
+    return false;
+  }
+};
+
+void
+G1CollectedHeap::reset_heap_region_claim_values() {
+  ResetClaimValuesClosure blk;
+  heap_region_iterate(&blk);
+}
+
 #ifdef ASSERT
 // This checks whether all regions in the heap have the correct claim
 // value. I also piggy-backed on this a check to ensure that the
@@ -2031,10 +2045,12 @@ public:
 class VerifyRegionClosure: public HeapRegionClosure {
 public:
   bool _allow_dirty;
-  VerifyRegionClosure(bool allow_dirty)
-    : _allow_dirty(allow_dirty) {}
+  bool _par;
+  VerifyRegionClosure(bool allow_dirty, bool par = false)
+    : _allow_dirty(allow_dirty), _par(par) {}
   bool doHeapRegion(HeapRegion* r) {
-    guarantee(r->claim_value() == 0, "Should be unclaimed at verify points.");
+    guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
+              "Should be unclaimed at verify points.");
     if (r->isHumongous()) {
       if (r->startsHumongous()) {
         // Verify the single H object.
@@ -2082,6 +2098,25 @@ public:
   }
 };
 
+// This is the task used for parallel heap verification.
+class G1ParVerifyTask: public AbstractGangTask {
+private:
+  G1CollectedHeap* _g1h;
+  bool _allow_dirty;
+
+public:
+  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
+    AbstractGangTask("Parallel verify task"),
+    _g1h(g1h), _allow_dirty(allow_dirty) { }
+
+  void work(int worker_i) {
+    VerifyRegionClosure blk(_allow_dirty, true);
+    _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
+                                          HeapRegion::ParVerifyClaimValue);
+  }
+};
+
 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
     if (!silent) { gclog_or_tty->print("roots "); }
@@ -2092,8 +2127,27 @@ void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
                 &rootsCl);
     rem_set()->invalidate(perm_gen()->used_region(), false);
     if (!silent) { gclog_or_tty->print("heapRegions "); }
-    VerifyRegionClosure blk(allow_dirty);
-    _hrs->iterate(&blk);
+    if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
+      assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
+             "sanity check");
+
+      G1ParVerifyTask task(this, allow_dirty);
+      int n_workers = workers()->total_workers();
+      set_par_threads(n_workers);
+      workers()->run_task(&task);
+      set_par_threads(0);
+
+      assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
+             "sanity check");
+
+      reset_heap_region_claim_values();
+
+      assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
+             "sanity check");
+    } else {
+      VerifyRegionClosure blk(allow_dirty);
+      _hrs->iterate(&blk);
+    }
     if (!silent) gclog_or_tty->print("remset ");
     rem_set()->verify();
     guarantee(!rootsCl.failures(), "should not have had failures");
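The parallel verification path above builds on the claim-value protocol of heap_region_par_iterate_chunked, which this diff does not show. As a rough illustration of that idea (plain C++11, not HotSpot code; the chunked start offsets the real iterator uses are omitted and all names are mine): every worker walks the whole region array and tries to CAS each region's claim field from the initial value to the task's claim value, so each region is processed by exactly one worker, and the claim values can be asserted before and after the task and then reset, exactly as G1CollectedHeap::verify does.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum ClaimValue { InitialClaimValue = 0, ParVerifyClaimValue = 4 };

struct Region {
  std::atomic<int> claim{InitialClaimValue};
  int id;
};

// Each worker scans all regions; the CAS decides who processes which one.
void par_iterate(std::vector<Region>& heap, int claim_value,
                 void (*process)(Region&), int n_workers) {
  std::vector<std::thread> workers;
  for (int w = 0; w < n_workers; ++w) {
    workers.emplace_back([&heap, claim_value, process] {
      for (Region& r : heap) {
        int expected = InitialClaimValue;
        if (r.claim.compare_exchange_strong(expected, claim_value))
          process(r);                       // this worker won the region
      }
    });
  }
  for (auto& t : workers) t.join();
}

int main() {
  std::vector<Region> heap(64);
  for (int i = 0; i < 64; ++i) heap[i].id = i;

  par_iterate(heap, ParVerifyClaimValue,
              [](Region& r) { std::printf("verified region %d\n", r.id); }, 4);

  // Afterwards every region carries ParVerifyClaimValue; reset so the next
  // parallel task starts from a known state (cf. reset_heap_region_claim_values).
  for (Region& r : heap) r.claim.store(InitialClaimValue);
}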
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -890,6 +890,9 @@ public:
                                        int worker,
                                        jint claim_value);
 
+  // It resets all the region claim values to the default.
+  void reset_heap_region_claim_values();
+
 #ifdef ASSERT
   bool check_heap_region_claim_values(jint claim_value);
 #endif // ASSERT
src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -317,7 +317,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
     InitialClaimValue     = 0,
     FinalCountClaimValue  = 1,
     NoteEndClaimValue     = 2,
-    ScrubRemSetClaimValue = 3
+    ScrubRemSetClaimValue = 3,
+    ParVerifyClaimValue   = 4
   };
 
   // Concurrent refinement requires contiguous heap regions (in which TLABs
src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
@@ -391,6 +391,8 @@ size_t MutableNUMASpace::default_chunk_size() {
 }
 
 // Produce a new chunk size. page_size() aligned.
+// This function is expected to be called on sequence of i's from 0 to
+// lgrp_spaces()->length().
 size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
   size_t pages_available = base_space_size();
   for (int j = 0; j < i; j++) {
@@ -405,7 +407,7 @@ size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
   size_t chunk_size = 0;
   if (alloc_rate > 0) {
     LGRPSpace *ls = lgrp_spaces()->at(i);
-    chunk_size = (size_t)(ls->alloc_rate()->average() * pages_available / alloc_rate) * page_size();
+    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
   }
   chunk_size = MAX2(chunk_size, page_size());
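To make the rewritten expression concrete, a made-up worked example: if this lgrp's ls->alloc_rate()->average() is 2.0, the summed alloc_rate is 8.0, pages_available is 100 and page_size() is 4096, then chunk_size = (size_t)(2.0 / 8.0 * 100) * 4096 = 25 * 4096 = 102400 bytes, i.e. the space receives a quarter of the available pages in proportion to its share of the allocation rate. The rewritten form computes that rate fraction first and only then scales it by pages_available.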
src/share/vm/runtime/globals.hpp
@@ -330,6 +330,12 @@ class CommandLineFlags {
   product_pd(bool, UseLargePages,                                           \
           "Use large page memory")                                          \
                                                                             \
+  product_pd(bool, UseLargePagesIndividualAllocation,                       \
+          "Allocate large pages individually for better affinity")          \
+                                                                            \
+  develop(bool, LargePagesIndividualAllocationInjectError, false,           \
+          "Fail large pages individual allocation")                         \
+                                                                            \
   develop(bool, TracePageSizes, false,                                      \
           "Trace page size selection and usage.")                           \
                                                                             \
@@ -1819,6 +1825,9 @@ class CommandLineFlags {
   diagnostic(bool, VerifyDuringGC, false,                                   \
           "Verify memory system during GC (between phases)")                \
                                                                             \
+  diagnostic(bool, GCParallelVerificationEnabled, true,                     \
+          "Enable parallel memory system verification")                     \
+                                                                            \
   diagnostic(bool, VerifyRememberedSets, false,                             \
           "Verify GC remembered sets")                                      \
                                                                             \
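A brief note on the flag macros used here, for readers outside HotSpot: product_pd declares a product flag whose default is platform-dependent and comes from the define_pd_global entries in the globals_linux.hpp, globals_solaris.hpp and globals_windows.hpp hunks above (so the new flag defaults to true only on Windows, and ergonomically only on Windows 2003 via FLAG_SET_ERGO in os::init); develop flags such as LargePagesIndividualAllocationInjectError are only settable in debug (non-product) builds; diagnostic flags such as GCParallelVerificationEnabled normally require -XX:+UnlockDiagnosticVMOptions to change. As the warning string in os_windows.cpp already states, the new allocation behaviour can be turned off with -XX:-UseLargePagesIndividualAllocation.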