openanolis / dragonwell8_hotspot · commit 044288ff
Commit 044288ff
Authored October 18, 2010 by never
Merge commit; parents: 300bd132 and 090b9996
Showing 24 changed files, with 292 additions and 337 deletions.

src/cpu/sparc/vm/globals_sparc.hpp                       +2   -0
src/cpu/x86/vm/globals_x86.hpp                           +2   -0
src/cpu/zero/vm/globals_zero.hpp                         +2   -0
src/os/linux/vm/attachListener_linux.cpp                 +2   -2
src/os/linux/vm/os_linux.cpp                             +17  -6
src/os/solaris/vm/os_solaris.cpp                         +8   -9
src/os/windows/vm/os_windows.cpp                         +16  -2
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp    +86  -43
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp    +16  -92
src/share/vm/gc_implementation/g1/g1OopClosures.hpp      +0   -2
src/share/vm/gc_implementation/g1/g1RemSet.cpp           +35  -47
src/share/vm/gc_implementation/g1/g1RemSet.hpp           +39  -110
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp    +10  -6
src/share/vm/gc_implementation/g1/g1_globals.hpp         +0   -3
src/share/vm/gc_implementation/includeDB_gc_g1           +6   -0
src/share/vm/includeDB_core                              +1   -0
src/share/vm/runtime/arguments.cpp                       +10  -4
src/share/vm/runtime/arguments.hpp                       +3   -0
src/share/vm/runtime/globals.hpp                         +5   -2
src/share/vm/runtime/sharedRuntime.cpp                   +3   -0
src/share/vm/runtime/sharedRuntime.hpp                   +3   -0
src/share/vm/runtime/thread.cpp                          +3   -0
src/share/vm/utilities/debug.cpp                         +10  -8
src/share/vm/utilities/exceptions.cpp                    +13  -1
src/cpu/sparc/vm/globals_sparc.hpp

@@ -62,3 +62,5 @@ define_pd_global(intx, PreInflateSpin, 40); // Determined by running desi
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
src/cpu/x86/vm/globals_x86.hpp

@@ -63,3 +63,5 @@ define_pd_global(intx, PreInflateSpin, 10);
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
src/cpu/zero/vm/globals_zero.hpp

@@ -45,3 +45,5 @@ define_pd_global(intx, StackShadowPages, 5 LP64_ONLY(+1) DEBUG_ONLY(+3));
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
src/os/linux/vm/attachListener_linux.cpp

@@ -176,10 +176,10 @@ int LinuxAttachListener::init() {
   int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
                    os::get_temp_directory(), os::current_process_id());
-  if (n <= (int)UNIX_PATH_MAX) {
+  if (n < (int)UNIX_PATH_MAX) {
     n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
   }
-  if (n > (int)UNIX_PATH_MAX) {
+  if (n >= (int)UNIX_PATH_MAX) {
     return -1;
   }
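The two flipped comparisons are an off-by-one fix: C99 snprintf returns the number of characters the formatted string needs, excluding the terminating NUL, so a return value equal to the buffer size already means the output was truncated. A standalone sketch of that return-value contract (the buffer size and contents here are made up for the demo):

    #include <cstdio>

    int main() {
      char buf[8];
      // snprintf reports how many characters the output *needs*,
      // excluding the trailing NUL, even when the buffer is too small.
      int n = std::snprintf(buf, sizeof(buf), "%s", "java_pid1234");
      // Here n == 12 while buf holds only "java_pi": the old test
      // `n <= (int)sizeof(buf)` would also accept n == 8, a path whose
      // last character was silently dropped; `n < (int)sizeof(buf)` is
      // the correct "fits, including the NUL" test.
      std::printf("n=%d buf=\"%s\"\n", n, buf);
      return 0;
    }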
src/os/linux/vm/os_linux.cpp

 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -827,8 +827,10 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
     switch (thr_type) {
     case os::java_thread:
-      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
-      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
+      // Java threads use ThreadStackSize which default value can be
+      // changed with the flag -Xss
+      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
+      stack_size = JavaThread::stack_size_at_create();
       break;
     case os::compiler_thread:
       if (CompilerThreadStackSize > 0) {

@@ -3922,12 +3924,21 @@ jint os::init_2(void)
   Linux::signal_sets_init();
   Linux::install_signal_handlers();

+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size.  Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
+
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < Linux::min_stack_allowed) {
+      threadStackSizeInBytes < os::Linux::min_stack_allowed) {
         tty->print_cr("\nThe stack size specified is too small, "
                       "Specify at least %dk",
-                      Linux::min_stack_allowed / K);
+                      os::Linux::min_stack_allowed / K);
     return JNI_ERR;
   }

@@ -4839,7 +4850,7 @@ void Parker::park(bool isAbsolute, jlong time) {
   // Next, demultiplex/decode time arguments
   timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
     return;
   }
   if (time > 0) {
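To see what the new clamp amounts to, here is the same arithmetic as a standalone sketch; the page counts and page size below are illustrative stand-ins, since the real values are per-platform flags that vary by build:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-ins for the HotSpot globals used in the patch.
    const size_t StackYellowPages = 2;
    const size_t StackRedPages    = 1;
    const size_t StackShadowPages = 20;
    const size_t BytesPerWord     = 8;      // 64-bit VM
    const size_t page_size        = 4096;   // typical Linux page size

    int main() {
      // COMPILER2_PRESENT(+1) adds one page for C2 recursion in the main
      // thread; 2*BytesPerWord pages cover VM stack use during class init.
      size_t min_stack_allowed =
          (StackYellowPages + StackRedPages + StackShadowPages +
           2 * BytesPerWord + 1) * page_size;
      // MAX2 in HotSpot just picks the larger value; the platform floor
      // here is an assumed number for the demo.
      size_t platform_floor = 64 * 1024;
      min_stack_allowed = std::max(min_stack_allowed, platform_floor);
      std::printf("minimum -Xss for this configuration: %zuk\n",
                  min_stack_allowed / 1024);  // 160k with these values
      return 0;
    }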
src/os/solaris/vm/os_solaris.cpp

 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -4878,18 +4878,17 @@ jint os::init_2(void) {
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
   // size.  Add a page for compiler2 recursion in main thread.
-  // Add in BytesPerWord times page size to account for VM stack during
+  // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
-  guarantee((Solaris::min_stack_allowed >=
-    (StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
-     COMPILER2_PRESENT(+1)) * page_size),
-    "need to increase Solaris::min_stack_allowed on this platform");
+  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < Solaris::min_stack_allowed) {
+      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
     tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
-                  Solaris::min_stack_allowed / K);
+                  os::Solaris::min_stack_allowed / K);
     return JNI_ERR;
   }

@@ -5837,7 +5836,7 @@ void Parker::park(bool isAbsolute, jlong time) {
   // First, demultiplex/decode time arguments
   timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
     return;
   }
   if (time > 0) {
src/os/windows/vm/os_windows.cpp

@@ -3311,7 +3311,6 @@ extern "C" {
   }
 }

-
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
   // Allocate a single page and mark it as readable for safepoint polling

@@ -3390,6 +3389,21 @@ jint os::init_2(void) {
     actual_reserve_size = default_reserve_size;
   }

+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size.  Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  size_t min_stack_allowed =
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
+  if (actual_reserve_size < min_stack_allowed) {
+    tty->print_cr("\nThe stack size specified is too small, "
+                  "Specify at least %dk",
+                  min_stack_allowed / K);
+    return JNI_ERR;
+  }
+
   JavaThread::set_stack_size_at_create(stack_commit_size);

   // Calculate theoretical max. size of Threads to guard gainst artifical

@@ -3992,7 +4006,7 @@ void Parker::park(bool isAbsolute, jlong time) {
   if (time < 0) { // don't wait
     return;
   }
-  else if (time == 0) {
+  else if (time == 0 && !isAbsolute) {
     time = INFINITE;
   }
   else if (isAbsolute) {
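All three ports get the same semantic fix to Parker::park: `time` is overloaded, meaning a relative wait in nanoseconds when `isAbsolute` is false but an absolute deadline in milliseconds since the epoch when it is true, so an absolute deadline of 0 is already in the past and must return immediately rather than being read as "wait forever". A compact restatement of the decode, as a sketch rather than HotSpot code:

    #include <cstdio>

    // Behavioral summary of the patched Parker::park(isAbsolute, time):
    //   time <  0                -> return immediately
    //   isAbsolute && time == 0  -> deadline (the epoch) already passed
    //   !isAbsolute && time == 0 -> block indefinitely
    //   otherwise                -> wait for the relative/absolute deadline
    bool returns_immediately(bool isAbsolute, long long time) {
      return time < 0 || (isAbsolute && time == 0);
    }

    int main() {
      std::printf("parkUntil(0) -> %s\n",
                  returns_immediately(true, 0) ? "no wait" : "wait");
      std::printf("park(0)      -> %s\n",
                  returns_immediately(false, 0) ? "no wait" : "wait");
      return 0;
    }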
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -791,7 +791,7 @@ class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
   int _worker_i;
 public:
   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
-    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
+    _cl(g1->g1_rem_set(), worker_i),
     _worker_i(worker_i),
     _g1h(g1)
   { }

@@ -890,7 +890,7 @@ void G1CollectedHeap::do_collection(bool explicit_gc,
     abandon_cur_alloc_region();
     abandon_gc_alloc_regions();
     assert(_cur_alloc_region == NULL, "Invariant.");
-    g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
+    g1_rem_set()->cleanupHRRS();
     tear_down_region_lists();
     set_used_regions_to_need_zero_fill();

@@ -1506,16 +1506,12 @@ jint G1CollectedHeap::initialize() {
   }

   // Also create a G1 rem set.
-  if (G1UseHRIntoRS) {
-    if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
-      _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
-    } else {
-      vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
-      return JNI_ENOMEM;
-    }
+  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
+    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
   } else {
-    _g1_rem_set = new StupidG1RemSet(this);
+    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
+    return JNI_ENOMEM;
   }

   // Carve out the G1 part of the heap.

@@ -2706,8 +2702,7 @@ size_t G1CollectedHeap::max_pending_card_num() {
 }

 size_t G1CollectedHeap::cards_scanned() {
-  HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
-  return g1_rset->cardsScanned();
+  return g1_rem_set()->cardsScanned();
 }

 void

@@ -3850,6 +3845,54 @@ G1ParScanThreadState::print_termination_stats(int i,
                undo_waste() * HeapWordSize / K);
 }

+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (refs()->pop_overflow(ref)) {
+      deal_with_reference(ref);
+    }
+    while (refs()->pop_local(ref)) {
+      deal_with_reference(ref);
+    }
+  } while (!refs()->is_empty());
+}
+
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }

@@ -4052,38 +4095,39 @@ public:
     : _g1h(g1h), _par_scan_state(par_scan_state),
       _queues(queues), _terminator(terminator) {}

-  void do_void() {
-    G1ParScanThreadState* pss = par_scan_state();
-    while (true) {
-      pss->trim_queue();
-
-      StarTask stolen_task;
-      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-        // slightly paranoid tests; I'm trying to catch potential
-        // problems before we go into push_on_queue to know where the
-        // problem is coming from
-        assert((oop*)stolen_task != NULL, "Error");
-        if (stolen_task.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*) stolen_task;
-          assert(has_partial_array_mask(p) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
-          pss->push_on_queue(p);
-        } else {
-          oop* p = (oop*) stolen_task;
-          assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
-          pss->push_on_queue(p);
-        }
-        continue;
-      }
-      pss->start_term_time();
-      if (terminator()->offer_termination()) break;
-      pss->end_term_time();
-    }
-    pss->end_term_time();
-    pss->retire_alloc_buffers();
-  }
+  void do_void();
+
+private:
+  inline bool offer_termination();
 };

+bool G1ParEvacuateFollowersClosure::offer_termination() {
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->start_term_time();
+  const bool res = terminator()->offer_termination();
+  pss->end_term_time();
+  return res;
+}
+
+void G1ParEvacuateFollowersClosure::do_void() {
+  StarTask stolen_task;
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->trim_queue();
+  do {
+    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
+      assert(pss->verify_task(stolen_task), "sanity");
+      if (stolen_task.is_narrow()) {
+        pss->push_on_queue((narrowOop*) stolen_task);
+      } else {
+        pss->push_on_queue((oop*) stolen_task);
+      }
+      pss->trim_queue();
+    }
+  } while (!offer_termination());
+  pss->retire_alloc_buffers();
+}
+
 class G1ParTask : public AbstractGangTask {
 protected:

@@ -4182,8 +4226,7 @@ public:
       pss.print_termination_stats(i);
     }

-    assert(pss.refs_to_scan() == 0, "Task queue should be empty");
-    assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+    assert(pss.refs()->is_empty(), "should be empty");
     double end_time_ms = os::elapsedTime() * 1000.0;
     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
   }
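The restructured do_void is the classic steal-then-terminate loop: drain your own queue, steal from peers until nothing is left, and only then vote to terminate; a failed termination offer sends the thread back to stealing. A generic single-file sketch of the same control flow, with simplified sequential stand-ins rather than the HotSpot queue and terminator classes:

    #include <cstdio>
    #include <deque>
    #include <vector>

    // Toy stand-ins to show the shape of G1ParEvacuateFollowersClosure::do_void().
    struct Worker {
      std::deque<int> queue;
      void trim_queue() {                  // process everything we own
        while (!queue.empty()) { queue.pop_back(); }
      }
    };

    struct System {
      std::vector<Worker> workers;
      bool steal(int thief, int& task) {   // take one task from any peer
        for (size_t v = 0; v < workers.size(); ++v) {
          if ((int)v != thief && !workers[v].queue.empty()) {
            task = workers[v].queue.front();
            workers[v].queue.pop_front();
            return true;
          }
        }
        return false;
      }
      bool offer_termination() {           // real code: a rendezvous where
        for (const Worker& w : workers)    // all threads must agree
          if (!w.queue.empty()) return false;
        return true;
      }
    };

    void do_void(System& sys, int i) {
      Worker& self = sys.workers[i];
      self.trim_queue();                   // 1. drain own work first
      do {
        int task;
        while (sys.steal(i, task)) {       // 2. steal; stolen work can fan
          self.queue.push_back(task);      //    out, so re-trim each time
          self.trim_queue();
        }
      } while (!sys.offer_termination());  // 3. re-loop if termination fails
    }

    int main() {
      System sys{{Worker{{1, 2, 3}}, Worker{{4, 5}}}};
      do_void(sys, 0);
      std::printf("all queues empty: %d\n", (int)sys.offer_termination());
      return 0;
    }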
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -1651,49 +1651,17 @@ public:
   size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
   size_t undo_waste() const        { return _undo_waste; }

-  template <class T> void push_on_queue(T* ref) {
-    assert(ref != NULL, "invariant");
-    assert(has_partial_array_mask(ref) ||
-           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
-#ifdef ASSERT
-    if (has_partial_array_mask(ref)) {
-      oop p = clear_partial_array_mask(ref);
-      // Verify that we point into the CS
-      assert(_g1h->obj_in_cs(p), "Should be in CS");
-    }
-#endif
-    refs()->push(ref);
-  }
-
-  void pop_from_queue(StarTask& ref) {
-    if (refs()->pop_local(ref)) {
-      assert((oop*)ref != NULL, "pop_local() returned true");
-      assert(UseCompressedOops || !ref.is_narrow(), "Error");
-      assert(has_partial_array_mask((oop*)ref) ||
-             _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
-                                                     : oopDesc::load_decode_heap_oop((oop*)ref)),
-             "invariant");
-    } else {
-      StarTask null_task;
-      ref = null_task;
-    }
-  }
-
-  void pop_from_overflow_queue(StarTask& ref) {
-    StarTask new_ref;
-    refs()->pop_overflow(new_ref);
-    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
-    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
-    assert(has_partial_array_mask((oop*)new_ref) ||
-           _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
-                                                       : oopDesc::load_decode_heap_oop((oop*)new_ref)),
-           "invariant");
-    ref = new_ref;
-  }
+#ifdef ASSERT
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
+  template <class T> void push_on_queue(T* ref) {
+    assert(verify_ref(ref), "sanity");
+    refs()->push(ref);
+  }

   int refs_to_scan()            { return (int)refs()->size(); }
   int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }

   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
     if (G1DeferredRSUpdate) {
       deferred_rs_update(from, p, tid);

@@ -1818,59 +1786,15 @@ private:
     }
   }

-public:
-  void trim_queue() {
-    // I've replicated the loop twice, first to drain the overflow
-    // queue, second to drain the task queue. This is better than
-    // having a single loop, which checks both conditions and, inside
-    // it, either pops the overflow queue or the task queue, as each
-    // loop is tighter. Also, the decision to drain the overflow queue
-    // first is not arbitrary, as the overflow queue is not visible
-    // to the other workers, whereas the task queue is. So, we want to
-    // drain the "invisible" entries first, while allowing the other
-    // workers to potentially steal the "visible" entries.
-    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      while (overflowed_refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_overflow_queue(ref_to_scan);
-        // We shouldn't have pushed it on the queue if it was not
-        // pointing into the CSet.
-        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
-        if (ref_to_scan.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*)ref_to_scan;
-          assert(!has_partial_array_mask(p) &&
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        } else {
-          oop* p = (oop*)ref_to_scan;
-          assert((has_partial_array_mask(p) &&
-                  _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        }
-      }
-
-      while (refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_queue(ref_to_scan);
-        if ((oop*)ref_to_scan != NULL) {
-          if (ref_to_scan.is_narrow()) {
-            assert(UseCompressedOops, "Error");
-            narrowOop* p = (narrowOop*)ref_to_scan;
-            assert(!has_partial_array_mask(p) &&
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          } else {
-            oop* p = (oop*)ref_to_scan;
-            assert((has_partial_array_mask(p) &&
-                    _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          }
-        }
-      }
-    }
-  }
+  void deal_with_reference(StarTask ref) {
+    assert(verify_task(ref), "sanity");
+    if (ref.is_narrow()) {
+      deal_with_reference((narrowOop*)ref);
+    } else {
+      deal_with_reference((oop*)ref);
+    }
+  }
+
+public:
+  void trim_queue();
 };
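Both the old asserts and the new verify_ref lean on has_partial_array_mask / clear_partial_array_mask: G1 queues the continuation of a large object array as a tagged pointer, so one queue can carry both plain reference locations and "keep scanning this array" tasks. A minimal sketch of that tagging trick, with an illustrative mask value rather than G1's actual encoding:

    #include <cassert>
    #include <cstdint>

    // Object pointers are at least word-aligned, so a low bit is free to
    // mark "partial array" tasks (illustrative scheme, not G1's).
    const uintptr_t partial_mask = 0x1;

    void* set_partial_array_mask(void* obj) {
      return (void*)((uintptr_t)obj | partial_mask);
    }
    bool has_partial_array_mask(void* ref) {
      return ((uintptr_t)ref & partial_mask) != 0;
    }
    void* clear_partial_array_mask(void* ref) {
      return (void*)((uintptr_t)ref & ~partial_mask);
    }

    int main() {
      int dummy_obj;  // stands in for a heap array
      void* task = set_partial_array_mask(&dummy_obj);
      assert(has_partial_array_mask(task));
      assert(clear_partial_array_mask(task) == &dummy_obj);
      return 0;
    }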
src/share/vm/gc_implementation/g1/g1OopClosures.hpp

@@ -25,8 +25,6 @@
 class HeapRegion;
 class G1CollectedHeap;
-class G1RemSet;
-class HRInto_G1RemSet;
+class G1RemSet;
 class ConcurrentMark;
 class DirtyCardToOopClosure;
 class CMBitMap;
src/share/vm/gc_implementation/g1/g1RemSet.cpp

@@ -97,13 +97,6 @@ public:
   }
 };

-void
-StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                            int worker_i) {
-  IntoCSRegionClosure rc(_g1, oc);
-  _g1->heap_region_iterate(&rc);
-}
-
 class VerifyRSCleanCardOopClosure: public OopClosure {
   G1CollectedHeap* _g1;
 public:

@@ -119,8 +112,9 @@ public:
   }
 };

-HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
-  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
+G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
+  : _g1(g1), _conc_refine_cards(0),
+    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
     _cg1r(g1->concurrent_g1_refine()),
     _traversal_in_progress(false),
     _cset_rs_update_cl(NULL),

@@ -134,7 +128,7 @@ HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   }
 }

-HRInto_G1RemSet::~HRInto_G1RemSet() {
+G1RemSet::~G1RemSet() {
   delete _seq_task;
   for (uint i = 0; i < n_workers(); i++) {
     assert(_cset_rs_update_cl[i] == NULL, "it should be");

@@ -277,7 +271,7 @@ public:
 // p threads
 // Then thread t will start at region t * floor (n/p)

-HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
+HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
   HeapRegion* result = _g1p->collection_set();
   if (ParallelGCThreads > 0) {
     size_t cs_size = _g1p->collection_set_size();

@@ -290,7 +284,7 @@ HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
   return result;
 }

-void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
+void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = calculateStartRegion(worker_i);

@@ -340,7 +334,7 @@ public:
   }
 };

-void HRInto_G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
+void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);

@@ -439,12 +433,11 @@ public:
   }
 };

-void HRInto_G1RemSet::cleanupHRRS() {
+void G1RemSet::cleanupHRRS() {
   HeapRegionRemSet::cleanup();
 }

-void
-HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                             int worker_i) {
+void
+G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
+                                      int worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();

@@ -508,8 +501,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   _cset_rs_update_cl[worker_i] = NULL;
 }

-void HRInto_G1RemSet::prepare_for_oops_into_collection_set_do() {
+void G1RemSet::prepare_for_oops_into_collection_set_do() {
 #if G1_REM_SET_LOGGING
   PrintRSClosure cl;
   _g1->collection_set_iterate(&cl);

@@ -581,7 +573,7 @@ public:
 // RSet updating,
 // * the post-write barrier shouldn't be logging updates to young
 //   regions (but there is a situation where this can happen - see
-//   the comment in HRInto_G1RemSet::concurrentRefineOneCard below -
+//   the comment in G1RemSet::concurrentRefineOneCard below -
 //   that should not be applicable here), and
 // * during actual RSet updating, the filtering of cards in young
 //   regions in HeapRegion::oops_on_card_seq_iterate_careful is

@@ -601,7 +593,7 @@ public:
   }
 };

-void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
+void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee(_cards_scanned != NULL, "invariant");
   _total_cards_scanned = 0;
   for (uint i = 0; i < n_workers(); ++i)

@@ -692,12 +684,12 @@ public:
   }
 };

-void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
+void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_iterate(&scrub_cl);
 }

-void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
+void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                                 int worker_num, int claim_val) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);

@@ -741,7 +733,7 @@ public:
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };

-bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
+bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                                    bool check_for_refs_into_cset) {
   // Construct the region representing the card.
   HeapWord* start = _ct_bs->addr_for(card_ptr);

@@ -820,7 +812,7 @@ bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i
   return trigger_cl.value();
 }

-bool HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
+bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                               bool check_for_refs_into_cset) {
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {

@@ -995,7 +987,7 @@ public:
   }
 };

-void HRInto_G1RemSet::print_summary_info() {
+void G1RemSet::print_summary_info() {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();

 #if CARD_REPEAT_HISTO

@@ -1029,7 +1021,6 @@ void HRInto_G1RemSet::print_summary_info() {
   g1->concurrent_g1_refine()->threads_do(&p);
   gclog_or_tty->print_cr("");
-  if (G1UseHRIntoRS) {
-    HRRSStatsIter blk;
-    g1->heap_region_iterate(&blk);
-    gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
+  HRRSStatsIter blk;
+  g1->heap_region_iterate(&blk);
+  gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."

@@ -1046,13 +1037,10 @@ void HRInto_G1RemSet::print_summary_info() {
                          blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
                          (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
                          (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
-    gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
-  }
+  gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
 }

-void HRInto_G1RemSet::prepare_for_verify() {
+void G1RemSet::prepare_for_verify() {
   if (G1HRRSFlushLogBuffersOnVerify &&
       (VerifyBeforeGC || VerifyAfterGC)
       &&  !_g1->full_collection()) {
src/share/vm/gc_implementation/g1/g1RemSet.hpp

@@ -27,107 +27,18 @@
 class G1CollectedHeap;
 class CardTableModRefBarrierSet;
-class HRInto_G1RemSet;
 class ConcurrentG1Refine;

+// A G1RemSet in which each heap region has a rem set that records the
+// external heap references into it.  Uses a mod ref bs to track updates,
+// so that they can be used to update the individual region remsets.
+
 class G1RemSet: public CHeapObj {
 protected:
   G1CollectedHeap* _g1;
   unsigned _conc_refine_cards;
   size_t n_workers();

-public:
-  G1RemSet(G1CollectedHeap* g1) :
-    _g1(g1), _conc_refine_cards(0)
-  {}
-
-  // Invoke "blk->do_oop" on all pointers into the CS in object in regions
-  // outside the CS (having invoked "blk->set_region" to set the "from"
-  // region correctly beforehand.) The "worker_i" param is for the
-  // parallel case where the number of the worker thread calling this
-  // function can be helpful in partitioning the work to be done. It
-  // should be the same as the "i" passed to the calling thread's
-  // work(i) function. In the sequential case this param will be ingored.
-  virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                           int worker_i) = 0;
-
-  // Prepare for and cleanup after an oops_into_collection_set_do
-  // call.  Must call each of these once before and after (in sequential
-  // code) any threads call oops into collection set do.  (This offers an
-  // opportunity to sequential setup and teardown of structures needed by a
-  // parallel iteration over the CS's RS.)
-  virtual void prepare_for_oops_into_collection_set_do() = 0;
-  virtual void cleanup_after_oops_into_collection_set_do() = 0;
-
-  // If "this" is of the given subtype, return "this", else "NULL".
-  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
-
-  // Record, if necessary, the fact that *p (where "p" is in region "from",
-  // and is, a fortiori, required to be non-NULL) has changed to its new value.
-  virtual void write_ref(HeapRegion* from, oop* p) = 0;
-  virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
-  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
-  virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
-
-  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
-  // or card, respectively, such that a region or card with a corresponding
-  // 0 bit contains no part of any live object.  Eliminates any remembered
-  // set entries that correspond to dead heap ranges.
-  virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
-
-  // Like the above, but assumes is called in parallel: "worker_num" is the
-  // parallel thread id of the current thread, and "claim_val" is the
-  // value that should be used to claim heap regions.
-  virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                         int worker_num, int claim_val) = 0;
-
-  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
-  // join and leave around parts that must be atomic wrt GC.  (NULL means
-  // being done at a safepoint.)
-  // With some implementations of this routine, when check_for_refs_into_cset
-  // is true, a true result may be returned if the given card contains oops
-  // that have references into the current collection set.
-  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                       bool check_for_refs_into_cset) {
-    return false;
-  }
-
-  // Print any relevant summary info.
-  virtual void print_summary_info() {}
-
-  // Prepare remebered set for verification.
-  virtual void prepare_for_verify() {};
-};
-
-// The simplest possible G1RemSet: iterates over all objects in non-CS
-// regions, searching for pointers into the CS.
-class StupidG1RemSet: public G1RemSet {
-public:
-  StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}
-
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                   int worker_i);
-
-  void prepare_for_oops_into_collection_set_do() {}
-  void cleanup_after_oops_into_collection_set_do() {}
-
-  // Nothing is necessary in the version below.
-  void write_ref(HeapRegion* from, oop* p) {}
-  void write_ref(HeapRegion* from, narrowOop* p) {}
-  void par_write_ref(HeapRegion* from, oop* p, int tid) {}
-  void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
-
-  void scrub(BitMap* region_bm, BitMap* card_bm) {}
-  void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                 int worker_num, int claim_val) {}
-};
-
-// A G1RemSet in which each heap region has a rem set that records the
-// external heap references into it.  Uses a mod ref bs to track updates,
-// so that they can be used to update the individual region remsets.
-class HRInto_G1RemSet: public G1RemSet {
 protected:
   enum SomePrivateConstants {
     UpdateRStoMergeSync = 0,

@@ -175,27 +86,31 @@ public:
   // scanned.
   void cleanupHRRS();

-  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
-  ~HRInto_G1RemSet();
+  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
+  ~G1RemSet();

+  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
+  // outside the CS (having invoked "blk->set_region" to set the "from"
+  // region correctly beforehand.) The "worker_i" param is for the
+  // parallel case where the number of the worker thread calling this
+  // function can be helpful in partitioning the work to be done. It
+  // should be the same as the "i" passed to the calling thread's
+  // work(i) function. In the sequential case this param will be ingored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                    int worker_i);

+  // Prepare for and cleanup after an oops_into_collection_set_do
+  // call.  Must call each of these once before and after (in sequential
+  // code) any threads call oops_into_collection_set_do.  (This offers an
+  // opportunity to sequential setup and teardown of structures needed by a
+  // parallel iteration over the CS's RS.)
   void prepare_for_oops_into_collection_set_do();
   void cleanup_after_oops_into_collection_set_do();
+
   void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
   template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
   void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
     if (UseCompressedOops) {
       scanNewRefsRS_work<narrowOop>(oc, worker_i);
     } else {
       scanNewRefsRS_work<oop>(oc, worker_i);
     }
   }
   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
-  HeapRegion* calculateStartRegion(int i);
-
-  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }
+  HeapRegion* calculateStartRegion(int i);

   CardTableModRefBS* ct_bs() { return _ct_bs; }
   size_t cardsScanned() { return _total_cards_scanned; }

@@ -219,17 +134,31 @@ public:
   bool self_forwarded(oop obj);

+  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
+  // or card, respectively, such that a region or card with a corresponding
+  // 0 bit contains no part of any live object.  Eliminates any remembered
+  // set entries that correspond to dead heap ranges.
   void scrub(BitMap* region_bm, BitMap* card_bm);
+
+  // Like the above, but assumes is called in parallel: "worker_num" is the
+  // parallel thread id of the current thread, and "claim_val" is the
+  // value that should be used to claim heap regions.
   void scrub_par(BitMap* region_bm, BitMap* card_bm,
                  int worker_num, int claim_val);

-  // If check_for_refs_into_cset is true then a true result is returned
-  // if the card contains oops that have references into the current
-  // collection set.
+  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
+  // join and leave around parts that must be atomic wrt GC.  (NULL means
+  // being done at a safepoint.)
+  // If check_for_refs_into_cset is true, a true result is returned
+  // if the given card contains oops that have references into the
+  // current collection set.
   virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                        bool check_for_refs_into_cset);

+  // Print any relevant summary info.
   virtual void print_summary_info();
+
+  // Prepare remembered set for verification.
   virtual void prepare_for_verify();
 };

@@ -250,13 +179,13 @@ public:
 class UpdateRSOopClosure: public OopClosure {
   HeapRegion* _from;
-  HRInto_G1RemSet* _rs;
+  G1RemSet* _rs;
   int _worker_i;

   template <class T> void do_oop_work(T* p);

 public:
-  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
+  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
     _from(NULL), _rs(rs), _worker_i(worker_i) {
     guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
   }
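The header change collapses a three-class hierarchy (the abstract base, the placeholder StupidG1RemSet, and HRInto_G1RemSet) into the single concrete G1RemSet, so the remset entry points no longer need pure-virtual hooks or the as_HRInto_G1RemSet() downcast helper. Schematically, with names and bodies reduced to the bare shape:

    #include <cstdio>

    // Before: an abstract base reached through virtual dispatch, plus a
    // downcast hook for callers that needed the concrete type.
    struct OldAbstractRemSet {
      virtual void par_write_ref(void* from, void* p, int tid) = 0;
      virtual OldAbstractRemSet* as_HRInto_G1RemSet() { return nullptr; }
      virtual ~OldAbstractRemSet() {}
    };

    // After: one concrete class. Hot paths such as par_write_ref can be
    // plain members (cf. the *_nv inline variants in g1RemSet.inline.hpp)
    // and callers use g1_rem_set() directly, with no downcast.
    struct NewRemSet {
      void par_write_ref(void* from, void* p, int tid) {
        (void)from; (void)p; (void)tid;  // elided
      }
    };

    int main() {
      NewRemSet rs;
      rs.par_write_ref(nullptr, nullptr, 0);  // direct, non-virtual call
      std::puts("ok");
      return 0;
    }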
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp

@@ -30,16 +30,18 @@ inline size_t G1RemSet::n_workers() {
   }
 }

-template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
+template <class T>
+inline void G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
   par_write_ref_nv(from, p, 0);
 }

-inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
+inline bool G1RemSet::self_forwarded(oop obj) {
   bool result =  (obj->is_forwarded() &&
                   (obj->forwardee()== obj));
   return result;
 }

-template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
+template <class T>
+inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
 #ifdef ASSERT
   // can't do because of races

@@ -77,7 +79,7 @@ template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* fro
     // Deferred updates to the CSet are either discarded (in the normal case),
     // or processed (if an evacuation failure occurs) at the end
     // of the collection.
-    // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
+    // See G1RemSet::cleanup_after_oops_into_collection_set_do().
   } else {
 #if G1_REM_SET_LOGGING
     gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"

@@ -91,12 +93,14 @@ template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* fro
   }
 }

-template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSOopClosure::do_oop_work(T* p) {
   assert(_from != NULL, "from region must be non-NULL");
   _rs->par_write_ref(_from, p, _worker_i);
 }

-template <class T> inline void UpdateRSetImmediate::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSetImmediate::do_oop_work(T* p) {
   assert(_from->is_in_reserved(p), "paranoia");
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
src/share/vm/gc_implementation/g1/g1_globals.hpp

@@ -40,9 +40,6 @@
   develop(intx, G1PolicyVerbose, 0,                                          \
           "The verbosity level on G1 policy decisions")                      \
                                                                              \
-  develop(bool, G1UseHRIntoRS, true,                                         \
-          "Determines whether the 'advanced' HR Into rem set is used.")      \
-                                                                             \
   develop(intx, G1MarkingVerboseLevel, 0,                                    \
           "Level (0-4) of verboseness of the marking code")                  \
                                                                              \
src/share/vm/gc_implementation/includeDB_gc_g1

@@ -310,10 +310,16 @@ heapRegionSeq.hpp                       heapRegion.hpp

 heapRegionSeq.inline.hpp                heapRegionSeq.hpp

+instanceKlass.cpp                       g1RemSet.inline.hpp
+
+instanceRefKlass.cpp                    g1RemSet.inline.hpp
+
 klass.hpp                               g1OopClosures.hpp

 memoryService.cpp                       g1MemoryPool.hpp

+objArrayKlass.cpp                       g1RemSet.inline.hpp
+
 ptrQueue.cpp                            allocation.hpp
 ptrQueue.cpp                            allocation.inline.hpp
 ptrQueue.cpp                            mutex.hpp
src/share/vm/includeDB_core

@@ -3231,6 +3231,7 @@ orderAccess.hpp                         allocation.hpp
 orderAccess.hpp                         os.hpp

 orderAccess_<os_arch>.inline.hpp        orderAccess.hpp
+orderAccess_<os_arch>.inline.hpp        vm_version_<arch>.hpp

 os.cpp                                  allocation.inline.hpp
 os.cpp                                  arguments.hpp
src/share/vm/runtime/arguments.cpp

@@ -119,11 +119,8 @@ void Arguments::init_system_properties() {
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.version", "1.0", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.name",
                                                            "Java Virtual Machine Specification",  false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
-        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.version", VM_Version::vm_release(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(),  false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(),  true));

   // following are JVMTI agent writeable properties.

@@ -151,6 +148,14 @@ void Arguments::init_system_properties() {
   os::init_system_properties_values();
 }

+// Update/Initialize System properties after JDK version number is known
+void Arguments::init_version_specific_system_properties() {
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
+        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor",
+        VM_Version::vm_vendor(),  false));
+}
+
 /**
  * Provide a slightly more user-friendly way of eliminating -XX flags.
  * When a flag is eliminated, it can be added to this list in order to

@@ -1676,7 +1681,8 @@ bool Arguments::check_stack_pages()
   bool status = true;
   status = status && verify_min_value(StackYellowPages, 1, "StackYellowPages");
   status = status && verify_min_value(StackRedPages, 1, "StackRedPages");
-  status = status && verify_min_value(StackShadowPages, 1, "StackShadowPages");
+  // greater stack shadow pages can't generate instruction to bang stack
+  status = status && verify_interval(StackShadowPages, 1, 50, "StackShadowPages");
   return status;
 }
src/share/vm/runtime/arguments.hpp

@@ -484,6 +484,9 @@ class Arguments : AllStatic {
   // System properties
   static void init_system_properties();

+  // Update/Initialize System properties after JDK version number is known
+  static void init_version_specific_system_properties();
+
   // Property List manipulation
   static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
   static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
src/share/vm/runtime/globals.hpp

@@ -327,7 +327,7 @@ class CommandLineFlags {
   /* UseMembar is theoretically a temp flag used for memory barrier          \
    * removal testing.  It was supposed to be removed before FCS but has      \
    * been re-added (see 6401008) */                                          \
-  product(bool, UseMembar, false,                                            \
+  product_pd(bool, UseMembar,                                                \
           "(Unstable) Issues membars on thread state transitions")           \
                                                                              \
   /* Temporary: See 6948537 */                                               \

@@ -822,6 +822,9 @@ class CommandLineFlags {
   develop(bool, PrintJVMWarnings, false,                                     \
           "Prints warnings for unimplemented JVM functions")                 \
                                                                              \
+  product(bool, PrintWarnings, true,                                         \
+          "Prints JVM warnings to output stream")                            \
+                                                                             \
   notproduct(uintx, WarnOnStalledSpinLock, 0,                                \
           "Prints warnings for stalled SpinLocks")                           \
                                                                              \
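The product to product_pd switch is what ties this hunk to the three globals_<cpu>.hpp changes above: a plain product flag carries its default in the shared globals.hpp, while a product_pd flag takes its default from a per-platform define_pd_global, so each port must now supply define_pd_global(bool, UseMembar, ...). A stripped-down sketch of the mechanism, vastly simplified relative to HotSpot's real flag macros:

    #include <cstdio>

    // Per-platform header (cf. globals_sparc.hpp / globals_x86.hpp /
    // globals_zero.hpp): each port supplies its own default.
    #define define_pd_global(type, name, value) const type pd_##name = value;
    define_pd_global(bool, UseMembar, false)   // this port's choice

    // Shared header (cf. globals.hpp): product_pd declares the flag but
    // defers the default to the platform definition above.
    #define product_pd(type, name, doc) type name = pd_##name;
    product_pd(bool, UseMembar,
               "(Unstable) Issues membars on thread state transitions")

    int main() {
      std::printf("UseMembar default on this platform: %s\n",
                  UseMembar ? "true" : "false");
      return 0;
    }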
src/share/vm/runtime/sharedRuntime.cpp

@@ -302,6 +302,9 @@ double SharedRuntime::dabs(double f)  {
   return (f <= (double)0.0) ? (double)0.0 - f : f;
 }

+#endif
+
+#if defined(__SOFTFP__) || defined(PPC)
 double SharedRuntime::dsqrt(double f) {
   return sqrt(f);
 }
src/share/vm/runtime/sharedRuntime.hpp

@@ -116,6 +116,9 @@ class SharedRuntime: AllStatic {
 #if defined(__SOFTFP__) || defined(E500V2)
   static double dabs(double f);
 #endif
+#if defined(__SOFTFP__) || defined(PPC)
+  static double dsqrt(double f);
+#endif
src/share/vm/runtime/thread.cpp

@@ -2921,6 +2921,9 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   // So that JDK version can be used as a discrimintor when parsing arguments
   JDK_Version_init();

+  // Update/Initialize System properties after JDK version number is known
+  Arguments::init_version_specific_system_properties();
+
   // Parse arguments
   jint parse_result = Arguments::parse(args);
   if (parse_result != JNI_OK) return parse_result;
src/share/vm/utilities/debug.cpp

@@ -51,6 +51,7 @@
 void warning(const char* format, ...) {
+  if (PrintWarnings) {
   // In case error happens before init or during shutdown
   if (tty == NULL) ostream_init();

@@ -59,6 +60,7 @@ void warning(const char* format, ...) {
   va_start(ap, format);
   tty->vprint_cr(format, ap);
   va_end(ap);
+  }
   if (BreakAtWarning) BREAKPOINT;
 }
src/share/vm/utilities/exceptions.cpp

@@ -61,6 +61,18 @@ bool Exceptions::special_exception(Thread* thread, const char* file, int line, H
     ShouldNotReachHere();
   }

+#ifdef ASSERT
+  // Check for trying to throw stack overflow before initialization is complete
+  // to prevent infinite recursion trying to initialize stack overflow without
+  // adequate stack space.
+  // This can happen with stress testing a large value of StackShadowPages
+  if (h_exception()->klass() == SystemDictionary::StackOverflowError_klass()) {
+    instanceKlass* ik = instanceKlass::cast(h_exception->klass());
+    assert(ik->is_initialized(),
+           "need to increase min_stack_allowed calculation");
+  }
+#endif // ASSERT
+
   if (thread->is_VM_thread()
       || thread->is_Compiler_thread() ) {
     // We do not care what kind of exception we get for the vm-thread or a thread which

@@ -91,7 +103,6 @@ bool Exceptions::special_exception(Thread* thread, const char* file, int line, s
     thread->set_pending_exception(Universe::vm_exception(), file, line);
     return true;
   }
-
   return false;
 }

@@ -193,6 +204,7 @@ void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file
     klassOop k = SystemDictionary::StackOverflowError_klass();
     oop e = instanceKlass::cast(k)->allocate_instance(CHECK);
     exception = Handle(THREAD, e);  // fill_in_stack trace does gc
+    assert(instanceKlass::cast(k)->is_initialized(), "need to increase min_stack_allowed calculation");
    if (StackTraceInThrowable) {
      java_lang_Throwable::fill_in_stack_trace(exception);
    }
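The new asserts guard a subtle bootstrap hazard, and tie this file back to the min_stack_allowed changes in the os_* files: if a StackOverflowError is thrown while java.lang.StackOverflowError is still uninitialized, throwing triggers class initialization, which consumes stack, which can overflow again, recursing forever. A toy model of the hazard and the guard (illustrative code, not the HotSpot logic):

    #include <cassert>
    #include <cstdio>

    // Stands in for instanceKlass::is_initialized() on StackOverflowError.
    bool stack_overflow_error_initialized = false;

    void throw_stack_overflow() {
      // The added assert turns the would-be infinite recursion
      //   throw -> run <clinit> -> use stack -> overflow -> throw -> ...
      // into a debug-build failure pointing at min_stack_allowed.
      assert(stack_overflow_error_initialized &&
             "need to increase min_stack_allowed calculation");
      std::puts("StackOverflowError thrown safely");
    }

    int main() {
      // With min_stack_allowed sized correctly, the system classes finish
      // initializing during startup, before any overflow can be thrown.
      stack_overflow_error_initialized = true;
      throw_stack_overflow();
      return 0;
    }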