Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openanolis
dragonwell8_hotspot
提交
d0e622be
D
dragonwell8_hotspot
项目概览
openanolis
/
dragonwell8_hotspot
通知
2
Star
2
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
D
dragonwell8_hotspot
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
d0e622be
编写于
9月 09, 2011
作者:
S
stefank
浏览文件
操作
浏览文件
下载
差异文件
Merge
上级
754447b7
3f82f1d9
变更
27
展开全部
隐藏空白更改
内联
并排
Showing
27 changed file
with
1291 addition
and
660 deletion
+1291
-660
src/os/linux/vm/os_linux.cpp
src/os/linux/vm/os_linux.cpp
+6
-2
src/os/windows/vm/os_windows.cpp
src/os/windows/vm/os_windows.cpp
+21
-12
src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
+19
-2
src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp
...hare/vm/gc_implementation/g1/concurrentG1RefineThread.cpp
+1
-1
src/share/vm/gc_implementation/g1/concurrentMark.cpp
src/share/vm/gc_implementation/g1/concurrentMark.cpp
+10
-6
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+73
-86
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+525
-342
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
+55
-21
src/share/vm/gc_implementation/g1/g1ErgoVerbose.cpp
src/share/vm/gc_implementation/g1/g1ErgoVerbose.cpp
+65
-0
src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp
src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp
+197
-0
src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
+1
-5
src/share/vm/gc_implementation/g1/g1_globals.hpp
src/share/vm/gc_implementation/g1/g1_globals.hpp
+4
-4
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+1
-1
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
..._implementation/parallelScavenge/parallelScavengeHeap.cpp
+0
-4
src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+0
-4
src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
+1
-2
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
...are/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+0
-2
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
.../gc_implementation/parallelScavenge/psParallelCompact.cpp
+0
-1
src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
+0
-1
src/share/vm/memory/genCollectedHeap.cpp
src/share/vm/memory/genCollectedHeap.cpp
+0
-4
src/share/vm/memory/referenceProcessor.cpp
src/share/vm/memory/referenceProcessor.cpp
+172
-129
src/share/vm/memory/referenceProcessor.hpp
src/share/vm/memory/referenceProcessor.hpp
+10
-10
src/share/vm/memory/sharedHeap.cpp
src/share/vm/memory/sharedHeap.cpp
+0
-1
src/share/vm/oops/instanceRefKlass.cpp
src/share/vm/oops/instanceRefKlass.cpp
+115
-15
src/share/vm/prims/jvm.h
src/share/vm/prims/jvm.h
+2
-1
src/share/vm/runtime/java.cpp
src/share/vm/runtime/java.cpp
+2
-1
src/share/vm/runtime/java.hpp
src/share/vm/runtime/java.hpp
+11
-3
未找到文件。
src/os/linux/vm/os_linux.cpp
浏览文件 @
d0e622be
...
...
@@ -2531,10 +2531,14 @@ bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
}
return
true
;
}
return
false
;
// Fall through and try to use small pages
}
return
commit_memory
(
addr
,
size
,
exec
);
if
(
commit_memory
(
addr
,
size
,
exec
))
{
realign_memory
(
addr
,
size
,
alignment_hint
);
return
true
;
}
return
false
;
}
void
os
::
realign_memory
(
char
*
addr
,
size_t
bytes
,
size_t
alignment_hint
)
{
...
...
src/os/windows/vm/os_windows.cpp
浏览文件 @
d0e622be
...
...
@@ -2706,11 +2706,10 @@ static void cleanup_after_large_page_init() {
static
bool
numa_interleaving_init
()
{
bool
success
=
false
;
bool
use_numa_specified
=
!
FLAG_IS_DEFAULT
(
UseNUMA
);
bool
use_numa_interleaving_specified
=
!
FLAG_IS_DEFAULT
(
UseNUMAInterleaving
);
// print a warning if UseNUMA
or UseNUMA
Interleaving flag is specified on command line
bool
warn_on_failure
=
use_numa_specified
||
use_numa_interleaving_specified
;
// print a warning if UseNUMAInterleaving flag is specified on command line
bool
warn_on_failure
=
use_numa_interleaving_specified
;
# define WARN(msg) if (warn_on_failure) { warning(msg); }
// NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
...
...
@@ -2720,7 +2719,7 @@ static bool numa_interleaving_init() {
if
(
os
::
Kernel32Dll
::
NumaCallsAvailable
())
{
if
(
numa_node_list_holder
.
build
())
{
if
(
PrintMiscellaneous
&&
Verbose
)
{
tty
->
print
(
"NUMA UsedNodeCount=%d, namely "
,
os
::
numa_get_groups_num
());
tty
->
print
(
"NUMA UsedNodeCount=%d, namely "
,
numa_node_list_holder
.
get_count
());
for
(
int
i
=
0
;
i
<
numa_node_list_holder
.
get_count
();
i
++
)
{
tty
->
print
(
"%d "
,
numa_node_list_holder
.
get_node_list_entry
(
i
));
}
...
...
@@ -2734,7 +2733,6 @@ static bool numa_interleaving_init() {
WARN
(
"NUMA Interleaving is not supported by the operating system."
);
}
if
(
!
success
)
{
if
(
use_numa_specified
)
WARN
(
"...Ignoring UseNUMA flag."
);
if
(
use_numa_interleaving_specified
)
WARN
(
"...Ignoring UseNUMAInterleaving flag."
);
}
return
success
;
...
...
@@ -2816,7 +2814,8 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
prot
);
}
else
{
// get the next node to use from the used_node_list
DWORD
node
=
numa_node_list_holder
.
get_node_list_entry
(
count
%
os
::
numa_get_groups_num
());
assert
(
numa_node_list_holder
.
get_count
()
>
0
,
"Multiple NUMA nodes expected"
);
DWORD
node
=
numa_node_list_holder
.
get_node_list_entry
(
count
%
numa_node_list_holder
.
get_count
());
p_new
=
(
char
*
)
os
::
Kernel32Dll
::
VirtualAllocExNuma
(
hProc
,
next_alloc_addr
,
bytes_to_rq
,
...
...
@@ -3132,15 +3131,21 @@ void os::free_memory(char *addr, size_t bytes) { }
void
os
::
numa_make_global
(
char
*
addr
,
size_t
bytes
)
{
}
void
os
::
numa_make_local
(
char
*
addr
,
size_t
bytes
,
int
lgrp_hint
)
{
}
bool
os
::
numa_topology_changed
()
{
return
false
;
}
size_t
os
::
numa_get_groups_num
()
{
return
numa_node_list_holder
.
get_count
(
);
}
size_t
os
::
numa_get_groups_num
()
{
return
MAX2
(
numa_node_list_holder
.
get_count
(),
1
);
}
int
os
::
numa_get_group_id
()
{
return
0
;
}
size_t
os
::
numa_get_leaf_groups
(
int
*
ids
,
size_t
size
)
{
// check for size bigger than actual groups_num
size
=
MIN2
(
size
,
numa_get_groups_num
());
for
(
int
i
=
0
;
i
<
(
int
)
size
;
i
++
)
{
ids
[
i
]
=
numa_node_list_holder
.
get_node_list_entry
(
i
);
if
(
numa_node_list_holder
.
get_count
()
==
0
&&
size
>
0
)
{
// Provide an answer for UMA systems
ids
[
0
]
=
0
;
return
1
;
}
else
{
// check for size bigger than actual groups_num
size
=
MIN2
(
size
,
numa_get_groups_num
());
for
(
int
i
=
0
;
i
<
(
int
)
size
;
i
++
)
{
ids
[
i
]
=
numa_node_list_holder
.
get_node_list_entry
(
i
);
}
return
size
;
}
return
size
;
}
bool
os
::
get_page_info
(
char
*
start
,
page_info
*
info
)
{
...
...
@@ -3768,6 +3773,10 @@ jint os::init_2(void) {
// initialize thread priority policy
prio_init
();
if
(
UseNUMA
&&
!
ForceNUMA
)
{
UseNUMA
=
false
;
// We don't fully support this yet
}
if
(
UseNUMAInterleaving
)
{
// first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
bool
success
=
numa_interleaving_init
();
...
...
src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
浏览文件 @
d0e622be
/*
* Copyright (c) 2001, 201
0
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 201
1
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -26,6 +26,7 @@
#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "memory/space.inline.hpp"
CSetChooserCache
::
CSetChooserCache
()
{
...
...
@@ -358,6 +359,9 @@ CollectionSetChooser::getNextMarkedRegion(double time_remaining,
if
(
_cache
.
is_empty
())
{
assert
(
_curMarkedIndex
==
_numMarkedRegions
,
"if cache is empty, list should also be empty"
);
ergo_verbose0
(
ErgoCSetConstruction
,
"stop adding old regions to CSet"
,
ergo_format_reason
(
"cache is empty"
));
return
NULL
;
}
...
...
@@ -368,10 +372,23 @@ CollectionSetChooser::getNextMarkedRegion(double time_remaining,
if
(
g1p
->
adaptive_young_list_length
())
{
if
(
time_remaining
-
predicted_time
<
0.0
)
{
g1h
->
check_if_region_is_too_expensive
(
predicted_time
);
ergo_verbose2
(
ErgoCSetConstruction
,
"stop adding old regions to CSet"
,
ergo_format_reason
(
"predicted old region time higher than remaining time"
)
ergo_format_ms
(
"predicted old region time"
)
ergo_format_ms
(
"remaining time"
),
predicted_time
,
time_remaining
);
return
NULL
;
}
}
else
{
if
(
predicted_time
>
2.0
*
avg_prediction
)
{
double
threshold
=
2.0
*
avg_prediction
;
if
(
predicted_time
>
threshold
)
{
ergo_verbose2
(
ErgoCSetConstruction
,
"stop adding old regions to CSet"
,
ergo_format_reason
(
"predicted old region time higher than threshold"
)
ergo_format_ms
(
"predicted old region time"
)
ergo_format_ms
(
"threshold"
),
predicted_time
,
threshold
);
return
NULL
;
}
}
...
...
src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp
浏览文件 @
d0e622be
...
...
@@ -91,7 +91,7 @@ void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
}
}
g1p
->
check_prediction_validit
y
();
g1p
->
revise_young_list_target_length_if_necessar
y
();
}
}
...
...
src/share/vm/gc_implementation/g1/concurrentMark.cpp
浏览文件 @
d0e622be
...
...
@@ -28,6 +28,7 @@
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
...
...
@@ -1727,18 +1728,21 @@ void ConcurrentMark::cleanup() {
size_t
known_garbage_bytes
=
g1_par_count_task
.
used_bytes
()
-
g1_par_count_task
.
live_bytes
();
#if 0
gclog_or_tty->print_cr("used %1.2lf, live %1.2lf, garbage %1.2lf",
(double) g1_par_count_task.used_bytes() / (double) (1024 * 1024),
(double) g1_par_count_task.live_bytes() / (double) (1024 * 1024),
(double) known_garbage_bytes / (double) (1024 * 1024));
#endif // 0
g1p
->
set_known_garbage_bytes
(
known_garbage_bytes
);
size_t
start_used_bytes
=
g1h
->
used
();
_at_least_one_mark_complete
=
true
;
g1h
->
set_marking_complete
();
ergo_verbose4
(
ErgoConcCycles
,
"finish cleanup"
,
ergo_format_byte
(
"occupancy"
)
ergo_format_byte
(
"capacity"
)
ergo_format_byte_perc
(
"known garbage"
),
start_used_bytes
,
g1h
->
capacity
(),
known_garbage_bytes
,
((
double
)
known_garbage_bytes
/
(
double
)
g1h
->
capacity
())
*
100.0
);
double
count_end
=
os
::
elapsedTime
();
double
this_final_counting_time
=
(
count_end
-
start
);
if
(
G1PrintParCleanupStats
)
{
...
...
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
浏览文件 @
d0e622be
...
...
@@ -31,6 +31,7 @@
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
...
...
@@ -577,6 +578,11 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
res
=
new_region_try_secondary_free_list
();
}
if
(
res
==
NULL
&&
do_expand
)
{
ergo_verbose1
(
ErgoHeapSizing
,
"attempt heap expansion"
,
ergo_format_reason
(
"region allocation request failed"
)
ergo_format_byte
(
"allocation request"
),
word_size
*
HeapWordSize
);
if
(
expand
(
word_size
*
HeapWordSize
))
{
// Even though the heap was expanded, it might not have reached
// the desired size. So, we cannot assume that the allocation
...
...
@@ -790,6 +796,11 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
// room available.
assert
(
num_regions
>
fs
,
"earlier allocation should have succeeded"
);
ergo_verbose1
(
ErgoHeapSizing
,
"attempt heap expansion"
,
ergo_format_reason
(
"humongous allocation request failed"
)
ergo_format_byte
(
"allocation request"
),
word_size
*
HeapWordSize
);
if
(
expand
((
num_regions
-
fs
)
*
HeapRegion
::
GrainBytes
))
{
// Even though the heap was expanded, it might not have
// reached the desired size. So, we cannot assume that the
...
...
@@ -906,6 +917,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
if
(
GC_locker
::
is_active_and_needs_gc
())
{
if
(
g1_policy
()
->
can_expand_young_list
())
{
// No need for an ergo verbose message here,
// can_expand_young_list() does this when it returns true.
result
=
_mutator_alloc_region
.
attempt_allocation_force
(
word_size
,
false
/* bot_updates */
);
if
(
result
!=
NULL
)
{
...
...
@@ -1477,63 +1490,34 @@ resize_if_necessary_after_full_collection(size_t word_size) {
// we'll try to make the capacity smaller than it, not greater).
maximum_desired_capacity
=
MAX2
(
maximum_desired_capacity
,
min_heap_size
);
if
(
PrintGC
&&
Verbose
)
{
const
double
free_percentage
=
(
double
)
free_after_gc
/
(
double
)
capacity_after_gc
;
gclog_or_tty
->
print_cr
(
"Computing new size after full GC "
);
gclog_or_tty
->
print_cr
(
" "
" minimum_free_percentage: %6.2f"
,
minimum_free_percentage
);
gclog_or_tty
->
print_cr
(
" "
" maximum_free_percentage: %6.2f"
,
maximum_free_percentage
);
gclog_or_tty
->
print_cr
(
" "
" capacity: %6.1fK"
" minimum_desired_capacity: %6.1fK"
" maximum_desired_capacity: %6.1fK"
,
(
double
)
capacity_after_gc
/
(
double
)
K
,
(
double
)
minimum_desired_capacity
/
(
double
)
K
,
(
double
)
maximum_desired_capacity
/
(
double
)
K
);
gclog_or_tty
->
print_cr
(
" "
" free_after_gc: %6.1fK"
" used_after_gc: %6.1fK"
,
(
double
)
free_after_gc
/
(
double
)
K
,
(
double
)
used_after_gc
/
(
double
)
K
);
gclog_or_tty
->
print_cr
(
" "
" free_percentage: %6.2f"
,
free_percentage
);
}
if
(
capacity_after_gc
<
minimum_desired_capacity
)
{
// Don't expand unless it's significant
size_t
expand_bytes
=
minimum_desired_capacity
-
capacity_after_gc
;
if
(
expand
(
expand_bytes
))
{
if
(
PrintGC
&&
Verbose
)
{
gclog_or_tty
->
print_cr
(
" "
" expanding:"
" max_heap_size: %6.1fK"
" minimum_desired_capacity: %6.1fK"
" expand_bytes: %6.1fK"
,
(
double
)
max_heap_size
/
(
double
)
K
,
(
double
)
minimum_desired_capacity
/
(
double
)
K
,
(
double
)
expand_bytes
/
(
double
)
K
);
}
}
ergo_verbose4
(
ErgoHeapSizing
,
"attempt heap expansion"
,
ergo_format_reason
(
"capacity lower than "
"min desired capacity after Full GC"
)
ergo_format_byte
(
"capacity"
)
ergo_format_byte
(
"occupancy"
)
ergo_format_byte_perc
(
"min desired capacity"
),
capacity_after_gc
,
used_after_gc
,
minimum_desired_capacity
,
(
double
)
MinHeapFreeRatio
);
expand
(
expand_bytes
);
// No expansion, now see if we want to shrink
}
else
if
(
capacity_after_gc
>
maximum_desired_capacity
)
{
// Capacity too large, compute shrinking size
size_t
shrink_bytes
=
capacity_after_gc
-
maximum_desired_capacity
;
ergo_verbose4
(
ErgoHeapSizing
,
"attempt heap shrinking"
,
ergo_format_reason
(
"capacity higher than "
"max desired capacity after Full GC"
)
ergo_format_byte
(
"capacity"
)
ergo_format_byte
(
"occupancy"
)
ergo_format_byte_perc
(
"max desired capacity"
),
capacity_after_gc
,
used_after_gc
,
maximum_desired_capacity
,
(
double
)
MaxHeapFreeRatio
);
shrink
(
shrink_bytes
);
if
(
PrintGC
&&
Verbose
)
{
gclog_or_tty
->
print_cr
(
" "
" shrinking:"
" min_heap_size: %6.1fK"
" maximum_desired_capacity: %6.1fK"
" shrink_bytes: %6.1fK"
,
(
double
)
min_heap_size
/
(
double
)
K
,
(
double
)
maximum_desired_capacity
/
(
double
)
K
,
(
double
)
shrink_bytes
/
(
double
)
K
);
}
}
}
...
...
@@ -1619,6 +1603,11 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
verify_region_sets_optional
();
size_t
expand_bytes
=
MAX2
(
word_size
*
HeapWordSize
,
MinHeapDeltaBytes
);
ergo_verbose1
(
ErgoHeapSizing
,
"attempt heap expansion"
,
ergo_format_reason
(
"allocation request failed"
)
ergo_format_byte
(
"allocation request"
),
word_size
*
HeapWordSize
);
if
(
expand
(
expand_bytes
))
{
_hrs
.
verify_optional
();
verify_region_sets_optional
();
...
...
@@ -1646,11 +1635,11 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
size_t
aligned_expand_bytes
=
ReservedSpace
::
page_align_size_up
(
expand_bytes
);
aligned_expand_bytes
=
align_size_up
(
aligned_expand_bytes
,
HeapRegion
::
GrainBytes
);
if
(
Verbose
&&
PrintGC
)
{
gclog_or_tty
->
print
(
"Expanding garbage-first heap from %ldK by %ldK"
,
old_mem_size
/
K
,
aligned_expand_bytes
/
K
);
}
ergo_verbose2
(
ErgoHeapSizing
,
"expand the heap"
,
ergo_format_byte
(
"requested expansion amount"
)
ergo_format_byte
(
"attempted expansion amount"
),
expand_bytes
,
aligned_expand_bytes
);
// First commit the memory.
HeapWord
*
old_end
=
(
HeapWord
*
)
_g1_storage
.
high
();
...
...
@@ -1693,7 +1682,11 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
}
assert
(
curr
==
mr
.
end
(),
"post-condition"
);
}
g1_policy
()
->
record_new_heap_size
(
n_regions
());
}
else
{
ergo_verbose0
(
ErgoHeapSizing
,
"did not expand the heap"
,
ergo_format_reason
(
"heap expansion operation failed"
));
// The expansion of the virtual storage space was unsuccessful.
// Let's see if it was because we ran out of swap.
if
(
G1ExitOnExpansionFailure
&&
...
...
@@ -1702,13 +1695,6 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
vm_exit_out_of_memory
(
aligned_expand_bytes
,
"G1 heap expansion"
);
}
}
if
(
Verbose
&&
PrintGC
)
{
size_t
new_mem_size
=
_g1_storage
.
committed_size
();
gclog_or_tty
->
print_cr
(
"...%s, expanded to %ldK"
,
(
successful
?
"Successful"
:
"Failed"
),
new_mem_size
/
K
);
}
return
successful
;
}
...
...
@@ -1722,6 +1708,13 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
MemRegion
mr
=
_hrs
.
shrink_by
(
aligned_shrink_bytes
,
&
num_regions_deleted
);
HeapWord
*
old_end
=
(
HeapWord
*
)
_g1_storage
.
high
();
assert
(
mr
.
end
()
==
old_end
,
"post-condition"
);
ergo_verbose3
(
ErgoHeapSizing
,
"shrink the heap"
,
ergo_format_byte
(
"requested shrinking amount"
)
ergo_format_byte
(
"aligned shrinking amount"
)
ergo_format_byte
(
"attempted shrinking amount"
),
shrink_bytes
,
aligned_shrink_bytes
,
mr
.
byte_size
());
if
(
mr
.
byte_size
()
>
0
)
{
if
(
_hr_printer
.
is_active
())
{
HeapWord
*
curr
=
mr
.
end
();
...
...
@@ -1740,13 +1733,11 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
_expansion_regions
+=
num_regions_deleted
;
update_committed_space
(
old_end
,
new_end
);
HeapRegionRemSet
::
shrink_heap
(
n_regions
());
if
(
Verbose
&&
PrintGC
)
{
size_t
new_mem_size
=
_g1_storage
.
committed_size
();
gclog_or_tty
->
print_cr
(
"Shrinking garbage-first heap from %ldK by %ldK to %ldK"
,
old_mem_size
/
K
,
aligned_shrink_bytes
/
K
,
new_mem_size
/
K
);
}
g1_policy
()
->
record_new_heap_size
(
n_regions
());
}
else
{
ergo_verbose0
(
ErgoHeapSizing
,
"did not shrink the heap"
,
ergo_format_reason
(
"heap shrinking operation failed"
));
}
}
...
...
@@ -3534,6 +3525,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
init_mutator_alloc_region
();
{
size_t
expand_bytes
=
g1_policy
()
->
expansion_amount
();
if
(
expand_bytes
>
0
)
{
size_t
bytes_before
=
capacity
();
if
(
!
expand
(
expand_bytes
))
{
// We failed to expand the heap so let's verify that
// committed/uncommitted amount match the backing store
assert
(
capacity
()
==
_g1_storage
.
committed_size
(),
"committed size mismatch"
);
assert
(
max_capacity
()
==
_g1_storage
.
reserved_size
(),
"reserved size mismatch"
);
}
}
}
double
end_time_sec
=
os
::
elapsedTime
();
double
pause_time_ms
=
(
end_time_sec
-
start_time_sec
)
*
MILLIUNITS
;
g1_policy
()
->
record_pause_time_ms
(
pause_time_ms
);
...
...
@@ -3579,6 +3583,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
size_t
expand_bytes
=
g1_policy
()
->
expansion_amount
();
if
(
expand_bytes
>
0
)
{
size_t
bytes_before
=
capacity
();
// No need for an ergo verbose message here,
// expansion_amount() does this when it returns a value > 0.
if
(
!
expand
(
expand_bytes
))
{
// We failed to expand the heap so let's verify that
// committed/uncommitted amount match the backing store
...
...
@@ -3732,13 +3738,6 @@ public:
bool
do_object_b
(
oop
p
)
{
// It is reachable if it is outside the collection set, or is inside
// and forwarded.
#ifdef G1_DEBUG
gclog_or_tty
->
print_cr
(
"is alive "
PTR_FORMAT
" in CS %d forwarded %d overall %d"
,
(
void
*
)
p
,
_g1
->
obj_in_cs
(
p
),
p
->
is_forwarded
(),
!
_g1
->
obj_in_cs
(
p
)
||
p
->
is_forwarded
());
#endif // G1_DEBUG
return
!
_g1
->
obj_in_cs
(
p
)
||
p
->
is_forwarded
();
}
};
...
...
@@ -3750,20 +3749,9 @@ public:
void
do_oop
(
narrowOop
*
p
)
{
guarantee
(
false
,
"Not needed"
);
}
void
do_oop
(
oop
*
p
)
{
oop
obj
=
*
p
;
#ifdef G1_DEBUG
if
(
PrintGC
&&
Verbose
)
{
gclog_or_tty
->
print_cr
(
"keep alive *"
PTR_FORMAT
" = "
PTR_FORMAT
" "
PTR_FORMAT
,
p
,
(
void
*
)
obj
,
(
void
*
)
*
p
);
}
#endif // G1_DEBUG
if
(
_g1
->
obj_in_cs
(
obj
))
{
assert
(
obj
->
is_forwarded
(),
"invariant"
);
*
p
=
obj
->
forwardee
();
#ifdef G1_DEBUG
gclog_or_tty
->
print_cr
(
" in CSet: moved "
PTR_FORMAT
" -> "
PTR_FORMAT
,
(
void
*
)
obj
,
(
void
*
)
*
p
);
#endif // G1_DEBUG
}
}
};
...
...
@@ -4613,7 +4601,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
// keep entries (which are added by the marking threads) on them
// live until they can be processed at the end of marking.
ref_processor
()
->
weak_oops_do
(
&
buf_scan_non_heap_roots
);
ref_processor
()
->
oops_do
(
&
buf_scan_non_heap_roots
);
}
// Finish up any enqueued closure apps (attributed as object copy time).
...
...
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
浏览文件 @
d0e622be
此差异已折叠。
点击以展开。
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
浏览文件 @
d0e622be
...
...
@@ -183,9 +183,9 @@ protected:
// if true, then it tries to dynamically adjust the length of the
// young list
bool
_adaptive_young_list_length
;
size_t
_young_list_min_length
;
size_t
_young_list_target_length
;
size_t
_young_list_fixed_length
;
size_t
_prev_eden_capacity
;
// used for logging
// The max number of regions we can extend the eden by while the GC
// locker is active. This should be >= _young_list_target_length;
...
...
@@ -207,6 +207,9 @@ protected:
double
_gc_overhead_perc
;
double
_reserve_factor
;
size_t
_reserve_regions
;
bool
during_marking
()
{
return
_during_marking
;
}
...
...
@@ -243,6 +246,10 @@ private:
TruncatedSeq
*
_max_conc_overhead_seq
;
bool
_using_new_ratio_calculations
;
size_t
_min_desired_young_length
;
// as set on the command line or default calculations
size_t
_max_desired_young_length
;
// as set on the command line or default calculations
size_t
_recorded_young_regions
;
size_t
_recorded_non_young_regions
;
size_t
_recorded_region_num
;
...
...
@@ -456,12 +463,6 @@ public:
size_t
predict_bytes_to_copy
(
HeapRegion
*
hr
);
double
predict_region_elapsed_time_ms
(
HeapRegion
*
hr
,
bool
young
);
// for use by: calculate_young_list_target_length(rs_length)
bool
predict_will_fit
(
size_t
young_region_num
,
double
base_time_ms
,
size_t
init_free_regions
,
double
target_pause_time_ms
);
void
start_recording_regions
();
void
record_cset_region_info
(
HeapRegion
*
hr
,
bool
young
);
void
record_non_young_cset_region
(
HeapRegion
*
hr
);
...
...
@@ -493,7 +494,6 @@ public:
// </NEW PREDICTION>
public:
void
cset_regions_freed
()
{
bool
propagate
=
_last_young_gc_full
&&
!
_in_marking_window
;
_short_lived_surv_rate_group
->
all_surviving_words_recorded
(
propagate
);
...
...
@@ -772,9 +772,41 @@ protected:
double
_mark_cleanup_start_sec
;
double
_mark_closure_time_ms
;
void
calculate_young_list_min_length
();
void
calculate_young_list_target_length
();
void
calculate_young_list_target_length
(
size_t
rs_lengths
);
// Update the young list target length either by setting it to the
// desired fixed value or by calculating it using G1's pause
// prediction model. If no rs_lengths parameter is passed, predict
// the RS lengths using the prediction model, otherwise use the
// given rs_lengths as the prediction.
void
update_young_list_target_length
(
size_t
rs_lengths
=
(
size_t
)
-
1
);
// Calculate and return the minimum desired young list target
// length. This is the minimum desired young list length according
// to the user's inputs.
size_t
calculate_young_list_desired_min_length
(
size_t
base_min_length
);
// Calculate and return the maximum desired young list target
// length. This is the maximum desired young list length according
// to the user's inputs.
size_t
calculate_young_list_desired_max_length
();
// Calculate and return the maximum young list target length that
// can fit into the pause time goal. The parameters are: rs_lengths
// represent the prediction of how large the young RSet lengths will
// be, base_min_length is the alreay existing number of regions in
// the young list, min_length and max_length are the desired min and
// max young list length according to the user's inputs.
size_t
calculate_young_list_target_length
(
size_t
rs_lengths
,
size_t
base_min_length
,
size_t
desired_min_length
,
size_t
desired_max_length
);
// Check whether a given young length (young_length) fits into the
// given target pause time and whether the prediction for the amount
// of objects to be copied for the given length will fit into the
// given free space (expressed by base_free_regions). It is used by
// calculate_young_list_target_length().
bool
predict_will_fit
(
size_t
young_length
,
double
base_time_ms
,
size_t
base_free_regions
,
double
target_pause_time_ms
);
public:
...
...
@@ -786,7 +818,10 @@ public:
return
CollectorPolicy
::
G1CollectorPolicyKind
;
}
void
check_prediction_validity
();
// Check the current value of the young list RSet lengths and
// compare it against the last prediction. If the current value is
// higher, recalculate the young list target length prediction.
void
revise_young_list_target_length_if_necessary
();
size_t
bytes_in_collection_set
()
{
return
_bytes_in_collection_set_before_gc
;
...
...
@@ -796,6 +831,9 @@ public:
return
_all_pause_times_ms
->
num
()
+
1
;
}
// This should be called after the heap is resized.
void
record_new_heap_size
(
size_t
new_number_of_regions
);
protected:
// Count the number of bytes used in the CS.
...
...
@@ -807,6 +845,8 @@ protected:
size_t
max_live_bytes
);
void
record_concurrent_mark_cleanup_end_work2
();
void
update_young_list_size_using_newratio
(
size_t
number_of_heap_regions
);
public:
virtual
void
init
();
...
...
@@ -1045,7 +1085,7 @@ public:
// new cycle, as long as we are not already in one. It's best if it
// is called during a safepoint when the test whether a cycle is in
// progress or not is stable.
bool
force_initial_mark_if_outside_cycle
();
bool
force_initial_mark_if_outside_cycle
(
GCCause
::
Cause
gc_cause
);
// This is called at the very beginning of an evacuation pause (it
// has to be the first thing that the pause does). If
...
...
@@ -1204,10 +1244,10 @@ public:
_survivors_age_table
.
merge_par
(
age_table
);
}
void
calcul
ate_max_gc_locker_expansion
();
void
upd
ate_max_gc_locker_expansion
();
// Calculates survivor space parameters.
void
calcul
ate_survivors_policy
();
void
upd
ate_survivors_policy
();
};
...
...
@@ -1234,8 +1274,6 @@ public:
class
G1CollectorPolicy_BestRegionsFirst
:
public
G1CollectorPolicy
{
CollectionSetChooser
*
_collectionSetChooser
;
// If the estimated is less then desirable, resize if possible.
void
expand_if_possible
(
size_t
numRegions
);
virtual
void
choose_collection_set
(
double
target_pause_time_ms
);
virtual
void
record_collection_pause_start
(
double
start_time_sec
,
...
...
@@ -1269,8 +1307,4 @@ inline double variance(int n, double sum_of_squares, double sum) {
return
(
sum_of_squares
-
2.0
*
avg
*
sum
+
n_d
*
avg
*
avg
)
/
n_d
;
}
// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
src/share/vm/gc_implementation/g1/g1ErgoVerbose.cpp
0 → 100644
浏览文件 @
d0e622be
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "utilities/ostream.hpp"
ErgoLevel
G1ErgoVerbose
::
_level
;
bool
G1ErgoVerbose
::
_enabled
[
ErgoHeuristicNum
];
void
G1ErgoVerbose
::
initialize
()
{
set_level
(
ErgoLow
);
set_enabled
(
false
);
}
void
G1ErgoVerbose
::
set_level
(
ErgoLevel
level
)
{
_level
=
level
;
}
void
G1ErgoVerbose
::
set_enabled
(
ErgoHeuristic
n
,
bool
enabled
)
{
assert
(
0
<=
n
&&
n
<
ErgoHeuristicNum
,
"pre-condition"
);
_enabled
[
n
]
=
enabled
;
}
void
G1ErgoVerbose
::
set_enabled
(
bool
enabled
)
{
for
(
int
n
=
0
;
n
<
ErgoHeuristicNum
;
n
+=
1
)
{
set_enabled
((
ErgoHeuristic
)
n
,
enabled
);
}
}
const
char
*
G1ErgoVerbose
::
to_string
(
int
tag
)
{
ErgoHeuristic
n
=
extract_heuristic
(
tag
);
switch
(
n
)
{
case
ErgoHeapSizing
:
return
"Heap Sizing"
;
case
ErgoCSetConstruction
:
return
"CSet Construction"
;
case
ErgoConcCycles
:
return
"Concurrent Cycles"
;
case
ErgoPartiallyYoungGCs
:
return
"Partially-Young GCs"
;
default:
ShouldNotReachHere
();
// Keep the Windows compiler happy
return
NULL
;
}
}
src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp
0 → 100644
浏览文件 @
d0e622be
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ERGOVERBOSE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ERGOVERBOSE_HPP
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
// The log of G1's heuristic decisions comprises of a series of
// records which have a similar format in order to maintain
// consistency across records and ultimately easier parsing of the
// output, if we ever choose to do that. Each record consists of:
// * A time stamp to be able to easily correlate each record with
// other events.
// * A unique string to allow us to easily identify such records.
// * The name of the heuristic the record corresponds to.
// * An action string which describes the action that G1 did or is
// about to do.
// * An optional reason string which describes the reason for the
// action.
// * An optional number of name/value pairs which contributed to the
// decision to take the action described in the record.
//
// Each record is associated with a "tag" which is the combination of
// the heuristic the record corresponds to, as well as the min level
// of verboseness at which the record should be printed. The tag is
// checked against the current settings to determine whether the record
// should be printed or not.
// The available verboseness levels.
// The available verboseness levels. A tag is an int that packs a
// heuristic id in its low ErgoLevelShift bits and a verboseness level
// in the remaining high bits.
typedef enum {
  // Determine which part of the tag is occupied by the level.
  ErgoLevelShift = 8,
  ErgoLevelMask = ~((1 << ErgoLevelShift) - 1),

  // ErgoLow is 0 so that we don't have to explicitly or a heuristic
  // id with ErgoLow to keep its use simpler.
  ErgoLow = 0,
  ErgoHigh = 1 << ErgoLevelShift,
} ErgoLevel;
// The available heuristics.
// The available heuristics. Each value doubles as the heuristic part
// of a tag (see ErgoLevel for the tag layout).
typedef enum {
  // Determines which part of the tag is occupied by the heuristic id.
  ErgoHeuristicMask = ~ErgoLevelMask,

  ErgoHeapSizing = 0,
  ErgoCSetConstruction,
  ErgoConcCycles,
  ErgoPartiallyYoungGCs,

  // Count of heuristics; also sizes G1ErgoVerbose::_enabled.
  ErgoHeuristicNum
} ErgoHeuristic;
// Static facility that decides whether a given ergonomics log record
// (identified by its tag) should be printed, based on the configured
// minimum verboseness level and the per-heuristic enabled flags.
class G1ErgoVerbose : AllStatic {
private:
  // Determines the minimum verboseness level at which records will be
  // printed.
  static ErgoLevel _level;
  // Determines which heuristics are currently enabled.
  static bool _enabled[ErgoHeuristicNum];

  // Pull the level bits out of a tag (see ErgoLevel for the layout).
  static ErgoLevel extract_level(int tag) {
    return (ErgoLevel) (tag & ErgoLevelMask);
  }

  // Pull the heuristic id bits out of a tag.
  static ErgoHeuristic extract_heuristic(int tag) {
    return (ErgoHeuristic) (tag & ErgoHeuristicMask);
  }

public:
  // Needs to be explicitly called at GC initialization.
  static void initialize();

  static void set_level(ErgoLevel level);
  static void set_enabled(ErgoHeuristic h, bool enabled);
  // It is applied to all heuristics.
  static void set_enabled(bool enabled);

  // A record is printed iff its level does not exceed the current
  // level setting and its heuristic is enabled.
  static bool enabled(int tag) {
    ErgoLevel level = extract_level(tag);
    ErgoHeuristic n = extract_heuristic(tag);
    return level <= _level && _enabled[n];
  }

  // Extract the heuristic id from the tag and return a string with
  // its name.
  static const char* to_string(int tag);
};
// The macros below generate the format string for values of different
// types and/or metrics.

// The reason for the action is optional and is handled specially: the
// reason string is concatenated here so it's not necessary to pass it
// as a parameter.
#define ergo_format_reason(_reason_) ", reason: " _reason_

// Single parameter format strings
#define ergo_format_str(_name_) ", " _name_ ": %s"
#define ergo_format_region(_name_) ", " _name_ ": "SIZE_FORMAT" regions"
#define ergo_format_byte(_name_) ", " _name_ ": "SIZE_FORMAT" bytes"
#define ergo_format_double(_name_) ", " _name_ ": %1.2f"
#define ergo_format_perc(_name_) ", " _name_ ": %1.2f %%"
#define ergo_format_ms(_name_) ", " _name_ ": %1.2f ms"

// Double parameter format strings
#define ergo_format_byte_perc(_name_)                                   \
  ", " _name_ ": "SIZE_FORMAT" bytes (%1.2f %%)"

// Generates the format string: timestamp, heuristic name, action, and
// whatever extra name/value pairs the caller appended.
#define ergo_format(_action_, _extra_format_)                           \
  " %1.3f: [G1Ergonomics (%s) " _action_ _extra_format_ "]"

// Conditionally, prints an ergonomic decision record. _extra_format_
// is the format string for the optional items we'd like to print
// (i.e., the decision's reason and any associated values). This
// string should be built up using the ergo_*_format macros (see
// above) to ensure consistency.
//
// Since we cannot rely on the compiler supporting variable argument
// macros, this macro accepts a fixed number of arguments and passes
// them to the print method. For convenience, we have wrapper macros
// below which take a specific number of arguments and set the rest to
// a default value.
#define ergo_verbose_common(_tag_, _action_, _extra_format_,            \
                            _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_) \
  do {                                                                  \
    if (G1ErgoVerbose::enabled((_tag_))) {                              \
      gclog_or_tty->print_cr(ergo_format(_action_, _extra_format_),     \
                             os::elapsedTime(),                         \
                             G1ErgoVerbose::to_string((_tag_)),         \
                             (_arg0_), (_arg1_), (_arg2_),              \
                             (_arg3_), (_arg4_), (_arg5_));             \
    }                                                                   \
  } while (0)

// Wrappers for 0..6 real arguments; unused slots are padded with 0.
#define ergo_verbose(_tag_, _action_)                                   \
  ergo_verbose_common(_tag_, _action_, "", 0, 0, 0, 0, 0, 0)

#define ergo_verbose0(_tag_, _action_, _extra_format_)                  \
  ergo_verbose_common(_tag_, _action_, _extra_format_, 0, 0, 0, 0, 0, 0)

#define ergo_verbose1(_tag_, _action_, _extra_format_,                  \
                      _arg0_)                                           \
  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
                      _arg0_, 0, 0, 0, 0, 0)

#define ergo_verbose2(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_)                                   \
  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_, 0, 0, 0, 0)

#define ergo_verbose3(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_, _arg2_)                           \
  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_, _arg2_, 0, 0, 0)

#define ergo_verbose4(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_, _arg2_, _arg3_)                   \
  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_, _arg2_, _arg3_, 0, 0)

#define ergo_verbose5(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_)           \
  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, 0)

#define ergo_verbose6(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_)   \
  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_)
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ERGOVERBOSE_HPP
src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
浏览文件 @
d0e622be
/*
* Copyright (c) 2001, 201
0
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 201
1
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -97,10 +97,6 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) {
// or performance (we are GC'ing most of the time anyway!),
// simply overwrite the oldest entry in the tracker.
if
(
G1PolicyVerbose
>
1
)
{
warning
(
"MMU Tracker Queue overflow. Replacing earliest entry."
);
}
_head_index
=
trim_index
(
_head_index
+
1
);
assert
(
_head_index
==
_tail_index
,
"Because we have a full circular buffer"
);
_tail_index
=
trim_index
(
_tail_index
+
1
);
...
...
src/share/vm/gc_implementation/g1/g1_globals.hpp
浏览文件 @
d0e622be
...
...
@@ -134,9 +134,9 @@
develop(bool, G1RSCountHisto, false, \
"If true, print a histogram of RS occupancies after each pause") \
\
product(bool, G1PrintRegionLivenessInfo, false,
\
"Prints the liveness information for all regions in the heap "
\
"at the end of a marking cycle.")
\
diagnostic(bool, G1PrintRegionLivenessInfo, false,
\
"Prints the liveness information for all regions in the heap "
\
"at the end of a marking cycle.")
\
\
develop(bool, G1PrintParCleanupStats, false, \
"When true, print extra stats about parallel cleanup.") \
...
...
@@ -228,7 +228,7 @@
"the number of regions for which we'll print a surv rate " \
"summary.") \
\
product(
intx, G1ReservePercent, 10,
\
product(
uintx, G1ReservePercent, 10,
\
"It determines the minimum reserve we should have in the heap " \
"to minimize the probability of promotion failure.") \
\
...
...
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
浏览文件 @
d0e622be
...
...
@@ -98,7 +98,7 @@ void VM_G1IncCollectionPause::doit() {
// At this point we are supposed to start a concurrent cycle. We
// will do so if one is not already in progress.
bool
res
=
g1h
->
g1_policy
()
->
force_initial_mark_if_outside_cycle
();
bool
res
=
g1h
->
g1_policy
()
->
force_initial_mark_if_outside_cycle
(
_gc_cause
);
// The above routine returns true if we were able to force the
// next GC pause to be an initial mark; it returns false if a
...
...
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
浏览文件 @
d0e622be
...
...
@@ -909,10 +909,6 @@ void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption op
}
young_gen
()
->
verify
(
allow_dirty
);
}
if
(
!
silent
)
{
gclog_or_tty
->
print
(
"ref_proc "
);
}
ReferenceProcessor
::
verify
();
}
void
ParallelScavengeHeap
::
print_heap_change
(
size_t
prev_used
)
{
...
...
src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
浏览文件 @
d0e622be
...
...
@@ -80,10 +80,6 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
Universe
::
oops_do
(
&
mark_and_push_closure
);
break
;
case
reference_processing
:
ReferenceProcessor
::
oops_do
(
&
mark_and_push_closure
);
break
;
case
jni_handles
:
JNIHandles
::
oops_do
(
&
mark_and_push_closure
);
break
;
...
...
src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
浏览文件 @
d0e622be
...
...
@@ -98,8 +98,7 @@ class MarkFromRootsTask : public GCTask {
management
=
6
,
jvmti
=
7
,
system_dictionary
=
8
,
reference_processing
=
9
,
code_cache
=
10
code_cache
=
9
};
private:
RootType
_root_type
;
...
...
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
浏览文件 @
d0e622be
...
...
@@ -516,7 +516,6 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
{
ParallelScavengeHeap
::
ParStrongRootsScope
psrs
;
Universe
::
oops_do
(
mark_and_push_closure
());
ReferenceProcessor
::
oops_do
(
mark_and_push_closure
());
JNIHandles
::
oops_do
(
mark_and_push_closure
());
// Global (strong) JNI handles
CodeBlobToOopClosure
each_active_code_blob
(
mark_and_push_closure
(),
/*do_marking=*/
true
);
Threads
::
oops_do
(
mark_and_push_closure
(),
&
each_active_code_blob
);
...
...
@@ -623,7 +622,6 @@ void PSMarkSweep::mark_sweep_phase3() {
// General strong roots.
Universe
::
oops_do
(
adjust_root_pointer_closure
());
ReferenceProcessor
::
oops_do
(
adjust_root_pointer_closure
());
JNIHandles
::
oops_do
(
adjust_root_pointer_closure
());
// Global (strong) JNI handles
Threads
::
oops_do
(
adjust_root_pointer_closure
(),
NULL
);
ObjectSynchronizer
::
oops_do
(
adjust_root_pointer_closure
());
...
...
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
浏览文件 @
d0e622be
...
...
@@ -2445,7 +2445,6 @@ void PSParallelCompact::adjust_roots() {
// General strong roots.
Universe
::
oops_do
(
adjust_root_pointer_closure
());
ReferenceProcessor
::
oops_do
(
adjust_root_pointer_closure
());
JNIHandles
::
oops_do
(
adjust_root_pointer_closure
());
// Global (strong) JNI handles
Threads
::
oops_do
(
adjust_root_pointer_closure
(),
NULL
);
ObjectSynchronizer
::
oops_do
(
adjust_root_pointer_closure
());
...
...
src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
浏览文件 @
d0e622be
...
...
@@ -55,7 +55,6 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
switch
(
_root_type
)
{
case
universe
:
Universe
::
oops_do
(
&
roots_closure
);
ReferenceProcessor
::
oops_do
(
&
roots_closure
);
break
;
case
jni_handles
:
...
...
src/share/vm/memory/genCollectedHeap.cpp
浏览文件 @
d0e622be
...
...
@@ -1269,10 +1269,6 @@ void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option
gclog_or_tty
->
print
(
"remset "
);
}
rem_set
()
->
verify
();
if
(
!
silent
)
{
gclog_or_tty
->
print
(
"ref_proc "
);
}
ReferenceProcessor
::
verify
();
}
void
GenCollectedHeap
::
print
()
const
{
print_on
(
tty
);
}
...
...
src/share/vm/memory/referenceProcessor.cpp
浏览文件 @
d0e622be
此差异已折叠。
点击以展开。
src/share/vm/memory/referenceProcessor.hpp
浏览文件 @
d0e622be
...
...
@@ -52,8 +52,8 @@ class DiscoveredList;
class
ReferenceProcessor
:
public
CHeapObj
{
protected:
//
End of list marker
static
oop
_sentinelRef
;
//
Compatibility with pre-4965777 JDK's
static
bool
_pending_list_uses_discovered_field
;
MemRegion
_span
;
// (right-open) interval of heap
// subject to wkref discovery
bool
_discovering_refs
;
// true when discovery enabled
...
...
@@ -106,8 +106,6 @@ class ReferenceProcessor : public CHeapObj {
int
max_num_q
()
{
return
_max_num_q
;
}
void
set_active_mt_degree
(
int
v
)
{
_num_q
=
v
;
}
DiscoveredList
*
discovered_soft_refs
()
{
return
_discoveredSoftRefs
;
}
static
oop
sentinel_ref
()
{
return
_sentinelRef
;
}
static
oop
*
adr_sentinel_ref
()
{
return
&
_sentinelRef
;
}
ReferencePolicy
*
setup_policy
(
bool
always_clear
)
{
_current_soft_ref_policy
=
always_clear
?
_always_clear_soft_ref_policy
:
_default_soft_ref_policy
;
...
...
@@ -115,7 +113,6 @@ class ReferenceProcessor : public CHeapObj {
return
_current_soft_ref_policy
;
}
public:
// Process references with a certain reachability level.
void
process_discovered_reflist
(
DiscoveredList
refs_lists
[],
ReferencePolicy
*
policy
,
...
...
@@ -230,6 +227,7 @@ class ReferenceProcessor : public CHeapObj {
HeapWord
*
discovered_addr
);
void
verify_ok_to_handle_reflists
()
PRODUCT_RETURN
;
void
clear_discovered_references
(
DiscoveredList
&
refs_list
);
void
abandon_partial_discovered_list
(
DiscoveredList
&
refs_list
);
// Calculate the number of jni handles.
...
...
@@ -300,6 +298,13 @@ class ReferenceProcessor : public CHeapObj {
bool
discovery_is_atomic
()
const
{
return
_discovery_is_atomic
;
}
void
set_atomic_discovery
(
bool
atomic
)
{
_discovery_is_atomic
=
atomic
;
}
// whether the JDK in which we are embedded is a pre-4965777 JDK,
// and thus whether or not it uses the discovered field to chain
// the entries in the pending list.
static
bool
pending_list_uses_discovered_field
()
{
return
_pending_list_uses_discovered_field
;
}
// whether discovery is done by multiple threads same-old-timeously
bool
discovery_is_mt
()
const
{
return
_discovery_is_mt
;
}
void
set_mt_discovery
(
bool
mt
)
{
_discovery_is_mt
=
mt
;
}
...
...
@@ -314,7 +319,6 @@ class ReferenceProcessor : public CHeapObj {
// iterate over oops
void
weak_oops_do
(
OopClosure
*
f
);
// weak roots
static
void
oops_do
(
OopClosure
*
f
);
// strong root(s)
// Balance each of the discovered lists.
void
balance_all_queues
();
...
...
@@ -340,7 +344,6 @@ class ReferenceProcessor : public CHeapObj {
// debugging
void
verify_no_references_recorded
()
PRODUCT_RETURN
;
void
verify_referent
(
oop
obj
)
PRODUCT_RETURN
;
static
void
verify
();
// clear the discovered lists (unlinking each entry).
void
clear_discovered_references
()
PRODUCT_RETURN
;
...
...
@@ -524,12 +527,10 @@ protected:
EnqueueTask
(
ReferenceProcessor
&
ref_processor
,
DiscoveredList
refs_lists
[],
HeapWord
*
pending_list_addr
,
oop
sentinel_ref
,
int
n_queues
)
:
_ref_processor
(
ref_processor
),
_refs_lists
(
refs_lists
),
_pending_list_addr
(
pending_list_addr
),
_sentinel_ref
(
sentinel_ref
),
_n_queues
(
n_queues
)
{
}
...
...
@@ -540,7 +541,6 @@ protected:
ReferenceProcessor
&
_ref_processor
;
DiscoveredList
*
_refs_lists
;
HeapWord
*
_pending_list_addr
;
oop
_sentinel_ref
;
int
_n_queues
;
};
...
...
src/share/vm/memory/sharedHeap.cpp
浏览文件 @
d0e622be
...
...
@@ -146,7 +146,6 @@ void SharedHeap::process_strong_roots(bool activate_scope,
assert
(
_strong_roots_parity
!=
0
,
"must have called prologue code"
);
if
(
!
_process_strong_tasks
->
is_task_claimed
(
SH_PS_Universe_oops_do
))
{
Universe
::
oops_do
(
roots
);
ReferenceProcessor
::
oops_do
(
roots
);
// Consider perm-gen discovered lists to be strong.
perm_gen
()
->
ref_processor
()
->
weak_oops_do
(
roots
);
}
...
...
src/share/vm/oops/instanceRefKlass.cpp
浏览文件 @
d0e622be
...
...
@@ -56,9 +56,8 @@ static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
if
(
!
oopDesc
::
is_null
(
heap_oop
))
{
oop
referent
=
oopDesc
::
decode_heap_oop_not_null
(
heap_oop
);
if
(
!
referent
->
is_gc_marked
()
&&
MarkSweep
::
ref_processor
()
->
discover_reference
(
obj
,
ref
->
reference_type
()))
{
// reference already enqueued, referent will be traversed later
MarkSweep
::
ref_processor
()
->
discover_reference
(
obj
,
ref
->
reference_type
()))
{
// reference was discovered, referent will be traversed later
ref
->
instanceKlass
::
oop_follow_contents
(
obj
);
debug_only
(
if
(
TraceReferenceGC
&&
PrintGCDetails
)
{
...
...
@@ -76,8 +75,34 @@ static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
MarkSweep
::
mark_and_push
(
referent_addr
);
}
}
// treat next as normal oop. next is a link in the pending list.
T
*
next_addr
=
(
T
*
)
java_lang_ref_Reference
::
next_addr
(
obj
);
if
(
ReferenceProcessor
::
pending_list_uses_discovered_field
())
{
// Treat discovered as normal oop, if ref is not "active",
// i.e. if next is non-NULL.
T
next_oop
=
oopDesc
::
load_heap_oop
(
next_addr
);
if
(
!
oopDesc
::
is_null
(
next_oop
))
{
// i.e. ref is not "active"
T
*
discovered_addr
=
(
T
*
)
java_lang_ref_Reference
::
discovered_addr
(
obj
);
debug_only
(
if
(
TraceReferenceGC
&&
PrintGCDetails
)
{
gclog_or_tty
->
print_cr
(
" Process discovered as normal "
INTPTR_FORMAT
,
discovered_addr
);
}
)
MarkSweep
::
mark_and_push
(
discovered_addr
);
}
}
else
{
#ifdef ASSERT
// In the case of older JDKs which do not use the discovered
// field for the pending list, an inactive ref (next != NULL)
// must always have a NULL discovered field.
oop
next
=
oopDesc
::
load_decode_heap_oop
(
next_addr
);
oop
discovered
=
java_lang_ref_Reference
::
discovered
(
obj
);
assert
(
oopDesc
::
is_null
(
next
)
||
oopDesc
::
is_null
(
discovered
),
err_msg
(
"Found an inactive reference "
PTR_FORMAT
" with a non-NULL discovered field"
,
obj
));
#endif
}
// treat next as normal oop. next is a link in the reference queue.
debug_only
(
if
(
TraceReferenceGC
&&
PrintGCDetails
)
{
gclog_or_tty
->
print_cr
(
" Process next as normal "
INTPTR_FORMAT
,
next_addr
);
...
...
@@ -130,13 +155,33 @@ void specialized_oop_follow_contents(instanceRefKlass* ref,
PSParallelCompact
::
mark_and_push
(
cm
,
referent_addr
);
}
}
// treat next as normal oop. next is a link in the pending list.
T
*
next_addr
=
(
T
*
)
java_lang_ref_Reference
::
next_addr
(
obj
);
debug_only
(
if
(
TraceReferenceGC
&&
PrintGCDetails
)
{
gclog_or_tty
->
print_cr
(
" Process next as normal "
INTPTR_FORMAT
,
next_addr
);
if
(
ReferenceProcessor
::
pending_list_uses_discovered_field
())
{
// Treat discovered as normal oop, if ref is not "active",
// i.e. if next is non-NULL.
T
next_oop
=
oopDesc
::
load_heap_oop
(
next_addr
);
if
(
!
oopDesc
::
is_null
(
next_oop
))
{
// i.e. ref is not "active"
T
*
discovered_addr
=
(
T
*
)
java_lang_ref_Reference
::
discovered_addr
(
obj
);
debug_only
(
if
(
TraceReferenceGC
&&
PrintGCDetails
)
{
gclog_or_tty
->
print_cr
(
" Process discovered as normal "
INTPTR_FORMAT
,
discovered_addr
);
}
)
PSParallelCompact
::
mark_and_push
(
cm
,
discovered_addr
);
}
)
}
else
{
#ifdef ASSERT
// In the case of older JDKs which do not use the discovered
// field for the pending list, an inactive ref (next != NULL)
// must always have a NULL discovered field.
T
next
=
oopDesc
::
load_heap_oop
(
next_addr
);
oop
discovered
=
java_lang_ref_Reference
::
discovered
(
obj
);
assert
(
oopDesc
::
is_null
(
next
)
||
oopDesc
::
is_null
(
discovered
),
err_msg
(
"Found an inactive reference "
PTR_FORMAT
" with a non-NULL discovered field"
,
obj
));
#endif
}
PSParallelCompact
::
mark_and_push
(
cm
,
next_addr
);
ref
->
instanceKlass
::
oop_follow_contents
(
cm
,
obj
);
}
...
...
@@ -197,27 +242,53 @@ int instanceRefKlass::oop_adjust_pointers(oop obj) {
}
#define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains) \
T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); \
if (closure->apply_to_weak_ref_discovered_field()) { \
T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); \
closure->do_oop##nv_suffix(disc_addr); \
} \
\
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \
T heap_oop = oopDesc::load_heap_oop(referent_addr); \
if (!oopDesc::is_null(heap_oop) && contains(referent_addr)) {
\
ReferenceProcessor* rp = closure->_ref_processor;
\
ReferenceProcessor* rp = closure->_ref_processor;
\
if (!oopDesc::is_null(heap_oop)) {
\
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); \
if (!referent->is_gc_marked() && (rp != NULL) && \
rp->discover_reference(obj, reference_type())) { \
return size; \
} else
{
\
} else
if (contains(referent_addr)) {
\
/* treat referent as normal oop */
\
SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
closure->do_oop##nv_suffix(referent_addr); \
} \
} \
/* treat next as normal oop */
\
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); \
if (ReferenceProcessor::pending_list_uses_discovered_field()) { \
T next_oop = oopDesc::load_heap_oop(next_addr); \
/* Treat discovered as normal oop, if ref is not "active" (next non-NULL) */
\
if (!oopDesc::is_null(next_oop) && contains(disc_addr)) { \
/* i.e. ref is not "active" */
\
debug_only( \
if(TraceReferenceGC && PrintGCDetails) { \
gclog_or_tty->print_cr(" Process discovered as normal " \
INTPTR_FORMAT, disc_addr); \
} \
) \
SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
closure->do_oop##nv_suffix(disc_addr); \
} \
} else { \
/* In the case of older JDKs which do not use the discovered field for */
\
/* the pending list, an inactive ref (next != NULL) must always have a */
\
/* NULL discovered field. */
\
debug_only( \
T next_oop = oopDesc::load_heap_oop(next_addr); \
T disc_oop = oopDesc::load_heap_oop(disc_addr); \
assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop), \
err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL" \
"discovered field", obj)); \
) \
} \
/* treat next as normal oop */
\
if (contains(next_addr)) { \
SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
closure->do_oop##nv_suffix(next_addr); \
...
...
@@ -306,8 +377,37 @@ void specialized_oop_push_contents(instanceRefKlass *ref,
pm
->
claim_or_forward_depth
(
referent_addr
);
}
}
// treat next as normal oop
// Treat discovered as normal oop, if ref is not "active",
// i.e. if next is non-NULL.
T
*
next_addr
=
(
T
*
)
java_lang_ref_Reference
::
next_addr
(
obj
);
if
(
ReferenceProcessor
::
pending_list_uses_discovered_field
())
{
T
next_oop
=
oopDesc
::
load_heap_oop
(
next_addr
);
if
(
!
oopDesc
::
is_null
(
next_oop
))
{
// i.e. ref is not "active"
T
*
discovered_addr
=
(
T
*
)
java_lang_ref_Reference
::
discovered_addr
(
obj
);
debug_only
(
if
(
TraceReferenceGC
&&
PrintGCDetails
)
{
gclog_or_tty
->
print_cr
(
" Process discovered as normal "
INTPTR_FORMAT
,
discovered_addr
);
}
)
if
(
PSScavenge
::
should_scavenge
(
discovered_addr
))
{
pm
->
claim_or_forward_depth
(
discovered_addr
);
}
}
}
else
{
#ifdef ASSERT
// In the case of older JDKs which do not use the discovered
// field for the pending list, an inactive ref (next != NULL)
// must always have a NULL discovered field.
oop
next
=
oopDesc
::
load_decode_heap_oop
(
next_addr
);
oop
discovered
=
java_lang_ref_Reference
::
discovered
(
obj
);
assert
(
oopDesc
::
is_null
(
next
)
||
oopDesc
::
is_null
(
discovered
),
err_msg
(
"Found an inactive reference "
PTR_FORMAT
" with a non-NULL discovered field"
,
obj
));
#endif
}
// Treat next as normal oop; next is a link in the reference queue.
if
(
PSScavenge
::
should_scavenge
(
next_addr
))
{
pm
->
claim_or_forward_depth
(
next_addr
);
}
...
...
src/share/vm/prims/jvm.h
浏览文件 @
d0e622be
...
...
@@ -1579,7 +1579,8 @@ typedef struct {
*/
unsigned
int
thread_park_blocker
:
1
;
unsigned
int
post_vm_init_hook_enabled
:
1
;
unsigned
int
:
30
;
unsigned
int
pending_list_uses_discovered_field
:
1
;
unsigned
int
:
29
;
unsigned
int
:
32
;
unsigned
int
:
32
;
}
jdk_version_info
;
...
...
src/share/vm/runtime/java.cpp
浏览文件 @
d0e622be
...
...
@@ -672,7 +672,8 @@ void JDK_Version::initialize() {
_current
=
JDK_Version
(
major
,
minor
,
micro
,
info
.
update_version
,
info
.
special_update_version
,
build
,
info
.
thread_park_blocker
==
1
,
info
.
post_vm_init_hook_enabled
==
1
);
info
.
post_vm_init_hook_enabled
==
1
,
info
.
pending_list_uses_discovered_field
==
1
);
}
}
...
...
src/share/vm/runtime/java.hpp
浏览文件 @
d0e622be
...
...
@@ -92,6 +92,7 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC {
bool
_partially_initialized
;
bool
_thread_park_blocker
;
bool
_pending_list_uses_discovered_field
;
bool
_post_vm_init_hook_enabled
;
bool
is_valid
()
const
{
...
...
@@ -114,15 +115,18 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC {
JDK_Version
()
:
_major
(
0
),
_minor
(
0
),
_micro
(
0
),
_update
(
0
),
_special
(
0
),
_build
(
0
),
_partially_initialized
(
false
),
_thread_park_blocker
(
false
),
_post_vm_init_hook_enabled
(
false
)
{}
_thread_park_blocker
(
false
),
_post_vm_init_hook_enabled
(
false
),
_pending_list_uses_discovered_field
(
false
)
{}
JDK_Version
(
uint8_t
major
,
uint8_t
minor
=
0
,
uint8_t
micro
=
0
,
uint8_t
update
=
0
,
uint8_t
special
=
0
,
uint8_t
build
=
0
,
bool
thread_park_blocker
=
false
,
bool
post_vm_init_hook_enabled
=
false
)
:
bool
thread_park_blocker
=
false
,
bool
post_vm_init_hook_enabled
=
false
,
bool
pending_list_uses_discovered_field
=
false
)
:
_major
(
major
),
_minor
(
minor
),
_micro
(
micro
),
_update
(
update
),
_special
(
special
),
_build
(
build
),
_partially_initialized
(
false
),
_thread_park_blocker
(
thread_park_blocker
),
_post_vm_init_hook_enabled
(
post_vm_init_hook_enabled
)
{}
_post_vm_init_hook_enabled
(
post_vm_init_hook_enabled
),
_pending_list_uses_discovered_field
(
pending_list_uses_discovered_field
)
{}
// Returns the current running JDK version
static
JDK_Version
current
()
{
return
_current
;
}
...
...
@@ -149,6 +153,10 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC {
bool
post_vm_init_hook_enabled
()
const
{
return
_post_vm_init_hook_enabled
;
}
// For compatibility wrt pre-4965777 JDK's
bool
pending_list_uses_discovered_field
()
const
{
return
_pending_list_uses_discovered_field
;
}
// Performs a full ordering comparison using all fields (update, build, etc.)
int
compare
(
const
JDK_Version
&
other
)
const
;
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录