openanolis / dragonwell8_hotspot
Commit 081048c3
Merge of parents f8d9b716 and 53f53e89
Authored Feb 04, 2011 by trims

10 changed files, with 138 additions and 79 deletions:
  src/share/vm/compiler/compileBroker.cpp                      +12   -2
  src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp      +2   -2
  src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp    +8   -3
  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp        +100  -57
  src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp         +3   -4
  src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp       +1   -1
  src/share/vm/gc_implementation/g1/g1RemSet.cpp                +2   -2
  src/share/vm/gc_implementation/g1/g1_globals.hpp              +6   -2
  src/share/vm/gc_implementation/shared/allocationStats.hpp     +2   -4
  src/share/vm/gc_implementation/shared/gcUtil.cpp              +2   -2
src/share/vm/compiler/compileBroker.cpp

 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -1210,7 +1210,17 @@ uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
 // Should the current thread be blocked until this compilation request
 // has been fulfilled?
 bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
-  return !BackgroundCompilation;
+  if (!BackgroundCompilation) {
+    Symbol* class_name = method->method_holder()->klass_part()->name();
+    if (class_name->starts_with("java/lang/ref/Reference", 23)) {
+      // The reference handler thread can dead lock with the GC if compilation is blocking,
+      // so we avoid blocking compiles for anything in the java.lang.ref.Reference class,
+      // including inner classes such as ReferenceHandler.
+      return false;
+    }
+    return true;
+  }
+  return false;
 }
...
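The compileBroker.cpp change above makes foreground (blocking) compilation refuse to block for any method whose holder class name begins with java/lang/ref/Reference, which also covers inner classes such as ReferenceHandler. A minimal standalone sketch of that matching rule, using hypothetical helper names rather than the real Symbol/methodHandle API:

    #include <cstring>
    #include <cstdio>

    // Hypothetical stand-in for Symbol::starts_with(prefix, len): true when the
    // first `len` characters of `name` equal `prefix`.
    static bool starts_with(const char* name, const char* prefix, int len) {
      return std::strncmp(name, prefix, len) == 0;
    }

    // Mirrors the decision in is_compile_blocking(): never block for
    // java/lang/ref/Reference or its inner classes (e.g. ...$ReferenceHandler).
    static bool may_block_for(const char* class_name, bool background_compilation) {
      if (!background_compilation) {
        if (starts_with(class_name, "java/lang/ref/Reference", 23)) {
          return false;   // avoid a potential deadlock with the reference handler
        }
        return true;
      }
      return false;
    }

    int main() {
      std::printf("%d\n", may_block_for("java/lang/ref/Reference$ReferenceHandler", false)); // 0
      std::printf("%d\n", may_block_for("java/util/ArrayList", false));                      // 1
      std::printf("%d\n", may_block_for("java/util/ArrayList", true));                       // 0
    }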
src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -99,7 +99,7 @@ void ConcurrentG1Refine::init() {
   if (G1ConcRSLogCacheSize > 0) {
     _g1h = G1CollectedHeap::heap();
     _max_n_card_counts =
-      (unsigned) (_g1h->g1_reserved_obj_bytes() >> CardTableModRefBS::card_shift);
+      (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift);

     size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
     guarantee(_max_n_card_counts < max_card_num, "card_num representation");
...
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp

...
@@ -251,6 +251,14 @@ void ConcurrentMarkThread::run() {
         // Now do the remainder of the cleanup operation.
         _cm->completeCleanup();

+        // Notify anyone who's waiting that there are no more free
+        // regions coming. We have to do this before we join the STS,
+        // otherwise we might deadlock: a GC worker could be blocked
+        // waiting for the notification whereas this thread will be
+        // blocked for the pause to finish while it's trying to join
+        // the STS, which is conditional on the GC workers finishing.
+        g1h->reset_free_regions_coming();
+
         _sts.join();
         g1_policy->record_concurrent_mark_cleanup_completed();
         _sts.leave();
...
@@ -262,9 +270,6 @@ void ConcurrentMarkThread::run() {
           gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
                                  cleanup_end_sec - cleanup_start_sec);
         }
-
-        // We're done: no more free regions coming.
-        g1h->reset_free_regions_coming();
       }
       guarantee(cm()->cleanup_list_is_empty(),
                 "at this point there should be no regions on the cleanup list");
...
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

...
@@ -546,8 +546,11 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
     res = new_region_try_secondary_free_list(word_size);
   }
   if (res == NULL && do_expand) {
-    expand(word_size * HeapWordSize);
-    res = _free_list.remove_head_or_null();
+    if (expand(word_size * HeapWordSize)) {
+      // The expansion succeeded and so we should have at least one
+      // region on the free list.
+      res = _free_list.remove_head();
+    }
   }
   if (res != NULL) {
     if (G1PrintHeapRegions) {
...
@@ -631,9 +634,22 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   if (first == -1) {
     // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
-      expand((num_regions - fs) * HeapRegion::GrainBytes);
-      first = humongous_obj_allocate_find_first(num_regions, word_size);
-      assert(first != -1, "this should have worked");
+      // If the number of regions we're trying to allocate for this
+      // object is at most the number of regions in the free suffix,
+      // then the call to humongous_obj_allocate_find_first() above
+      // should have succeeded and we wouldn't be here.
+      //
+      // We should only be trying to expand when the free suffix is
+      // not sufficient for the object _and_ we have some expansion
+      // room available.
+      assert(num_regions > fs, "earlier allocation should have succeeded");
+
+      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
+        first = humongous_obj_allocate_find_first(num_regions, word_size);
+        // If the expansion was successful then the allocation
+        // should have been successful.
+        assert(first != -1, "this should have worked");
+      }
     }
   }
...
@@ -1647,16 +1663,17 @@ resize_if_necessary_after_full_collection(size_t word_size) {
   if (capacity_after_gc < minimum_desired_capacity) {
     // Don't expand unless it's significant
     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
-    expand(expand_bytes);
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("  "
-                             "  expanding:"
-                             "  max_heap_size: %6.1fK"
-                             "  minimum_desired_capacity: %6.1fK"
-                             "  expand_bytes: %6.1fK",
-                             (double) max_heap_size / (double) K,
-                             (double) minimum_desired_capacity / (double) K,
-                             (double) expand_bytes / (double) K);
+    if (expand(expand_bytes)) {
+      if (PrintGC && Verbose) {
+        gclog_or_tty->print_cr("  "
+                               "  expanding:"
+                               "  max_heap_size: %6.1fK"
+                               "  minimum_desired_capacity: %6.1fK"
+                               "  expand_bytes: %6.1fK",
+                               (double) max_heap_size / (double) K,
+                               (double) minimum_desired_capacity / (double) K,
+                               (double) expand_bytes / (double) K);
+      }
     }

   // No expansion, now see if we want to shrink
...
@@ -1757,66 +1774,84 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
   verify_region_sets_optional();

-  size_t expand_bytes = word_size * HeapWordSize;
-  if (expand_bytes < MinHeapDeltaBytes) {
-    expand_bytes = MinHeapDeltaBytes;
+  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
+  if (expand(expand_bytes)) {
+    verify_region_sets_optional();
+    return attempt_allocation_at_safepoint(word_size,
+                                           false /* expect_null_cur_alloc_region */);
   }
-  expand(expand_bytes);
-  verify_region_sets_optional();
-  return attempt_allocation_at_safepoint(word_size,
-                                         false /* expect_null_cur_alloc_region */);
+  return NULL;
 }

 // FIXME: both this and shrink could probably be more efficient by
 // doing one "VirtualSpace::expand_by" call rather than several.
-void G1CollectedHeap::expand(size_t expand_bytes) {
+bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
-  // We expand by a minimum of 1K.
-  expand_bytes = MAX2(expand_bytes, (size_t)K);
-  size_t aligned_expand_bytes =
-    ReservedSpace::page_align_size_up(expand_bytes);
+  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
-  expand_bytes = aligned_expand_bytes;
-  while (expand_bytes > 0) {
-    HeapWord* base = (HeapWord*)_g1_storage.high();
-    // Commit more storage.
-    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
-    if (!successful) {
-      expand_bytes = 0;
-    } else {
-      expand_bytes -= HeapRegion::GrainBytes;
-      // Expand the committed region.
-      HeapWord* high = (HeapWord*) _g1_storage.high();
-      _g1_committed.set_end(high);
+
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK",
+                        old_mem_size/K, aligned_expand_bytes/K);
+  }
+
+  HeapWord* old_end = (HeapWord*)_g1_storage.high();
+  bool successful = _g1_storage.expand_by(aligned_expand_bytes);
+  if (successful) {
+    HeapWord* new_end = (HeapWord*)_g1_storage.high();
+
+    // Expand the committed region.
+    _g1_committed.set_end(new_end);
+
+    // Tell the cardtable about the expansion.
+    Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
+    // And the offset table as well.
+    _bot_shared->resize(_g1_committed.word_size());
+
+    expand_bytes = aligned_expand_bytes;
+    HeapWord* base = old_end;
+
+    // Create the heap regions for [old_end, new_end)
+    while (expand_bytes > 0) {
+      HeapWord* high = base + HeapRegion::GrainWords;
+
       // Create a new HeapRegion.
       MemRegion mr(base, high);
       bool is_zeroed = !_g1_max_committed.contains(base);
       HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

-      // Now update max_committed if necessary.
-      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
-
       // Add it to the HeapRegionSeq.
       _hrs->insert(hr);
       _free_list.add_as_tail(hr);
+
       // And we used up an expansion region to create it.
       _expansion_regions--;
-
-      // Tell the cardtable about it.
-      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-      // And the offset table as well.
-      _bot_shared->resize(_g1_committed.word_size());
+
+      expand_bytes -= HeapRegion::GrainBytes;
+      base += HeapRegion::GrainWords;
+    }
+    assert(base == new_end, "sanity");
+
+    // Now update max_committed if necessary.
+    _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
+
+  } else {
+    // The expansion of the virtual storage space was unsuccessful.
+    // Let's see if it was because we ran out of swap.
+    if (G1ExitOnExpansionFailure &&
+        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
+      // We had head room...
+      vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
     }
   }
+
   if (Verbose && PrintGC) {
     size_t new_mem_size = _g1_storage.committed_size();
-    gclog_or_tty->print_cr(" Expanding garbage-first heap from %ldK by %ldK to %ldK",
-                           old_mem_size/K, aligned_expand_bytes/K,
-                           new_mem_size/K);
+    gclog_or_tty->print_cr("...%s, expanded to %ldK",
+                           (successful ? "Successful" : "Failed"),
+                           new_mem_size/K);
   }
+  return successful;
 }

 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
...
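The rewritten expand() above also changes strategy: instead of committing one region at a time in a loop, it issues a single expand_by() for the whole aligned amount and, on success, carves the newly committed range [old_end, new_end) into GrainWords-sized regions. A compact sketch of that carve-after-commit pattern, using hypothetical sizes and a plain vector in place of the HeapRegionSeq and free list:

    #include <cassert>
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
      const size_t kGrainWords = 512;             // hypothetical region size in words
      size_t committed_words   = 4 * kGrainWords;
      size_t expand_words      = 3 * kGrainWords; // already aligned to the grain

      // Pretend addresses: the committed space ends at old_end before expansion.
      size_t old_end = committed_words;
      size_t new_end = old_end + expand_words;    // one "expand_by" of the whole amount

      // Carve the newly committed range [old_end, new_end) into regions.
      std::vector<std::pair<size_t, size_t>> new_regions;
      for (size_t base = old_end; base < new_end; base += kGrainWords) {
        new_regions.push_back({base, base + kGrainWords});
      }
      assert(new_regions.back().second == new_end && "sanity: carved exactly to new_end");

      std::printf("created %zu regions\n", new_regions.size());   // 3
    }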
@@ -2088,7 +2123,10 @@ jint G1CollectedHeap::initialize() {
   HeapRegionRemSet::init_heap(max_regions());

   // Now expand into the initial heap size.
-  expand(init_byte_size);
+  if (!expand(init_byte_size)) {
+    vm_exit_during_initialization("Failed to allocate initial heap.");
+    return JNI_ENOMEM;
+  }

   // Perform any initialization actions delegated to the policy.
   g1_policy()->init();
...
@@ -2744,7 +2782,7 @@ size_t G1CollectedHeap::large_typearray_limit() {
 }

 size_t G1CollectedHeap::max_capacity() const {
-  return g1_reserved_obj_bytes();
+  return _g1_reserved.byte_size();
 }

 jlong G1CollectedHeap::millis_since_last_gc() {
...
@@ -3538,7 +3576,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       size_t expand_bytes = g1_policy()->expansion_amount();
       if (expand_bytes > 0) {
         size_t bytes_before = capacity();
-        expand(expand_bytes);
+        if (!expand(expand_bytes)) {
+          // We failed to expand the heap so let's verify that
+          // committed/uncommitted amount match the backing store
+          assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
+          assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+        }
       }
     }
...
@@ -3762,7 +3805,7 @@ void G1CollectedHeap::get_gc_alloc_regions() {
     if (alloc_region == NULL) {
       // we will get a new GC alloc region
-      alloc_region = new_gc_alloc_region(ap, 0);
+      alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
     } else {
       // the region was retained from the last collection
       ++_gc_alloc_region_counts[ap];
...
@@ -5311,7 +5354,7 @@ size_t G1CollectedHeap::n_regions() {

 size_t G1CollectedHeap::max_regions() {
   return
-    (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
+    (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
     HeapRegion::GrainBytes;
 }
...
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

...
@@ -619,8 +619,10 @@ protected:
 public:
   // Expand the garbage-first heap by at least the given size (in bytes!).
+  // Returns true if the heap was expanded by the requested amount;
+  // false otherwise.
   // (Rounds up to a HeapRegion boundary.)
-  virtual void expand(size_t expand_bytes);
+  bool expand(size_t expand_bytes);

   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
...
@@ -981,9 +983,6 @@ public:
   // Reference Processing accessor
   ReferenceProcessor* ref_processor() { return _ref_processor; }

-  // Reserved (g1 only; super method includes perm), capacity and the used
-  // portion in bytes.
-  size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); }
   virtual size_t capacity() const;
   virtual size_t used() const;

   // This should be called when we're not holding the heap lock. The
...
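The header change above (virtual void expand → bool expand) is what drives most of the .cpp edits in this commit: callers must now check whether the expansion actually happened before assuming new regions exist. A minimal sketch of that caller idiom, with hypothetical Heap/region types rather than the G1 classes:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Hypothetical stand-ins used only to illustrate the new calling convention;
    // these are not the G1CollectedHeap / HeapRegion classes.
    struct Heap {
      std::vector<int> free_list;          // indices of free regions
      size_t committed = 0;
      size_t reserved  = 64;               // capacity, in regions

      // Commit more storage; report whether the expansion actually happened.
      bool expand(size_t regions) {
        if (committed + regions > reserved) return false;   // no room left
        for (size_t i = 0; i < regions; ++i) free_list.push_back((int)committed++);
        return true;
      }

      // Caller pattern after this commit: only touch the free list when
      // expand() reports success, otherwise fall back (here: return -1).
      int new_region_with_expand(size_t regions) {
        if (regions == 0 || !expand(regions)) return -1;
        int r = free_list.back();          // non-empty: expand() just added regions
        free_list.pop_back();
        return r;
      }
    };

    int main() {
      Heap h;
      std::printf("got region %d\n", h.new_region_with_expand(1));    // 0
      std::printf("got region %d\n", h.new_region_with_expand(100));  // -1 (too large)
    }

On failure the real call sites above fall back in their own way: expand_and_allocate() returns NULL, initialize() exits with JNI_ENOMEM, and the pause path merely verifies that committed/reserved sizes still match the backing store.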
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

...
@@ -2011,7 +2011,7 @@ size_t G1CollectorPolicy::expansion_amount() {
   // space, whichever is smaller, bounded below by a minimum
   // expansion (unless that's all that's left.)
   const size_t min_expand_bytes = 1*M;
-  size_t reserved_bytes = _g1->g1_reserved_obj_bytes();
+  size_t reserved_bytes = _g1->max_capacity();
   size_t committed_bytes = _g1->capacity();
   size_t uncommitted_bytes = reserved_bytes - committed_bytes;
   size_t expand_bytes;
...
src/share/vm/gc_implementation/g1/g1RemSet.cpp

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -735,7 +735,7 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
   MemRegion dirtyRegion(start, end);

 #if CARD_REPEAT_HISTO
-  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
+  init_ct_freq_table(_g1->max_capacity());
   ct_freq_note_card(_ct_bs->index_for(start));
 #endif
...
src/share/vm/gc_implementation/g1/g1_globals.hpp

...
@@ -301,9 +301,13 @@
   develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                   \
           "Artificial delay during concurrent region freeing")              \
                                                                             \
   develop(bool, ReduceInitialCardMarksForG1, false,                         \
           "When ReduceInitialCardMarks is true, this flag setting "         \
-          " controls whether G1 allows the RICM optimization")
+          " controls whether G1 allows the RICM optimization")              \
+                                                                            \
+  develop(bool, G1ExitOnExpansionFailure, false,                            \
+          "Raise a fatal VM exit out of memory failure in the event "       \
+          " that heap expansion fails due to running out of swap.")

 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
...
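One note on scope: develop() flags such as the new G1ExitOnExpansionFailure are compile-time constants in product builds of HotSpot and can only be toggled on the command line of debug/fastdebug builds, where a hypothetical invocation would look like java -XX:+UseG1GC -XX:+G1ExitOnExpansionFailure ... .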
src/share/vm/gc_implementation/shared/allocationStats.hpp

...
@@ -116,10 +116,8 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
     _demand_rate_estimate.sample(rate);
     float new_rate = _demand_rate_estimate.padded_average();
     ssize_t old_desired = _desired;
-    _desired = (ssize_t)(new_rate * (inter_sweep_estimate
-                                     + CMSExtrapolateSweep
-                                       ? intra_sweep_estimate
-                                       : 0.0));
+    float delta_ise = (CMSExtrapolateSweep ? intra_sweep_estimate : 0.0);
+    _desired = (ssize_t)(new_rate * (inter_sweep_estimate + delta_ise));
     if (PrintFLSStatistics > 1) {
       gclog_or_tty->print_cr("demand: %d, old_rate: %f, current_rate: %f, "
                              "new_rate: %f, old_desired: %d, new_desired: %d",
                              demand, old_rate, rate, new_rate, old_desired, _desired);
...
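The bug fixed here is pure operator precedence: ?: binds more loosely than +, so `a + flag ? x : 0.0` parses as `(a + flag) ? x : 0.0`, silently discarding `a`. A small standalone illustration (plain C++, hypothetical values, not the HotSpot code):

    #include <cstdio>

    int main() {
      bool  extrapolate          = true;
      float inter_sweep_estimate = 10.0f;
      float intra_sweep_estimate = 2.0f;

      // Buggy form: parsed as (inter_sweep_estimate + extrapolate) ? intra : 0.0,
      // so the whole expression collapses to intra_sweep_estimate (2.0).
      float buggy = inter_sweep_estimate + extrapolate ? intra_sweep_estimate : 0.0f;

      // Fixed form, matching the patch: evaluate the conditional first.
      float delta_ise = (extrapolate ? intra_sweep_estimate : 0.0f);
      float fixed = inter_sweep_estimate + delta_ise;     // 12.0

      std::printf("buggy = %.1f, fixed = %.1f\n", buggy, fixed);
      return 0;
    }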
src/share/vm/gc_implementation/shared/gcUtil.cpp

...
@@ -106,8 +106,8 @@ void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
 }

 LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
-  _sum_x(0), _sum_y(0), _sum_xy(0),
-  _mean_x(weight), _mean_y(weight) {}
+  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
+  _intercept(0), _slope(0), _mean_x(weight), _mean_y(weight) {}

 void LinearLeastSquareFit::update(double x, double y) {
   _sum_x = _sum_x + x;
...
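The fix here is simply that all accumulators of the fit are now zero-initialized; otherwise the first update() would mix uninitialized garbage into the sums. For context, a minimal self-contained sketch of what such a running least-squares fit computes (ordinary unweighted form; a hypothetical class, not the HotSpot one, which uses exponentially weighted means):

    #include <cstdio>

    // Minimal running least-squares fit y ~= slope*x + intercept (unweighted).
    class RunningFit {
     public:
      RunningFit() : _n(0), _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
                     _intercept(0), _slope(0) {}   // zero-init everything, as in the patch

      void update(double x, double y) {
        _n += 1.0;
        _sum_x         += x;
        _sum_x_squared += x * x;
        _sum_y         += y;
        _sum_xy        += x * y;
        double denom = _n * _sum_x_squared - _sum_x * _sum_x;
        if (denom != 0.0) {
          _slope     = (_n * _sum_xy - _sum_x * _sum_y) / denom;
          _intercept = (_sum_y - _slope * _sum_x) / _n;
        }
      }

      double y(double x) const { return _slope * x + _intercept; }

     private:
      double _n, _sum_x, _sum_x_squared, _sum_y, _sum_xy, _intercept, _slope;
    };

    int main() {
      RunningFit fit;
      fit.update(1.0, 2.0);
      fit.update(2.0, 4.1);
      fit.update(3.0, 5.9);
      std::printf("predicted y(4) = %.2f\n", fit.y(4.0));   // close to 8
    }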