Commit e44f2e1d
Authored Jan 29, 2015 by redestad

8069273: Decrease Hot Card Cache Lock contention

Reviewed-by: tschatzl, mgerdin

Parent: a8639db7
Showing 4 changed files with 77 additions and 69 deletions:

  src/share/vm/gc_implementation/g1/g1HotCardCache.cpp   +44  -51
  src/share/vm/gc_implementation/g1/g1HotCardCache.hpp   +31  -13
  src/share/vm/runtime/mutexLocker.cpp                     +1   -3
  src/share/vm/runtime/mutexLocker.hpp                     +1   -2
src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -36,11 +36,10 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
   if (default_use_cache()) {
     _use_cache = true;
 
-    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
+    _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
     _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
 
-    _n_hot = 0;
-    _hot_cache_idx = 0;
+    reset_hot_cache_internal();
 
     // For refining the cards in the hot cache in parallel
     _hot_cache_par_chunk_size = (int)(ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
...
@@ -64,26 +63,21 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
     // return it for immediate refining.
     return card_ptr;
   }
 
   // Otherwise, the card is hot.
-  jbyte* res = NULL;
-  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
-  if (_n_hot == _hot_cache_size) {
-    res = _hot_cache[_hot_cache_idx];
-    _n_hot--;
-  }
-
-  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
-  _hot_cache[_hot_cache_idx] = card_ptr;
-  _hot_cache_idx++;
-
-  if (_hot_cache_idx == _hot_cache_size) {
-    // Wrap around
-    _hot_cache_idx = 0;
-  }
-  _n_hot++;
-
-  return res;
+  size_t index = Atomic::add_ptr((intptr_t)1, (volatile intptr_t*)&_hot_cache_idx) - 1;
+  size_t masked_index = index & (_hot_cache_size - 1);
+  jbyte* current_ptr = _hot_cache[masked_index];
+
+  // Try to store the new card pointer into the cache. Compare-and-swap to guard
+  // against the unlikely event of a race resulting in another card pointer to
+  // have already been written to the cache. In this case we will return
+  // card_ptr in favor of the other option, which would be starting over. This
+  // should be OK since card_ptr will likely be the older card already when/if
+  // this ever happens.
+  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
+                                                    &_hot_cache[masked_index],
+                                                    current_ptr);
+  return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }
 
 void G1HotCardCache::drain(uint worker_i,
...
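Taken on its own, the rewritten insert() is a small lock-free pattern: a fetch-and-add claims a slot in a power-of-two sized array, a mask wraps the index, and a compare-and-swap publishes the new card pointer while handing back whatever was evicted, or the new pointer itself if another thread won the race for the slot. Below is a minimal standalone sketch of that pattern using std::atomic in place of HotSpot's Atomic class; the HotSlotCache name and interface are illustrative only, not part of the patch.

#include <atomic>
#include <cstddef>

// Minimal sketch of the lock-free hot-cache insert: claim a slot with
// fetch-and-add, wrap with a mask (capacity is a power of two), publish
// with compare-and-swap.
template <typename T, std::size_t CapacityPow2>
class HotSlotCache {
  static_assert(CapacityPow2 != 0 && (CapacityPow2 & (CapacityPow2 - 1)) == 0,
                "capacity must be a power of two");

  std::atomic<std::size_t> _idx;
  std::atomic<T*>          _slots[CapacityPow2];

public:
  HotSlotCache() : _idx(0) {
    for (std::size_t i = 0; i < CapacityPow2; i++) {
      _slots[i].store(nullptr, std::memory_order_relaxed);
    }
  }

  // Returns the evicted entry (nullptr while the cache is still filling),
  // or ptr itself if another thread raced us into the same slot.
  T* insert(T* ptr) {
    std::size_t index        = _idx.fetch_add(1, std::memory_order_relaxed);
    std::size_t masked_index = index & (CapacityPow2 - 1);

    T* current = _slots[masked_index].load(std::memory_order_relaxed);
    if (_slots[masked_index].compare_exchange_strong(current, ptr)) {
      return current;   // we own the slot; hand back what it previously held
    }
    return ptr;          // lost the race; caller processes ptr immediately
  }
};

Losing the CAS race simply returns the caller's own pointer for immediate refinement, which is the same fallback the patch comment above describes.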
@@ -96,38 +90,37 @@ void G1HotCardCache::drain(uint worker_i,
   assert(_hot_cache != NULL, "Logic");
   assert(!use_cache(), "cache should be disabled");
-  int start_idx;
-
-  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
-    int end_idx = start_idx + _hot_cache_par_chunk_size;
-
-    if (start_idx ==
-        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
-      // The current worker has successfully claimed the chunk [start_idx..end_idx)
-      end_idx = MIN2(end_idx, _n_hot);
-      for (int i = start_idx; i < end_idx; i++) {
-        jbyte* card_ptr = _hot_cache[i];
-        if (card_ptr != NULL) {
-          if (g1rs->refine_card(card_ptr, worker_i, true)) {
-            // The part of the heap spanned by the card contains references
-            // that point into the current collection set.
-            // We need to record the card pointer in the DirtyCardQueueSet
-            // that we use for such cards.
-            //
-            // The only time we care about recording cards that contain
-            // references that point into the collection set is during
-            // RSet updating while within an evacuation pause.
-            // In this case worker_i should be the id of a GC worker thread
-            assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
-            assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
-                   err_msg("incorrect worker id: "UINT32_FORMAT, worker_i));
-
-            into_cset_dcq->enqueue(card_ptr);
-          }
-        }
-      }
-    }
-  }
+
+  while (_hot_cache_par_claimed_idx < _hot_cache_size) {
+    size_t end_idx = Atomic::add_ptr((intptr_t)_hot_cache_par_chunk_size,
+                                     (volatile intptr_t*)&_hot_cache_par_claimed_idx);
+    size_t start_idx = end_idx - _hot_cache_par_chunk_size;
+    // The current worker has successfully claimed the chunk [start_idx..end_idx)
+    end_idx = MIN2(end_idx, _hot_cache_size);
+    for (size_t i = start_idx; i < end_idx; i++) {
+      jbyte* card_ptr = _hot_cache[i];
+      if (card_ptr != NULL) {
+        if (g1rs->refine_card(card_ptr, worker_i, true)) {
+          // The part of the heap spanned by the card contains references
+          // that point into the current collection set.
+          // We need to record the card pointer in the DirtyCardQueueSet
+          // that we use for such cards.
+          //
+          // The only time we care about recording cards that contain
+          // references that point into the collection set is during
+          // RSet updating while within an evacuation pause.
+          // In this case worker_i should be the id of a GC worker thread
+          assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
+          assert(worker_i < ParallelGCThreads, err_msg("incorrect worker id: %u", worker_i));
+
+          into_cset_dcq->enqueue(card_ptr);
+        }
+      } else {
+        break;
+      }
+    }
+  }
+
   // The existing entries in the hot card cache, which were just refined
   // above, are discarded prior to re-enabling the cache near the end of the GC.
 }
...
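The new drain() loop is likewise a generic work-distribution idiom: each worker claims the next fixed-size chunk of the array with a single fetch-and-add and processes [start_idx, end_idx) clamped to the array size, so no lock and no CAS retry loop is needed. A rough standalone sketch under the same assumptions follows (std::atomic instead of HotSpot's Atomic; drain_parallel and process are made-up names).

#include <algorithm>
#include <atomic>
#include <cstddef>

// Sketch of the chunk-claiming drain loop: every fetch_add hands the calling
// worker a private [start_idx, end_idx) range, clamped to the array size; a
// nullptr entry means the cache never filled past this point.
template <typename T, typename ProcessFn>
void drain_parallel(T* const* slots, std::size_t size,
                    std::atomic<std::size_t>& claimed_idx,
                    std::size_t chunk_size, ProcessFn process) {
  while (claimed_idx.load(std::memory_order_relaxed) < size) {
    std::size_t end_idx =
        claimed_idx.fetch_add(chunk_size, std::memory_order_relaxed) + chunk_size;
    std::size_t start_idx = end_idx - chunk_size;
    end_idx = std::min(end_idx, size);   // the last chunk may be partial

    for (std::size_t i = start_idx; i < end_idx; i++) {
      T* ptr = slots[i];
      if (ptr == nullptr) {
        break;                           // entries are filled front to back
      }
      process(ptr);
    }
  }
}

Several threads can run this loop concurrently against the same claimed_idx; a worker whose claimed range starts past the end simply finds an empty range, re-checks the condition, and exits.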
src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -54,21 +54,30 @@ class HeapRegion;
 // code, increasing throughput.
 
 class G1HotCardCache: public CHeapObj<mtGC> {
-  G1CollectedHeap*   _g1h;
+
+  G1CollectedHeap*  _g1h;
+
+  bool              _use_cache;
+
+  G1CardCounts      _card_counts;
 
   // The card cache table
-  jbyte**      _hot_cache;
-
-  int          _hot_cache_size;
-  int          _n_hot;
-  int          _hot_cache_idx;
-
-  int          _hot_cache_par_chunk_size;
-  volatile int _hot_cache_par_claimed_idx;
-
-  bool         _use_cache;
-
-  G1CardCounts _card_counts;
+  jbyte**           _hot_cache;
+  size_t            _hot_cache_size;
+
+  int               _hot_cache_par_chunk_size;
+
+  // Avoids false sharing when concurrently updating _hot_cache_idx or
+  // _hot_cache_par_claimed_idx. These are never updated at the same time
+  // thus it's not necessary to separate them as well
+  char _pad_before[DEFAULT_CACHE_LINE_SIZE];
+  volatile size_t _hot_cache_idx;
+  volatile size_t _hot_cache_par_claimed_idx;
+  char _pad_after[DEFAULT_CACHE_LINE_SIZE];
 
   // The number of cached cards a thread claims when flushing the cache
   static const int ClaimChunkSize = 32;
...
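The _pad_before/_pad_after arrays exist to keep the two volatile indices, which are written by many threads, off the cache lines holding the surrounding read-mostly fields, avoiding false sharing. A hedged sketch of the same layout trick follows; the 64-byte line size and the PaddedIndices name are assumptions here, since HotSpot derives its own platform-specific DEFAULT_CACHE_LINE_SIZE.

#include <atomic>
#include <cstddef>

// Layout sketch: pad before and after the hot indices so their cache line is
// not shared with neighbouring read-mostly fields (false sharing).
struct PaddedIndices {
  static const std::size_t kCacheLineSize = 64;   // assumed line size

  char _pad_before[kCacheLineSize];               // shields the preceding fields
  std::atomic<std::size_t> _hot_cache_idx;
  std::atomic<std::size_t> _par_claimed_idx;      // never updated at the same
                                                  // time as _hot_cache_idx, so
                                                  // the two may share one line
  char _pad_after[kCacheLineSize];                // shields the following fields
};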
@@ -113,16 +122,25 @@ class G1HotCardCache: public CHeapObj<mtGC> {
   void reset_hot_cache() {
     assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
     assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
-    _hot_cache_idx = 0; _n_hot = 0;
+    if (default_use_cache()) {
+      reset_hot_cache_internal();
+    }
   }
 
-  bool hot_cache_is_empty() { return _n_hot == 0; }
-
   // Zeros the values in the card counts table for entire committed heap
   void reset_card_counts();
 
   // Zeros the values in the card counts table for the given region
   void reset_card_counts(HeapRegion* hr);
+
+ private:
+  void reset_hot_cache_internal() {
+    assert(_hot_cache != NULL, "Logic");
+    _hot_cache_idx = 0;
+    for (size_t i = 0; i < _hot_cache_size; i++) {
+      _hot_cache[i] = NULL;
+    }
+  }
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
src/share/vm/runtime/mutexLocker.cpp
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -119,7 +119,6 @@ Monitor* SecondaryFreeList_lock = NULL;
 Mutex*   OldSets_lock = NULL;
 Monitor* RootRegionScan_lock = NULL;
 Mutex*   MMUTracker_lock = NULL;
-Mutex*   HotCardCache_lock = NULL;
 
 Monitor* GCTaskManager_lock = NULL;
...
@@ -200,7 +199,6 @@ void mutex_init() {
   def(OldSets_lock               , Mutex  , leaf     ,   true );
   def(RootRegionScan_lock        , Monitor, leaf     ,   true );
   def(MMUTracker_lock            , Mutex  , leaf     ,   true );
-  def(HotCardCache_lock          , Mutex  , special  ,   true );
   def(EvacFailureStack_lock      , Mutex  , nonleaf  ,   true );
 
   def(StringDedupQueue_lock      , Monitor, leaf     ,   true );
...
src/share/vm/runtime/mutexLocker.hpp
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -137,7 +137,6 @@ extern Mutex* OldSets_lock; // protects the old region sets
 extern Monitor* RootRegionScan_lock;   // used to notify that the CM threads have finished scanning the IM snapshot regions
 extern Mutex*   MMUTracker_lock;       // protects the MMU
                                        // tracker data structures
-extern Mutex*   HotCardCache_lock;     // protects the hot card cache
 
 extern Mutex*   Management_lock;       // a lock used to serialize JVM management
 extern Monitor* Service_lock;          // a lock used for service thread operation
...