openanolis / dragonwell8_hotspot
Commit 7f3be981
Authored Mar 12, 2014 by mgerdin
8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
Reviewed-by: brutisso, tschatzl, stefank
Parent: bdd04bef
Showing 6 changed files with 71 additions and 115 deletions
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp  +52 -1
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp  +6 -0
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp  +13 -0
src/share/vm/memory/iterator.hpp  +0 -13
src/share/vm/memory/space.cpp  +0 -93
src/share/vm/memory/space.hpp  +0 -8
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

@@ -853,7 +853,58 @@ void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                   UpwardsObjectClosure* cl) {
   assert_locked(freelistLock());
   NOT_PRODUCT(verify_objects_initialized());
-  Space::object_iterate_mem(mr, cl);
+  assert(!mr.is_empty(), "Should be non-empty");
+  // We use MemRegion(bottom(), end()) rather than used_region() below
+  // because the two are not necessarily equal for some kinds of
+  // spaces, in particular, certain kinds of free list spaces.
+  // We could use the more complicated but more precise:
+  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+  // but the slight imprecision seems acceptable in the assertion check.
+  assert(MemRegion(bottom(), end()).contains(mr),
+         "Should be within used space");
+  HeapWord* prev = cl->previous();   // max address from last time
+  if (prev >= mr.end()) { // nothing to do
+    return;
+  }
+  // This assert will not work when we go from cms space to perm
+  // space, and use same closure. Easy fix deferred for later. XXX YSR
+  // assert(prev == NULL || contains(prev), "Should be within space");
+
+  bool last_was_obj_array = false;
+  HeapWord *blk_start_addr, *region_start_addr;
+  if (prev > mr.start()) {
+    region_start_addr = prev;
+    blk_start_addr    = prev;
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block yet there may be still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
+  } else {
+    region_start_addr = mr.start();
+    blk_start_addr    = block_start(region_start_addr);
+  }
+  HeapWord* region_end_addr = mr.end();
+  MemRegion derived_mr(region_start_addr, region_end_addr);
+  while (blk_start_addr < region_end_addr) {
+    const size_t size = block_size(blk_start_addr);
+    if (block_is_obj(blk_start_addr)) {
+      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
+    } else {
+      last_was_obj_array = false;
+    }
+    blk_start_addr += size;
+  }
+  if (!last_was_obj_array) {
+    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
+           "Should be within (closed) used space");
+    assert(blk_start_addr > prev, "Invariant");
+    cl->set_previous(blk_start_addr); // min address for next time
+  }
 }

 // Callers of this iterator beware: The closure application should
 ...
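The interesting part of the moved code is its resume protocol: the closure remembers the highest address it has already walked past (previous()), so that when a caller applies it to a series of ascending, possibly overlapping MemRegions (for example, chunks of dirty cards), no block is visited twice. Below is a minimal standalone C++ model of that protocol, not HotSpot code: Addr, Block, iterate_mem and main() are hypothetical scaffolding invented for illustration, and the real code resolves blocks that straddle a region boundary more carefully via block_start().

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for HotSpot's HeapWord* and MemRegion.
using Addr = std::size_t;
struct MemRegion { Addr start, end; };          // [start, end)
struct Block     { Addr start; Addr size; };    // one block in the space

// Mirrors UpwardsObjectClosure: a closure with "memory" of the highest
// address it has already iterated past.
struct UpwardsClosure {
  Addr previous = 0;
  void do_block(const Block& b) { std::printf("visit block @ %zu\n", b.start); }
};

// Mirrors the shape of object_iterate_mem: resume at max(previous, mr.start),
// walk blocks upward, and record the minimum address for next time.
void iterate_mem(const std::vector<Block>& space, MemRegion mr,
                 UpwardsClosure* cl) {
  if (cl->previous >= mr.end) return;           // nothing new in this region
  Addr cur = (cl->previous > mr.start) ? cl->previous : mr.start;
  for (const Block& b : space) {                // blocks are in address order
    if (b.start < cur) continue;                // already visited last time
    if (b.start >= mr.end) break;               // past this region
    cl->do_block(b);
    cur = b.start + b.size;
  }
  cl->previous = cur;                           // min address for next time
}

int main() {
  std::vector<Block> space = {{0, 4}, {4, 8}, {12, 2}, {14, 6}};
  UpwardsClosure cl;
  // Two ascending, overlapping regions. The block at 4 extends to 12 and
  // intersects both regions, but the closure's memory ensures it is
  // visited exactly once.
  iterate_mem(space, {0, 10}, &cl);
  iterate_mem(space, {8, 20}, &cl);
  return 0;
}

Note how the second call skips everything below previous = 12 even though its region starts at 8; that is exactly the "it may skip such objects" caveat in the header comment below.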
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp

@@ -362,6 +362,12 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // obj_is_alive() to determine whether it is safe to iterate of
   // an object.
   void safe_object_iterate(ObjectClosure* blk);
+
+  // Iterate over all objects that intersect with mr, calling "cl->do_object"
+  // on each. There is an exception to this: if this closure has already
+  // been invoked on an object, it may skip such objects in some cases. This is
+  // Most likely to happen in an "upwards" (ascending address) iteration of
+  // MemRegions.
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

   // Requires that "mr" be entirely within the space.
 ...
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

@@ -1499,6 +1499,19 @@ class FalseBitMapClosure: public BitMapClosure {
   }
 };

+// A version of ObjectClosure with "memory" (see _previous_address below)
+class UpwardsObjectClosure: public BoolObjectClosure {
+  HeapWord* _previous_address;
+ public:
+  UpwardsObjectClosure() : _previous_address(NULL) { }
+  void set_previous(HeapWord* addr) { _previous_address = addr; }
+  HeapWord* previous()              { return _previous_address; }
+  // A return value of "true" can be used by the caller to decide
+  // if this object's end should *NOT* be recorded in
+  // _previous_address above.
+  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
+};
+
 // This closure is used during the second checkpointing phase
 // to rescan the marked objects on the dirty cards in the mod
 // union table and the card table proper. It's invoked via
 ...
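The do_object_bm contract (return true to keep the object's end out of _previous_address) lets a closure that could not finish an object within the current region, such as a large object array scanned piece by piece, arrange to be handed that object again when a later region is processed; this appears to be why the caller's flag is named last_was_obj_array. The sketch below is a hedged standalone model of that handshake: the Model types, PartialArrayScanClosure, and main() are hypothetical, invented for illustration, and do not exist in HotSpot.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for HotSpot's HeapWord*, MemRegion, and oop types.
using HeapWord = char;
struct MemRegion { HeapWord* start; HeapWord* end; };               // [start, end)
struct Obj       { HeapWord* addr; std::size_t words; bool is_obj_array; };

// Same shape as the UpwardsObjectClosure added by this commit.
class UpwardsObjectClosureModel {
  HeapWord* _previous_address = nullptr;
 public:
  virtual ~UpwardsObjectClosureModel() {}
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous()              { return _previous_address; }
  virtual bool do_object_bm(const Obj& obj, MemRegion mr) = 0;
};

// Hypothetical concrete closure: it treats object arrays as only partially
// scanned within the current region, so it returns true for them, telling
// the caller NOT to advance _previous_address past their end -- the array
// is then presented to the closure again when the next region is processed.
class PartialArrayScanClosure : public UpwardsObjectClosureModel {
 public:
  bool do_object_bm(const Obj& obj, MemRegion mr) override {
    std::printf("scan object at %p within [%p, %p)\n",
                (void*)obj.addr, (void*)mr.start, (void*)mr.end);
    return obj.is_obj_array;  // true => "do NOT record my end as previous"
  }
};

int main() {
  HeapWord heap[64] = {};
  Obj plain = { heap,      16, false };
  Obj array = { heap + 16, 48, true  };
  MemRegion mr = { heap, heap + 32 };
  PartialArrayScanClosure cl;

  // Caller-side protocol, mirroring the tail of object_iterate_mem:
  bool last_was_obj_array = cl.do_object_bm(plain, mr);
  last_was_obj_array = cl.do_object_bm(array, mr);
  if (!last_was_obj_array) {
    cl.set_previous(array.addr + array.words);  // min address for next time
  }
  // Because the array returned true, previous() is still null here, so the
  // next region's iteration will start low enough to visit the array again.
  std::printf("previous = %p\n", (void*)cl.previous());
  return 0;
}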
src/share/vm/memory/iterator.hpp

@@ -177,19 +177,6 @@ public:
   ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
 };

-// A version of ObjectClosure with "memory" (see _previous_address below)
-class UpwardsObjectClosure: public BoolObjectClosure {
-  HeapWord* _previous_address;
- public:
-  UpwardsObjectClosure() : _previous_address(NULL) { }
-  void set_previous(HeapWord* addr) { _previous_address = addr; }
-  HeapWord* previous()              { return _previous_address; }
-  // A return value of "true" can be used by the caller to decide
-  // if this object's end should *NOT* be recorded in
-  // _previous_address above.
-  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
-};
-
 // A version of ObjectClosure that is expected to be robust
 // in the face of possibly uninitialized objects.
 class ObjectClosureCareful: public ObjectClosure {
 ...
src/share/vm/memory/space.cpp

@@ -562,104 +562,11 @@ HeapWord* Space::object_iterate_careful_m(MemRegion mr,
   return bottom();
 }

-void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  // We use MemRegion(bottom(), end()) rather than used_region() below
-  // because the two are not necessarily equal for some kinds of
-  // spaces, in particular, certain kinds of free list spaces.
-  // We could use the more complicated but more precise:
-  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
-  // but the slight imprecision seems acceptable in the assertion check.
-  assert(MemRegion(bottom(), end()).contains(mr),
-         "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // This assert will not work when we go from cms space to perm
-  // space, and use same closure. Easy fix deferred for later. XXX YSR
-  // assert(prev == NULL || contains(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *blk_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    blk_start_addr    = prev;
-    // The previous invocation may have pushed "prev" beyond the
-    // last allocated block yet there may be still be blocks
-    // in this region due to a particular coalescing policy.
-    // Relax the assertion so that the case where the unallocated
-    // block is maintained and "prev" is beyond the unallocated
-    // block does not cause the assertion to fire.
-    assert((BlockOffsetArrayUseUnallocatedBlock &&
-            (!is_in(prev))) ||
-           (blk_start_addr == block_start(region_start_addr)), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    blk_start_addr    = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (blk_start_addr < region_end_addr) {
-    const size_t size = block_size(blk_start_addr);
-    if (block_is_obj(blk_start_addr)) {
-      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
-    } else {
-      last_was_obj_array = false;
-    }
-    blk_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(blk_start_addr > prev, "Invariant");
-    cl->set_previous(blk_start_addr); // min address for next time
-  }
-}
-
 bool Space::obj_is_alive(const HeapWord* p) const {
   assert(block_is_obj(p), "The address should point to an object");
   return true;
 }

-void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  assert(used_region().contains(mr), "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // See comment above (in more general method above) in case you
-  // happen to use this method.
-  assert(prev == NULL || is_in_reserved(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *obj_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    obj_start_addr    = prev;
-    assert(obj_start_addr == block_start(region_start_addr), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    obj_start_addr    = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (obj_start_addr < region_end_addr) {
-    oop obj = oop(obj_start_addr);
-    const size_t size = obj->size();
-    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
-    obj_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(obj_start_addr > prev, "Invariant");
-    cl->set_previous(obj_start_addr); // min address for next time
-  }
-}
-
 #if INCLUDE_ALL_GCS
 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                              \
 ...
src/share/vm/memory/space.hpp

@@ -188,13 +188,6 @@ class Space: public CHeapObj<mtGC> {
   // objects whose internal references point to objects in the space.
   virtual void safe_object_iterate(ObjectClosure* blk) = 0;

-  // Iterate over all objects that intersect with mr, calling "cl->do_object"
-  // on each. There is an exception to this: if this closure has already
-  // been invoked on an object, it may skip such objects in some cases. This is
-  // Most likely to happen in an "upwards" (ascending address) iteration of
-  // MemRegions.
-  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-
   // Iterate over as many initialized objects in the space as possible,
   // calling "cl.do_object_careful" on each. Return NULL if all objects
   // in the space (at the start of the iteration) were iterated over.
 ...
@@ -558,7 +551,6 @@ class ContiguousSpace: public CompactibleSpace {
   // For contiguous spaces this method will iterate safely over objects
   // in the space (i.e., between bottom and top) when at a safepoint.
   void safe_object_iterate(ObjectClosure* blk);
-  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
   // iterates on objects up to the safe limit
   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
   HeapWord* concurrent_iteration_safe_limit() {
 ...