openanolis / dragonwell8_hotspot
Commit 4a141040
Authored on Jul 14, 2010 by apangin

Merge
Parents: 36411c2c c32a21bc

Showing 17 changed files with 304 additions and 690 deletions (+304 -690).
(Whitespace-only changes are hidden in the diffs below.)
Changed files:

  src/share/vm/gc_implementation/includeDB_gc_parallelScavenge                    +1    -1
  src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp         +12   -137
  src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp         +18   -51
  src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp  +12   -3
  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp           +2    -2
  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp           +3    -6
  src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp          +29   -86
  src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp          +23   -40
  src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp                  +0    -1
  src/share/vm/opto/callnode.cpp                                                  +2    -2
  src/share/vm/opto/compile.cpp                                                   +14   -28
  src/share/vm/opto/compile.hpp                                                   +1    -0
  src/share/vm/opto/escape.cpp                                                    +51   -26
  src/share/vm/opto/escape.hpp                                                    +9    -4
  src/share/vm/prims/jvmtiCodeBlobEvents.cpp                                      +25   -181
  src/share/vm/utilities/taskqueue.cpp                                            +1    -71
  src/share/vm/utilities/taskqueue.hpp                                            +101  -51
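The GC-side diffs below all collapse pairs of a bounded task queue plus a separate GrowableArray overflow stack into a single OverflowTaskQueue, apparently introduced in taskqueue.hpp (+101/-51 above, not shown on this truncated page). A minimal stand-alone sketch of that idea, using illustrative names and std containers rather than HotSpot's lock-free stealable deque:

#include <cstddef>
#include <deque>
#include <vector>

template <typename E, size_t N = 1024>
class OverflowTaskQueueSketch {
  std::deque<E>  _queue;     // stands in for the bounded, stealable deque
  std::vector<E> _overflow;  // unbounded, thread-private overflow stack

 public:
  // push() never fails: elements spill to the overflow stack when full.
  void push(const E& e) {
    if (_queue.size() < N) _queue.push_back(e);
    else                   _overflow.push_back(e);
  }
  bool pop_local(E& e) {
    if (_queue.empty()) return false;
    e = _queue.back(); _queue.pop_back();
    return true;
  }
  bool pop_overflow(E& e) {
    if (_overflow.empty()) return false;
    e = _overflow.back(); _overflow.pop_back();
    return true;
  }
  size_t size() const { return _queue.size(); }
  bool taskqueue_empty() const { return _queue.empty(); }
  bool overflow_empty()  const { return _overflow.empty(); }
  bool is_empty() const { return taskqueue_empty() && overflow_empty(); }
};

// Drain pattern the new code uses everywhere: empty the private overflow
// stack first, so other threads can keep stealing from the bounded queue.
template <typename E, size_t N, typename Fn>
void drain(OverflowTaskQueueSketch<E, N>& q, Fn process) {
  do {
    E e;
    while (q.pop_overflow(e)) process(e);
    while (q.pop_local(e))    process(e);
  } while (!q.is_empty());
}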
src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
@@ -270,7 +270,7 @@ psParallelCompact.cpp parallelScavengeHeap.inline.hpp
 psParallelCompact.cpp pcTasks.hpp
 psParallelCompact.cpp psMarkSweep.hpp
 psParallelCompact.cpp psMarkSweepDecorator.hpp
-psParallelCompact.cpp psCompactionManager.hpp
+psParallelCompact.cpp psCompactionManager.inline.hpp
 psParallelCompact.cpp psPromotionManager.inline.hpp
 psParallelCompact.cpp psOldGen.hpp
 psParallelCompact.cpp psParallelCompact.hpp
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@ ParCompactionManager::ObjArrayTaskQueueSet*
 ParCompactionManager::_objarray_queues = NULL;
 ObjectStartArray*    ParCompactionManager::_start_array = NULL;
 ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
 RegionTaskQueueSet*  ParCompactionManager::_region_array = NULL;

 ParCompactionManager::ParCompactionManager() :
     _action(CopyAndUpdate) {
@@ -43,25 +43,9 @@ ParCompactionManager::ParCompactionManager() :
   _old_gen = heap->old_gen();
   _start_array = old_gen()->start_array();

   marking_stack()->initialize();
-
-  // We want the overflow stack to be permanent
-  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-
-  _objarray_queue.initialize();
-  _objarray_overflow_stack =
-    new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
-
-#ifdef USE_RegionTaskQueueWithOverflow
-  region_stack()->initialize();
-#else
-  region_stack()->initialize();
-
-  // We want the overflow stack to be permanent
-  _region_overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
-#endif
+  _objarray_stack.initialize();
+  region_stack()->initialize();

   // Note that _revisit_klass_stack is allocated out of the
   // C heap (as opposed to out of ResourceArena).
@@ -71,12 +55,9 @@ ParCompactionManager::ParCompactionManager() :
   // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
   // have to do for now until we are able to investigate a more optimal setting.
   _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
 }

 ParCompactionManager::~ParCompactionManager() {
-  delete _overflow_stack;
-  delete _objarray_overflow_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
   // _manager_array and _stack_array are statics
@@ -108,12 +89,8 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
     _manager_array[i] = new ParCompactionManager();
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
-    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
-#ifdef USE_RegionTaskQueueWithOverflow
-    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
-#else
+    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
     region_array()->register_queue(i, _manager_array[i]->region_stack());
-#endif
   }

   // The VMThread gets its own ParCompactionManager, which is not available
@@ -149,57 +126,6 @@ bool ParCompactionManager::should_reset_only() {
   return action() == ParCompactionManager::ResetObjects;
 }

-// For now save on a stack
-void ParCompactionManager::save_for_scanning(oop m) {
-  stack_push(m);
-}
-
-void ParCompactionManager::stack_push(oop obj) {
-  if(!marking_stack()->push(obj)) {
-    overflow_stack()->push(obj);
-  }
-}
-
-oop ParCompactionManager::retrieve_for_scanning() {
-  // Should not be used in the parallel case
-  ShouldNotReachHere();
-  return NULL;
-}
-
-// Save region on a stack
-void ParCompactionManager::save_for_processing(size_t region_index) {
-#ifdef ASSERT
-  const ParallelCompactData& sd = PSParallelCompact::summary_data();
-  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
-  assert(region_ptr->claimed(), "must be claimed");
-  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
-#endif
-  region_stack_push(region_index);
-}
-
-void ParCompactionManager::region_stack_push(size_t region_index) {
-#ifdef USE_RegionTaskQueueWithOverflow
-  region_stack()->save(region_index);
-#else
-  if(!region_stack()->push(region_index)) {
-    region_overflow_stack()->push(region_index);
-  }
-#endif
-}
-
-bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
-#ifdef USE_RegionTaskQueueWithOverflow
-  return region_stack()->retrieve(region_index);
-#else
-  // Should not be used in the parallel case
-  ShouldNotReachHere();
-  return false;
-#endif
-}
-
 ParCompactionManager*
 ParCompactionManager::gc_thread_compaction_manager(int index) {
   assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
@@ -218,8 +144,8 @@ void ParCompactionManager::follow_marking_stacks() {
   do {
     // Drain the overflow stack first, to allow stealing from the marking stack.
     oop obj;
-    while (!overflow_stack()->is_empty()) {
-      overflow_stack()->pop()->follow_contents(this);
+    while (marking_stack()->pop_overflow(obj)) {
+      obj->follow_contents(this);
     }
     while (marking_stack()->pop_local(obj)) {
       obj->follow_contents(this);
@@ -227,11 +153,10 @@ void ParCompactionManager::follow_marking_stacks() {
     // Process ObjArrays one at a time to avoid marking stack bloat.
     ObjArrayTask task;
-    if (!_objarray_overflow_stack->is_empty()) {
-      task = _objarray_overflow_stack->pop();
+    if (_objarray_stack.pop_overflow(task)) {
       objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
       k->oop_follow_contents(this, task.obj(), task.index());
-    } else if (_objarray_queue.pop_local(task)) {
+    } else if (_objarray_stack.pop_local(task)) {
       objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
       k->oop_follow_contents(this, task.obj(), task.index());
     }
@@ -240,68 +165,18 @@ void ParCompactionManager::follow_marking_stacks() {
   assert(marking_stacks_empty(), "Sanity");
 }

-void ParCompactionManager::drain_region_overflow_stack() {
-  size_t region_index = (size_t) -1;
-  while(region_stack()->retrieve_from_overflow(region_index)) {
-    PSParallelCompact::fill_and_update_region(this, region_index);
-  }
-}
-
 void ParCompactionManager::drain_region_stacks() {
-#ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-  MutableSpace* to_space = heap->young_gen()->to_space();
-  MutableSpace* old_space = heap->old_gen()->object_space();
-  MutableSpace* perm_space = heap->perm_gen()->object_space();
-#endif /* ASSERT */
-
-#if 1 // def DO_PARALLEL - the serial code hasn't been updated
   do {
-
-#ifdef USE_RegionTaskQueueWithOverflow
-    // Drain overflow stack first, so other threads can steal from
-    // claimed stack while we work.
-    size_t region_index = (size_t) -1;
-    while(region_stack()->retrieve_from_overflow(region_index)) {
+    // Drain overflow stack first so other threads can steal.
+    size_t region_index;
+    while (region_stack()->pop_overflow(region_index)) {
       PSParallelCompact::fill_and_update_region(this, region_index);
     }

-    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
-      PSParallelCompact::fill_and_update_region(this, region_index);
-    }
-  } while (!region_stack()->is_empty());
-#else
-    // Drain overflow stack first, so other threads can steal from
-    // claimed stack while we work.
-    while(!region_overflow_stack()->is_empty()) {
-      size_t region_index = region_overflow_stack()->pop();
-      PSParallelCompact::fill_and_update_region(this, region_index);
-    }
-
-    size_t region_index = -1;
-    // obj is a reference!!!
     while (region_stack()->pop_local(region_index)) {
-      // It would be nice to assert about the type of objects we might
-      // pop, but they can come from anywhere, unfortunately.
       PSParallelCompact::fill_and_update_region(this, region_index);
     }
-  } while((region_stack()->size() != 0) ||
-          (region_overflow_stack()->length() != 0));
-#endif
-
-#ifdef USE_RegionTaskQueueWithOverflow
-  assert(region_stack()->is_empty(), "Sanity");
-#else
-  assert(region_stack()->size() == 0, "Sanity");
-  assert(region_overflow_stack()->length() == 0, "Sanity");
-#endif
-#else
-  oop obj;
-  while (obj = retrieve_for_scanning()) {
-    obj->follow_contents(this);
-  }
-#endif
+  } while (!region_stack()->is_empty());
 }

 #ifdef ASSERT
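follow_marking_stacks() keeps the earlier "Process ObjArrays one at a time to avoid marking stack bloat" approach on top of the new queue: a partially-scanned array travels as an (obj, index) ObjArrayTask and is expanded a bounded slice at a time. A sketch of that resumable-task shape; the slice size and all names here are assumptions, not HotSpot values:

#include <cstddef>

struct ObjArrayTaskSketch {
  void*  array;  // the partially-scanned array
  size_t index;  // next element to scan
};

const size_t kSlice = 512;  // assumed per-pop slice size

// Scan one bounded slice; if the array is not finished, push a resumable
// task instead of pushing all remaining elements at once.
template <typename PushTask, typename ScanElem>
void follow_one_slice(const ObjArrayTaskSketch& t, size_t len,
                      PushTask push_task, ScanElem scan) {
  size_t end = (t.index + kSlice < len) ? t.index + kSlice : len;
  for (size_t i = t.index; i < end; ++i) {
    scan(t.array, i);
  }
  if (end < len) {
    ObjArrayTaskSketch rest = { t.array, end };
    push_task(rest);  // resume later; keeps the marking stack small
  }
}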
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,10 +59,10 @@ class ParCompactionManager : public CHeapObj {
  private:
   // 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB
-  #define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
-  typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue;
+  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
+  typedef OverflowTaskQueue<ObjArrayTask, QUEUE_SIZE> ObjArrayTaskQueue;
   typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
-  #undef OBJARRAY_QUEUE_SIZE
+  #undef QUEUE_SIZE

   static ParCompactionManager** _manager_array;
   static OopTaskQueueSet*       _stack_array;
@@ -72,23 +72,13 @@ class ParCompactionManager : public CHeapObj {
   static PSOldGen*              _old_gen;

  private:
-  OopTaskQueue                  _marking_stack;
-  GrowableArray<oop>*           _overflow_stack;
-
-  typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack;
-  ObjArrayTaskQueue             _objarray_queue;
-  ObjArrayOverflowStack*        _objarray_overflow_stack;
+  OverflowTaskQueue<oop>        _marking_stack;
+  ObjArrayTaskQueue             _objarray_stack;

   // Is there a way to reuse the _marking_stack for the
   // saving empty regions? For now just create a different
   // type of TaskQueue.
-#ifdef USE_RegionTaskQueueWithOverflow
-  RegionTaskQueueWithOverflow   _region_stack;
-#else
   RegionTaskQueue               _region_stack;
-  GrowableArray<size_t>*        _region_overflow_stack;
-#endif

 #if 1 // does this happen enough to need a per thread stack?
   GrowableArray<Klass*>*        _revisit_klass_stack;
@@ -107,16 +97,8 @@ private:
  protected:
   // Array of tasks. Needed by the ParallelTaskTerminator.
   static RegionTaskQueueSet* region_array()      { return _region_array; }
-  OopTaskQueue*  marking_stack()                 { return &_marking_stack; }
-  GrowableArray<oop>* overflow_stack()           { return _overflow_stack; }
-#ifdef USE_RegionTaskQueueWithOverflow
-  RegionTaskQueueWithOverflow* region_stack()    { return &_region_stack; }
-#else
-  RegionTaskQueue*  region_stack()               { return &_region_stack; }
-  GrowableArray<size_t>* region_overflow_stack() { return _region_overflow_stack; }
-#endif
+  OverflowTaskQueue<oop>* marking_stack()        { return &_marking_stack; }
+  RegionTaskQueue* region_stack()                { return &_region_stack; }

   // Pushes onto the marking stack. If the marking stack is full,
   // pushes onto the overflow stack.
@@ -124,11 +106,7 @@ private:
   // Do not implement an equivalent stack_pop. Deal with the
   // marking stack and overflow stack directly.

-  // Pushes onto the region stack. If the region stack is full,
-  // pushes onto the region overflow stack.
-  void region_stack_push(size_t region_index);
-
-public:
+ public:
   Action action() { return _action; }
   void set_action(Action v) { _action = v; }
@@ -157,22 +135,15 @@ public:
   GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
 #endif

-  // Save oop for later processing. Must not fail.
-  void save_for_scanning(oop m);
-  // Get a oop for scanning. If returns null, no oop were found.
-  oop retrieve_for_scanning();
-
-  inline void push_objarray(oop obj, size_t index);
-
-  // Save region for later processing. Must not fail.
-  void save_for_processing(size_t region_index);
-  // Get a region for processing. If returns null, no region were found.
-  bool retrieve_for_processing(size_t& region_index);
+  // Save for later processing. Must not fail.
+  inline void push(oop obj) { _marking_stack.push(obj); }
+  inline void push_objarray(oop objarray, size_t index);
+  inline void push_region(size_t index);

   // Access function for compaction managers
   static ParCompactionManager* gc_thread_compaction_manager(int index);

-  static bool steal(int queue_num, int* seed, Task& t) {
+  static bool steal(int queue_num, int* seed, oop& t) {
     return stack_array()->steal(queue_num, seed, t);
   }
@@ -180,8 +151,8 @@ public:
     return _objarray_queues->steal(queue_num, seed, t);
   }

-  static bool steal(int queue_num, int* seed, RegionTask& t) {
-    return region_array()->steal(queue_num, seed, t);
+  static bool steal(int queue_num, int* seed, size_t& region) {
+    return region_array()->steal(queue_num, seed, region);
   }

   // Process tasks remaining on any marking stack
@@ -191,9 +162,6 @@ public:
   // Process tasks remaining on any stack
   void drain_region_stacks();

-  // Process tasks remaining on any stack
-  void drain_region_overflow_stack();
-
   // Debugging support
 #ifdef ASSERT
   bool stacks_have_been_allocated();
@@ -208,6 +176,5 @@ inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
 }

 bool ParCompactionManager::marking_stacks_empty() const {
-  return _marking_stack.size() == 0 && _overflow_stack->is_empty() &&
-    _objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty();
+  return _marking_stack.is_empty() && _objarray_stack.is_empty();
 }
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
@@ -26,7 +26,16 @@ void ParCompactionManager::push_objarray(oop obj, size_t index)
 {
   ObjArrayTask task(obj, index);
   assert(task.is_valid(), "bad ObjArrayTask");
-  if (!_objarray_queue.push(task)) {
-    _objarray_overflow_stack->push(task);
-  }
+  _objarray_stack.push(task);
+}
+
+void ParCompactionManager::push_region(size_t index)
+{
+#ifdef ASSERT
+  const ParallelCompactData& sd = PSParallelCompact::summary_data();
+  ParallelCompactData::RegionData* const region_ptr = sd.region(index);
+  assert(region_ptr->claimed(), "must be claimed");
+  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
+#endif
+  region_stack()->push(index);
 }
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -2474,7 +2474,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
     for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
       if (sd.region(cur)->claim_unsafe()) {
         ParCompactionManager* cm = ParCompactionManager::manager_array(which);
-        cm->save_for_processing(cur);
+        cm->push_region(cur);

         if (TraceParallelOldGCCompactionPhase && Verbose) {
           const size_t count_mod_8 = fillable_regions & 7;
@@ -3138,7 +3138,7 @@ void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
     assert(cur->data_size() > 0, "region must have live data");
     cur->decrement_destination_count();
     if (cur < enqueue_end && cur->available() && cur->claim()) {
-      cm->save_for_processing(sd.region(cur));
+      cm->push_region(sd.region(cur));
     }
   }
 }
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1297,11 +1297,8 @@ inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (mark_bitmap()->is_unmarked(obj)) {
-      if (mark_obj(obj)) {
-        // This thread marked the object and owns the subsequent processing of it.
-        cm->save_for_scanning(obj);
-      }
+    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
+      cm->push(obj);
     }
   }
 }
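The collapsed condition in mark_and_push() preserves the claim idiom from the deleted comment: only the thread whose mark_obj() succeeds pushes the object, so each object is scanned exactly once. A minimal sketch of that idiom, with std::atomic standing in for the mark bitmap (all names illustrative, not HotSpot's):

#include <atomic>

// mark_obj() analogue: the compare-exchange succeeds for exactly one thread.
inline bool try_mark(std::atomic<bool>& mark_bit) {
  bool expected = false;
  return mark_bit.compare_exchange_strong(expected, true);
}

template <typename PushFn>
void mark_and_push_sketch(std::atomic<bool>& mark_bit, PushFn push, void* obj) {
  // Cheap unmarked check first, then the claiming CAS; the winner owns the
  // subsequent processing of obj and is the only pusher.
  if (!mark_bit.load(std::memory_order_relaxed) && try_mark(mark_bit)) {
    push(obj);
  }
}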
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,45 +94,13 @@ void PSPromotionManager::post_scavenge() {
   print_stats();
 #endif // PS_PM_STATS

   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     PSPromotionManager* manager = manager_array(i);
-
-    // the guarantees are a bit gratuitous but, if one fires, we'll
-    // have a better idea of what went wrong
-    if (i < ParallelGCThreads) {
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_depth()->length() <= 0),
-                "promotion manager overflow stack must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_breadth()->length() <= 0),
-                "promotion manager overflow stack must be empty");
-
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_depth()->size() <= 0),
-                "promotion manager claimed stack must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_breadth()->size() <= 0),
-                "promotion manager claimed stack must be empty");
-    } else {
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_depth()->length() <= 0),
-                "VM Thread promotion manager overflow stack "
-                "must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_breadth()->length() <= 0),
-                "VM Thread promotion manager overflow stack "
-                "must be empty");
-
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_depth()->size() <= 0),
-                "VM Thread promotion manager claimed stack "
-                "must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_breadth()->size() <= 0),
-                "VM Thread promotion manager claimed stack "
-                "must be empty");
+    if (UseDepthFirstScavengeOrder) {
+      assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
+    } else {
+      assert(manager->claimed_stack_breadth()->is_empty(), "should be empty");
     }
-
     manager->flush_labs();
   }
 }
@@ -181,15 +149,9 @@ PSPromotionManager::PSPromotionManager() {
   if (depth_first()) {
     claimed_stack_depth()->initialize();
     queue_size = claimed_stack_depth()->max_elems();
-    // We want the overflow stack to be permanent
-    _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
-    _overflow_stack_breadth = NULL;
   } else {
     claimed_stack_breadth()->initialize();
     queue_size = claimed_stack_breadth()->max_elems();
-    // We want the overflow stack to be permanent
-    _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-    _overflow_stack_depth = NULL;
   }

   _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
@@ -209,8 +171,7 @@ PSPromotionManager::PSPromotionManager() {
 }

 void PSPromotionManager::reset() {
-  assert(claimed_stack_empty(), "reset of non-empty claimed stack");
-  assert(overflow_stack_empty(), "reset of non-empty overflow stack");
+  assert(stacks_empty(), "reset of non-empty stack");

   // We need to get an assert in here to make sure the labs are always flushed.
@@ -243,7 +204,7 @@ void PSPromotionManager::reset() {

 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
   assert(depth_first(), "invariant");
-  assert(overflow_stack_depth() != NULL, "invariant");
+  assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant");
   totally_drain = totally_drain || _totally_drain;

 #ifdef ASSERT
@@ -254,41 +215,35 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
   MutableSpace* perm_space = heap->perm_gen()->object_space();
 #endif /* ASSERT */

+  OopStarTaskQueue* const tq = claimed_stack_depth();
   do {
     StarTask p;

     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    while(!overflow_stack_depth()->is_empty()) {
-      // linux compiler wants different overloaded operator= in taskqueue to
-      // assign to p that the other compilers don't like.
-      StarTask ptr = overflow_stack_depth()->pop();
-      process_popped_location_depth(ptr);
+    while (tq->pop_overflow(p)) {
+      process_popped_location_depth(p);
     }

     if (totally_drain) {
-      while (claimed_stack_depth()->pop_local(p)) {
+      while (tq->pop_local(p)) {
         process_popped_location_depth(p);
       }
     } else {
-      while (claimed_stack_depth()->size() > _target_stack_size &&
-             claimed_stack_depth()->pop_local(p)) {
+      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
         process_popped_location_depth(p);
       }
     }
-  } while ( (totally_drain && claimed_stack_depth()->size() > 0) ||
-            (overflow_stack_depth()->length() > 0) );
+  } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty());

-  assert(!totally_drain || claimed_stack_empty(), "Sanity");
-  assert(totally_drain ||
-         claimed_stack_depth()->size() <= _target_stack_size,
-         "Sanity");
-  assert(overflow_stack_empty(), "Sanity");
+  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
+  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
+  assert(tq->overflow_empty(), "Sanity");
 }

 void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
   assert(!depth_first(), "invariant");
-  assert(overflow_stack_breadth() != NULL, "invariant");
+  assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant");
   totally_drain = totally_drain || _totally_drain;

 #ifdef ASSERT
@@ -299,51 +254,39 @@ void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
   MutableSpace* perm_space = heap->perm_gen()->object_space();
 #endif /* ASSERT */

+  OverflowTaskQueue<oop>* const tq = claimed_stack_breadth();
   do {
     oop obj;

     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    while(!overflow_stack_breadth()->is_empty()) {
-      obj = overflow_stack_breadth()->pop();
+    while (tq->pop_overflow(obj)) {
       obj->copy_contents(this);
     }

     if (totally_drain) {
-      // obj is a reference!!!
-      while (claimed_stack_breadth()->pop_local(obj)) {
-        // It would be nice to assert about the type of objects we might
-        // pop, but they can come from anywhere, unfortunately.
+      while (tq->pop_local(obj)) {
         obj->copy_contents(this);
       }
     } else {
-      // obj is a reference!!!
-      while (claimed_stack_breadth()->size() > _target_stack_size &&
-             claimed_stack_breadth()->pop_local(obj)) {
-        // It would be nice to assert about the type of objects we might
-        // pop, but they can come from anywhere, unfortunately.
+      while (tq->size() > _target_stack_size && tq->pop_local(obj)) {
         obj->copy_contents(this);
       }
     }

     // If we could not find any other work, flush the prefetch queue
-    if (claimed_stack_breadth()->size() == 0 &&
-        (overflow_stack_breadth()->length() == 0)) {
+    if (tq->is_empty()) {
       flush_prefetch_queue();
     }
-  } while ((totally_drain && claimed_stack_breadth()->size() > 0) ||
-           (overflow_stack_breadth()->length() > 0));
+  } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty());

-  assert(!totally_drain || claimed_stack_empty(), "Sanity");
-  assert(totally_drain ||
-         claimed_stack_breadth()->size() <= _target_stack_size,
-         "Sanity");
-  assert(overflow_stack_empty(), "Sanity");
+  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
+  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
+  assert(tq->overflow_empty(), "Sanity");
 }

 void PSPromotionManager::flush_labs() {
-  assert(claimed_stack_empty(), "Attempt to flush lab with live stack");
-  assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack");
+  assert(stacks_empty(), "Attempt to flush lab with live stack");

   // If either promotion lab fills up, we can flush the
   // lab but not refill it, so check first.
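drain_stacks_depth()/drain_stacks_breadth() layer a partial-drain mode on the pattern sketched earlier: unless totally_drain is set, the local queue is only trimmed down to _target_stack_size so work stays available for stealing, while the overflow side is always emptied. Continuing the earlier illustrative sketch (the target value is a placeholder, not a HotSpot default):

template <typename E, size_t N, typename Fn>
void drain_to_target(OverflowTaskQueueSketch<E, N>& q, Fn process,
                     bool totally_drain, size_t target_stack_size) {
  do {
    E e;
    // Overflow first, so other threads can steal from the bounded queue.
    while (q.pop_overflow(e)) process(e);
    if (totally_drain) {
      while (q.pop_local(e)) process(e);
    } else {
      while (q.size() > target_stack_size && q.pop_local(e)) process(e);
    }
    // Same termination shape as the new code: keep going while a total
    // drain still sees local tasks, or while anything sits in overflow.
  } while ((totally_drain && !q.taskqueue_empty()) || !q.overflow_empty());
}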
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,9 +78,7 @@ class PSPromotionManager : public CHeapObj {
   PrefetchQueue                       _prefetch_queue;

   OopStarTaskQueue                    _claimed_stack_depth;
-  GrowableArray<StarTask>*            _overflow_stack_depth;
-  OopTaskQueue                        _claimed_stack_breadth;
-  GrowableArray<oop>*                 _overflow_stack_breadth;
+  OverflowTaskQueue<oop>              _claimed_stack_breadth;

   bool                                _depth_first;
   bool                                _totally_drain;
@@ -97,9 +95,6 @@ class PSPromotionManager : public CHeapObj {
   template <class T> inline void claim_or_forward_internal_depth(T* p);
   template <class T> inline void claim_or_forward_internal_breadth(T* p);

-  GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
-  GrowableArray<oop>* overflow_stack_breadth()    { return _overflow_stack_breadth; }
-
   // On the task queues we push reference locations as well as
   // partially-scanned arrays (in the latter case, we push an oop to
   // the from-space image of the array and the length on the
@@ -151,18 +146,19 @@ class PSPromotionManager : public CHeapObj {
 #if PS_PM_STATS
     ++_total_pushes;
+    int stack_length = claimed_stack_depth()->overflow_stack()->length();
 #endif // PS_PM_STATS

-    if (!claimed_stack_depth()->push(p)) {
-      overflow_stack_depth()->push(p);
+    claimed_stack_depth()->push(p);
+
 #if PS_PM_STATS
+    if (claimed_stack_depth()->overflow_stack()->length() != stack_length) {
       ++_overflow_pushes;
-      uint stack_length = (uint)overflow_stack_depth()->length();
-      if (stack_length > _max_overflow_length) {
-        _max_overflow_length = stack_length;
+      if ((uint)stack_length + 1 > _max_overflow_length) {
+        _max_overflow_length = (uint)stack_length + 1;
       }
-#endif // PS_PM_STATS
     }
+#endif // PS_PM_STATS
   }

   void push_breadth(oop o) {
@@ -170,18 +166,19 @@ class PSPromotionManager : public CHeapObj {
 #if PS_PM_STATS
     ++_total_pushes;
+    int stack_length = claimed_stack_breadth()->overflow_stack()->length();
 #endif // PS_PM_STATS

-    if (!claimed_stack_breadth()->push(o)) {
-      overflow_stack_breadth()->push(o);
+    claimed_stack_breadth()->push(o);
+
 #if PS_PM_STATS
+    if (claimed_stack_breadth()->overflow_stack()->length() != stack_length) {
       ++_overflow_pushes;
-      uint stack_length = (uint)overflow_stack_breadth()->length();
-      if (stack_length > _max_overflow_length) {
-        _max_overflow_length = stack_length;
+      if ((uint)stack_length + 1 > _max_overflow_length) {
+        _max_overflow_length = (uint)stack_length + 1;
       }
-#endif // PS_PM_STATS
     }
+#endif // PS_PM_STATS
   }

  protected:
@@ -199,12 +196,10 @@ class PSPromotionManager : public CHeapObj {
   static PSPromotionManager* vm_thread_promotion_manager();

   static bool steal_depth(int queue_num, int* seed, StarTask& t) {
-    assert(stack_array_depth() != NULL, "invariant");
     return stack_array_depth()->steal(queue_num, seed, t);
   }

-  static bool steal_breadth(int queue_num, int* seed, Task& t) {
-    assert(stack_array_breadth() != NULL, "invariant");
+  static bool steal_breadth(int queue_num, int* seed, oop& t) {
     return stack_array_breadth()->steal(queue_num, seed, t);
   }
@@ -214,7 +209,7 @@ class PSPromotionManager : public CHeapObj {
   OopStarTaskQueue* claimed_stack_depth() {
     return &_claimed_stack_depth;
   }
-  OopTaskQueue* claimed_stack_breadth() {
+  OverflowTaskQueue<oop>* claimed_stack_breadth() {
     return &_claimed_stack_breadth;
   }
@@ -246,25 +241,13 @@ class PSPromotionManager : public CHeapObj {
   void drain_stacks_depth(bool totally_drain);
   void drain_stacks_breadth(bool totally_drain);

-  bool claimed_stack_empty() {
-    if (depth_first()) {
-      return claimed_stack_depth()->size() <= 0;
-    } else {
-      return claimed_stack_breadth()->size() <= 0;
-    }
-  }
-  bool overflow_stack_empty() {
-    if (depth_first()) {
-      return overflow_stack_depth()->length() <= 0;
-    } else {
-      return overflow_stack_breadth()->length() <= 0;
-    }
+  bool depth_first() const {
+    return _depth_first;
   }
   bool stacks_empty() {
-    return claimed_stack_empty() && overflow_stack_empty();
-  }
-  bool depth_first() {
-    return _depth_first;
+    return depth_first() ?
+      claimed_stack_depth()->is_empty() :
+      claimed_stack_breadth()->is_empty();
   }

   inline void process_popped_location_depth(StarTask p);
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -414,7 +414,6 @@ bool PSScavenge::invoke_no_policy() {
   }

   // Finally, flush the promotion_manager's labs, and deallocate its stacks.
-  assert(promotion_manager->claimed_stack_empty(), "Sanity");
   PSPromotionManager::post_scavenge();

   promotion_failure_occurred = promotion_failed();
src/share/vm/opto/callnode.cpp
@@ -1524,7 +1524,7 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     ConnectionGraph *cgr = phase->C->congraph();
     PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
     if (cgr != NULL)
-      es = cgr->escape_state(obj_node(), phase);
+      es = cgr->escape_state(obj_node());
     if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
       // Mark it eliminated to update any counters
       this->set_eliminated();
@@ -1627,7 +1627,7 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     ConnectionGraph *cgr = phase->C->congraph();
     PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
     if (cgr != NULL)
-      es = cgr->escape_state(obj_node(), phase);
+      es = cgr->escape_state(obj_node());
     if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
       // Mark it eliminated to update any counters
       this->set_eliminated();
src/share/vm/opto/compile.cpp
@@ -637,34 +637,6 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   if (failing())  return;
   NOT_PRODUCT( verify_graph_edges(); )

-  // Perform escape analysis
-  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
-    TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
-    // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction.
-    PhaseGVN* igvn = initial_gvn();
-    Node* oop_null = igvn->zerocon(T_OBJECT);
-    Node* noop_null = igvn->zerocon(T_NARROWOOP);
-
-    _congraph = new(comp_arena()) ConnectionGraph(this);
-    bool has_non_escaping_obj = _congraph->compute_escape();
-
-#ifndef PRODUCT
-    if (PrintEscapeAnalysis) {
-      _congraph->dump();
-    }
-#endif
-    // Cleanup.
-    if (oop_null->outcnt() == 0)
-      igvn->hash_delete(oop_null);
-    if (noop_null->outcnt() == 0)
-      igvn->hash_delete(noop_null);
-
-    if (!has_non_escaping_obj) {
-      _congraph = NULL;
-    }
-
-    if (failing())  return;
-  }
-
   // Now optimize
   Optimize();
   if (failing())  return;
@@ -1601,6 +1573,20 @@ void Compile::Optimize() {

   if (failing())  return;

+  // Perform escape analysis
+  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
+    TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
+    ConnectionGraph::do_analysis(this, &igvn);
+
+    if (failing())  return;
+
+    igvn.optimize();
+    print_method("Iter GVN 3", 2);
+
+    if (failing())  return;
+
+  }
+
   // Loop transforms on the ideal graph.  Range Check Elimination,
   // peeling, unrolling, etc.
src/share/vm/opto/compile.hpp
@@ -362,6 +362,7 @@ class Compile : public Phase {
   Node* macro_node(int idx)             { return _macro_nodes->at(idx); }
   Node* predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
   ConnectionGraph* congraph()           { return _congraph;}
+  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
   void add_macro_node(Node * n) {
     //assert(n->is_macro(), "must be a macro node");
     assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
src/share/vm/opto/escape.cpp
浏览文件 @
4a141040
...
@@ -81,18 +81,18 @@ void PointsToNode::dump(bool print_state) const {
...
@@ -81,18 +81,18 @@ void PointsToNode::dump(bool print_state) const {
}
}
#endif
#endif
ConnectionGraph
::
ConnectionGraph
(
Compile
*
C
)
:
ConnectionGraph
::
ConnectionGraph
(
Compile
*
C
,
PhaseIterGVN
*
igvn
)
:
_nodes
(
C
->
comp_arena
(),
C
->
unique
(),
C
->
unique
(),
PointsToNode
()),
_nodes
(
C
->
comp_arena
(),
C
->
unique
(),
C
->
unique
(),
PointsToNode
()),
_processed
(
C
->
comp_arena
()),
_processed
(
C
->
comp_arena
()),
_collecting
(
true
),
_collecting
(
true
),
_compile
(
C
),
_compile
(
C
),
_igvn
(
igvn
),
_node_map
(
C
->
comp_arena
())
{
_node_map
(
C
->
comp_arena
())
{
_phantom_object
=
C
->
top
()
->
_idx
,
_phantom_object
=
C
->
top
()
->
_idx
,
add_node
(
C
->
top
(),
PointsToNode
::
JavaObject
,
PointsToNode
::
GlobalEscape
,
true
);
add_node
(
C
->
top
(),
PointsToNode
::
JavaObject
,
PointsToNode
::
GlobalEscape
,
true
);
// Add ConP(#NULL) and ConN(#NULL) nodes.
// Add ConP(#NULL) and ConN(#NULL) nodes.
PhaseGVN
*
igvn
=
C
->
initial_gvn
();
Node
*
oop_null
=
igvn
->
zerocon
(
T_OBJECT
);
Node
*
oop_null
=
igvn
->
zerocon
(
T_OBJECT
);
_oop_null
=
oop_null
->
_idx
;
_oop_null
=
oop_null
->
_idx
;
assert
(
_oop_null
<
C
->
unique
(),
"should be created already"
);
assert
(
_oop_null
<
C
->
unique
(),
"should be created already"
);
...
@@ -182,7 +182,7 @@ void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
...
@@ -182,7 +182,7 @@ void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
_processed
.
set
(
n
->
_idx
);
_processed
.
set
(
n
->
_idx
);
}
}
PointsToNode
::
EscapeState
ConnectionGraph
::
escape_state
(
Node
*
n
,
PhaseTransform
*
phase
)
{
PointsToNode
::
EscapeState
ConnectionGraph
::
escape_state
(
Node
*
n
)
{
uint
idx
=
n
->
_idx
;
uint
idx
=
n
->
_idx
;
PointsToNode
::
EscapeState
es
;
PointsToNode
::
EscapeState
es
;
...
@@ -207,22 +207,26 @@ PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform
...
@@ -207,22 +207,26 @@ PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform
if
(
n
->
uncast
()
->
_idx
>=
nodes_size
())
if
(
n
->
uncast
()
->
_idx
>=
nodes_size
())
return
PointsToNode
::
UnknownEscape
;
return
PointsToNode
::
UnknownEscape
;
PointsToNode
::
EscapeState
orig_es
=
es
;
// compute max escape state of anything this node could point to
// compute max escape state of anything this node could point to
VectorSet
ptset
(
Thread
::
current
()
->
resource_area
());
VectorSet
ptset
(
Thread
::
current
()
->
resource_area
());
PointsTo
(
ptset
,
n
,
phase
);
PointsTo
(
ptset
,
n
);
for
(
VectorSetI
i
(
&
ptset
);
i
.
test
()
&&
es
!=
PointsToNode
::
GlobalEscape
;
++
i
)
{
for
(
VectorSetI
i
(
&
ptset
);
i
.
test
()
&&
es
!=
PointsToNode
::
GlobalEscape
;
++
i
)
{
uint
pt
=
i
.
elem
;
uint
pt
=
i
.
elem
;
PointsToNode
::
EscapeState
pes
=
ptnode_adr
(
pt
)
->
escape_state
();
PointsToNode
::
EscapeState
pes
=
ptnode_adr
(
pt
)
->
escape_state
();
if
(
pes
>
es
)
if
(
pes
>
es
)
es
=
pes
;
es
=
pes
;
}
}
// cache the computed escape state
if
(
orig_es
!=
es
)
{
assert
(
es
!=
PointsToNode
::
UnknownEscape
,
"should have computed an escape state"
);
// cache the computed escape state
ptnode_adr
(
idx
)
->
set_escape_state
(
es
);
assert
(
es
!=
PointsToNode
::
UnknownEscape
,
"should have computed an escape state"
);
ptnode_adr
(
idx
)
->
set_escape_state
(
es
);
}
// orig_es could be PointsToNode::UnknownEscape
return
es
;
return
es
;
}
}
void
ConnectionGraph
::
PointsTo
(
VectorSet
&
ptset
,
Node
*
n
,
PhaseTransform
*
phase
)
{
void
ConnectionGraph
::
PointsTo
(
VectorSet
&
ptset
,
Node
*
n
)
{
VectorSet
visited
(
Thread
::
current
()
->
resource_area
());
VectorSet
visited
(
Thread
::
current
()
->
resource_area
());
GrowableArray
<
uint
>
worklist
;
GrowableArray
<
uint
>
worklist
;
...
@@ -990,7 +994,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
...
@@ -990,7 +994,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
GrowableArray
<
Node
*>
memnode_worklist
;
GrowableArray
<
Node
*>
memnode_worklist
;
GrowableArray
<
PhiNode
*>
orig_phis
;
GrowableArray
<
PhiNode
*>
orig_phis
;
PhaseGVN
*
igvn
=
_
compile
->
initial_gvn
()
;
PhaseGVN
*
igvn
=
_
igvn
;
uint
new_index_start
=
(
uint
)
_compile
->
num_alias_types
();
uint
new_index_start
=
(
uint
)
_compile
->
num_alias_types
();
Arena
*
arena
=
Thread
::
current
()
->
resource_area
();
Arena
*
arena
=
Thread
::
current
()
->
resource_area
();
VectorSet
visited
(
arena
);
VectorSet
visited
(
arena
);
...
@@ -1012,7 +1016,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
...
@@ -1012,7 +1016,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
CallNode
*
alloc
=
n
->
as_Call
();
CallNode
*
alloc
=
n
->
as_Call
();
// copy escape information to call node
// copy escape information to call node
PointsToNode
*
ptn
=
ptnode_adr
(
alloc
->
_idx
);
PointsToNode
*
ptn
=
ptnode_adr
(
alloc
->
_idx
);
PointsToNode
::
EscapeState
es
=
escape_state
(
alloc
,
igvn
);
PointsToNode
::
EscapeState
es
=
escape_state
(
alloc
);
// We have an allocation or call which returns a Java object,
// We have an allocation or call which returns a Java object,
// see if it is unescaped.
// see if it is unescaped.
if
(
es
!=
PointsToNode
::
NoEscape
||
!
ptn
->
_scalar_replaceable
)
if
(
es
!=
PointsToNode
::
NoEscape
||
!
ptn
->
_scalar_replaceable
)
...
@@ -1123,7 +1127,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
...
@@ -1123,7 +1127,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
}
}
}
else
if
(
n
->
is_AddP
())
{
}
else
if
(
n
->
is_AddP
())
{
ptset
.
Clear
();
ptset
.
Clear
();
PointsTo
(
ptset
,
get_addp_base
(
n
)
,
igvn
);
PointsTo
(
ptset
,
get_addp_base
(
n
));
assert
(
ptset
.
Size
()
==
1
,
"AddP address is unique"
);
assert
(
ptset
.
Size
()
==
1
,
"AddP address is unique"
);
uint
elem
=
ptset
.
getelem
();
// Allocation node's index
uint
elem
=
ptset
.
getelem
();
// Allocation node's index
if
(
elem
==
_phantom_object
)
{
if
(
elem
==
_phantom_object
)
{
...
@@ -1143,7 +1147,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
...
@@ -1143,7 +1147,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
continue
;
// already processed
continue
;
// already processed
}
}
ptset
.
Clear
();
ptset
.
Clear
();
PointsTo
(
ptset
,
n
,
igvn
);
PointsTo
(
ptset
,
n
);
if
(
ptset
.
Size
()
==
1
)
{
if
(
ptset
.
Size
()
==
1
)
{
uint
elem
=
ptset
.
getelem
();
// Allocation node's index
uint
elem
=
ptset
.
getelem
();
// Allocation node's index
if
(
elem
==
_phantom_object
)
{
if
(
elem
==
_phantom_object
)
{
...
@@ -1478,6 +1482,26 @@ bool ConnectionGraph::has_candidates(Compile *C) {
...
@@ -1478,6 +1482,26 @@ bool ConnectionGraph::has_candidates(Compile *C) {
return
false
;
return
false
;
}
}
void
ConnectionGraph
::
do_analysis
(
Compile
*
C
,
PhaseIterGVN
*
igvn
)
{
// Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
// to create space for them in ConnectionGraph::_nodes[].
Node
*
oop_null
=
igvn
->
zerocon
(
T_OBJECT
);
Node
*
noop_null
=
igvn
->
zerocon
(
T_NARROWOOP
);
ConnectionGraph
*
congraph
=
new
(
C
->
comp_arena
())
ConnectionGraph
(
C
,
igvn
);
// Perform escape analysis
if
(
congraph
->
compute_escape
())
{
// There are non escaping objects.
C
->
set_congraph
(
congraph
);
}
// Cleanup.
if
(
oop_null
->
outcnt
()
==
0
)
igvn
->
hash_delete
(
oop_null
);
if
(
noop_null
->
outcnt
()
==
0
)
igvn
->
hash_delete
(
noop_null
);
}
bool
ConnectionGraph
::
compute_escape
()
{
bool
ConnectionGraph
::
compute_escape
()
{
Compile
*
C
=
_compile
;
Compile
*
C
=
_compile
;
@@ -1492,7 +1516,7 @@ bool ConnectionGraph::compute_escape() {
   }
 
   GrowableArray<int> cg_worklist;
-  PhaseGVN* igvn = C->initial_gvn();
+  PhaseGVN* igvn = _igvn;
   bool has_allocations = false;
 
   // Push all useful nodes onto CG list and set their type.
@@ -1661,6 +1685,12 @@ bool ConnectionGraph::compute_escape() {
   _collecting = false;
   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
 
+#ifndef PRODUCT
+  if (PrintEscapeAnalysis) {
+    dump(); // Dump ConnectionGraph
+  }
+#endif
+
   bool has_scalar_replaceable_candidates = alloc_worklist.length() > 0;
   if (has_scalar_replaceable_candidates &&
       C->AliasLevel() >= 3 && EliminateAllocations) {
@@ -1671,10 +1701,6 @@ bool ConnectionGraph::compute_escape() {
     if (C->failing())  return false;
 
-    // Clean up after split unique types.
-    ResourceMark rm;
-    PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn());
-
     C->print_method("After Escape Analysis", 2);
 
 #ifdef ASSERT
@@ -1711,7 +1737,7 @@ void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTrans
   int offset = ptn->offset();
   Node* base = get_addp_base(n);
   ptset.Clear();
-  PointsTo(ptset, base, phase);
+  PointsTo(ptset, base);
   int ptset_size = ptset.Size();
 
   // Check if a oop field's initializing value is recorded and add
@@ -1889,7 +1915,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
           arg = get_addp_base(arg);
         }
         ptset.Clear();
-        PointsTo(ptset, arg, phase);
+        PointsTo(ptset, arg);
         for( VectorSetI j(&ptset); j.test(); ++j ) {
           uint pt = j.elem;
           set_escape_state(pt, PointsToNode::ArgEscape);
@@ -1934,7 +1960,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
         }
         ptset.Clear();
-        PointsTo(ptset, arg, phase);
+        PointsTo(ptset, arg);
         for( VectorSetI j(&ptset); j.test(); ++j ) {
           uint pt = j.elem;
           if (global_escapes) {
@@ -1970,7 +1996,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
         Node* arg = call->in(i)->uncast();
         set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
         ptset.Clear();
-        PointsTo(ptset, arg, phase);
+        PointsTo(ptset, arg);
         for( VectorSetI j(&ptset); j.test(); ++j ) {
           uint pt = j.elem;
           set_escape_state(pt, PointsToNode::GlobalEscape);
@@ -2433,7 +2459,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
       Node* base = get_addp_base(n);
       // Create a field edge to this node from everything base could point to.
       VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, base, phase);
+      PointsTo(ptset, base);
       for( VectorSetI i(&ptset); i.test(); ++i ) {
         uint pt = i.elem;
         add_field_edge(pt, n_idx, address_offset(n, phase));
@@ -2501,7 +2527,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
       // For everything "adr_base" could point to, create a deferred edge from
       // this node to each field with the same offset.
       VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, adr_base, phase);
+      PointsTo(ptset, adr_base);
       int offset = address_offset(adr, phase);
       for( VectorSetI i(&ptset); i.test(); ++i ) {
         uint pt = i.elem;
@@ -2594,7 +2620,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
       // For everything "adr_base" could point to, create a deferred edge
      // to "val" from each field with the same offset.
       VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, adr_base, phase);
+      PointsTo(ptset, adr_base);
       for( VectorSetI i(&ptset); i.test(); ++i ) {
         uint pt = i.elem;
         add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
@@ -2638,7 +2664,6 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
 
 #ifndef PRODUCT
 void ConnectionGraph::dump() {
-  PhaseGVN* igvn = _compile->initial_gvn();
   bool first = true;
 
   uint size = nodes_size();
@@ -2648,7 +2673,7 @@ void ConnectionGraph::dump() {
     if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
       continue;
-    PointsToNode::EscapeState es = escape_state(ptn->_node, igvn);
+    PointsToNode::EscapeState es = escape_state(ptn->_node);
     if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
       if (first) {
         tty->cr();
src/share/vm/opto/escape.hpp

@@ -227,6 +227,7 @@ private:
   uint _noop_null; // ConN(#NULL)
 
   Compile* _compile; // Compile object for current compilation
+  PhaseIterGVN* _igvn; // Value numbering
 
   // Address of an element in _nodes. Used when the element is to be modified
   PointsToNode* ptnode_adr(uint idx) const {
@@ -257,7 +258,7 @@ private:
   // walk the connection graph starting at the node corresponding to "n" and
   // add the index of everything it could point to, to "ptset". This may cause
   // Phi's encountered to get (re)processed (which requires "phase".)
-  void PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase);
+  void PointsTo(VectorSet &ptset, Node * n);
 
   // Edge manipulation. The "from_i" and "to_i" arguments are the
   // node indices of the source and destination of the edge
@@ -310,7 +311,7 @@ private:
   // Node: This assumes that escape analysis is run before
   // PhaseIterGVN creation
   void record_for_optimizer(Node *n) {
-    _compile->record_for_igvn(n);
+    _igvn->_worklist.push(n);
   }
 
   // Set the escape state of a node
@@ -320,16 +321,20 @@ private:
   void verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase);
 
 public:
-  ConnectionGraph(Compile *C);
+  ConnectionGraph(Compile *C, PhaseIterGVN *igvn);
 
   // Check for non-escaping candidates
   static bool has_candidates(Compile *C);
 
+  // Perform escape analysis
+  static void do_analysis(Compile *C, PhaseIterGVN *igvn);
+
   // Compute the escape information
   bool compute_escape();
 
   // escape state of a node
-  PointsToNode::EscapeState escape_state(Node *n, PhaseTransform *phase);
+  PointsToNode::EscapeState escape_state(Node *n);
 
   // other information we have collected
   bool is_scalar_replaceable(Node* n) {
     if (_collecting || (n->_idx >= nodes_size()))
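
The escape.hpp hunks show the underlying refactor: instead of threading a PhaseTransform* through PointsTo(), escape_state() and friends, the ConnectionGraph constructor now caches the PhaseIterGVN, and record_for_optimizer() pushes straight onto its worklist. A self-contained toy illustration of the same pattern (hypothetical names, not HotSpot code):

    #include <vector>

    struct Worklist {                       // stand-in for PhaseIterGVN::_worklist
      std::vector<int> items;
      void push(int n) { items.push_back(n); }
    };

    class Analyzer {                        // stand-in for ConnectionGraph
      Worklist* _worklist;                  // cached once at construction...
     public:
      explicit Analyzer(Worklist* w) : _worklist(w) {}
      void record(int node_id) { _worklist->push(node_id); }  // ...so no per-call phase parameter
    };

    int main() {
      Worklist w;
      Analyzer a(&w);                       // analogous to ConnectionGraph(C, igvn)
      a.record(42);                         // analogous to record_for_optimizer(n)
      return w.items.size() == 1 ? 0 : 1;
    }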
src/share/vm/prims/jvmtiCodeBlobEvents.cpp

 /*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,34 +118,13 @@ void CodeBlobCollector::do_blob(CodeBlob* cb) {
   for (int i=0; i<_global_code_blobs->length(); i++) {
     JvmtiCodeBlobDesc* scb = _global_code_blobs->at(i);
     if (addr == scb->code_begin()) {
+      ShouldNotReachHere();
       return;
     }
   }
 
-  // we must name the CodeBlob - some CodeBlobs already have names :-
-  // - stubs used by compiled code to call a (static) C++ runtime routine
-  // - non-relocatable machine code such as the interpreter, stubroutines, etc.
-  // - various singleton blobs
-  //
-  // others are unnamed so we create a name :-
-  // - OSR adapter (interpreter frame that has been on-stack replaced)
-  // - I2C and C2I adapters
-  const char* name = NULL;
-  if (cb->is_runtime_stub()) {
-    name = ((RuntimeStub*)cb)->name();
-  }
-  if (cb->is_buffer_blob()) {
-    name = ((BufferBlob*)cb)->name();
-  }
-  if (cb->is_deoptimization_stub() || cb->is_safepoint_stub()) {
-    name = ((SingletonBlob*)cb)->name();
-  }
-  if (cb->is_uncommon_trap_stub() || cb->is_exception_stub()) {
-    name = ((SingletonBlob*)cb)->name();
-  }
-
   // record the CodeBlob details as a JvmtiCodeBlobDesc
-  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(name, cb->instructions_begin(),
+  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(cb->name(), cb->instructions_begin(),
                                                  cb->instructions_end());
   _global_code_blobs->append(scb);
 }
@@ -197,7 +176,10 @@ void CodeBlobCollector::collect() {
 jvmtiError JvmtiCodeBlobEvents::generate_dynamic_code_events(JvmtiEnv* env) {
   CodeBlobCollector collector;
 
-  // first collect all the code blobs
+  // First collect all the code blobs.  This has to be done in a
+  // single pass over the code cache with CodeCache_lock held because
+  // there isn't any safe way to iterate over regular CodeBlobs since
+  // they can be freed at any point.
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     collector.collect();
@@ -213,166 +195,28 @@ jvmtiError JvmtiCodeBlobEvents::generate_dynamic_code_events(JvmtiEnv* env) {
 }
 
-// Support class to describe a nmethod in the CodeCache
-class nmethodDesc: public CHeapObj {
- private:
-  jmethodID _jmethod_id;
-  address _code_begin;
-  address _code_end;
-  jvmtiAddrLocationMap* _map;
-  jint _map_length;
- public:
-  nmethodDesc(jmethodID jmethod_id, address code_begin, address code_end,
-              jvmtiAddrLocationMap* map, jint map_length) {
-    _jmethod_id = jmethod_id;
-    _code_begin = code_begin;
-    _code_end = code_end;
-    _map = map;
-    _map_length = map_length;
-  }
-  jmethodID jmethod_id() const { return _jmethod_id; }
-  address code_begin() const { return _code_begin; }
-  address code_end() const { return _code_end; }
-  jvmtiAddrLocationMap* map() const { return _map; }
-  jint map_length() const { return _map_length; }
-};
-
-// Support class to collect a list of the nmethod CodeBlobs in
-// the CodeCache.
-//
-// Usage :-
-//
-// nmethodCollector collector;
-//
-// collector.collect();
-// JvmtiCodeBlobDesc* blob = collector.first();
-// while (blob != NULL) {
-//   :
-//   blob = collector.next();
-// }
-//
-class nmethodCollector: StackObj {
- private:
-  GrowableArray<nmethodDesc*>* _nmethods;     // collect nmethods
-  int _pos;                                   // iteration support
-
-  // used during a collection
-  static GrowableArray<nmethodDesc*>* _global_nmethods;
-  static void do_nmethod(nmethod* nm);
- public:
-  nmethodCollector() {
-    _nmethods = NULL;
-    _pos = -1;
-  }
-  ~nmethodCollector() {
-    if (_nmethods != NULL) {
-      for (int i=0; i<_nmethods->length(); i++) {
-        nmethodDesc* blob = _nmethods->at(i);
-        if (blob->map() != NULL) {
-          FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, blob->map());
-        }
-      }
-      delete _nmethods;
-    }
-  }
-
-  // collect list of nmethods in the cache
-  void collect();
-
-  // iteration support - return first code blob
-  nmethodDesc* first() {
-    assert(_nmethods != NULL, "not collected");
-    if (_nmethods->length() == 0) {
-      return NULL;
-    }
-    _pos = 0;
-    return _nmethods->at(0);
-  }
-
-  // iteration support - return next code blob
-  nmethodDesc* next() {
-    assert(_pos >= 0, "iteration not started");
-    if (_pos+1 >= _nmethods->length()) {
-      return NULL;
-    }
-    return _nmethods->at(++_pos);
-  }
-};
-
-// used during collection
-GrowableArray<nmethodDesc*>* nmethodCollector::_global_nmethods;
-
-// called for each nmethod in the CodeCache
-//
-// This function simply adds a descriptor for each nmethod to the global list.
-void nmethodCollector::do_nmethod(nmethod* nm) {
-  // ignore zombies
-  if (!nm->is_alive()) {
-    return;
-  }
-
-  assert(nm->method() != NULL, "checking");
-
-  // create the location map for the nmethod.
-  jvmtiAddrLocationMap* map;
-  jint map_length;
-  JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);
-
-  // record the nmethod details
-  nmethodDesc* snm = new nmethodDesc(nm->get_and_cache_jmethod_id(),
-                                     nm->code_begin(),
-                                     nm->code_end(),
-                                     map,
-                                     map_length);
-  _global_nmethods->append(snm);
-}
-
-// collects a list of nmethod in the CodeCache.
-//
-// The created list is growable array of nmethodDesc - each one describes
-// a nmethod and includs its JVMTI address location map.
-void nmethodCollector::collect() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(_global_nmethods == NULL, "checking");
-
-  // create the list
-  _global_nmethods = new (ResourceObj::C_HEAP) GrowableArray<nmethodDesc*>(100, true);
-
-  // any a descriptor for each nmethod to the list.
-  CodeCache::nmethods_do(do_nmethod);
-
-  // make the list the instance list
-  _nmethods = _global_nmethods;
-  _global_nmethods = NULL;
-}
-
 // Generate a COMPILED_METHOD_LOAD event for each nnmethod
 jvmtiError JvmtiCodeBlobEvents::generate_compiled_method_load_events(JvmtiEnv* env) {
   HandleMark hm;
-  nmethodCollector collector;
 
-  // first collect all nmethods
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    collector.collect();
-  }
-
-  // iterate over the list and post an event for each nmethod
-  nmethodDesc* nm_desc = collector.first();
-  while (nm_desc != NULL) {
-    jmethodID mid = nm_desc->jmethod_id();
-    assert(mid != NULL, "checking");
-    JvmtiExport::post_compiled_method_load(env, mid,
-                                           (jint)(nm_desc->code_end() - nm_desc->code_begin()),
-                                           nm_desc->code_begin(), nm_desc->map_length(),
-                                           nm_desc->map());
-    nm_desc = collector.next();
+  // Walk the CodeCache notifying for live nmethods.  The code cache
+  // may be changing while this is happening which is ok since newly
+  // created nmethod will notify normally and nmethods which are freed
+  // can be safely skipped.
+  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  nmethod* current = CodeCache::first_nmethod();
+  while (current != NULL) {
+    // Lock the nmethod so it can't be freed
+    nmethodLocker nml(current);
+
+    // Only notify for live nmethods
+    if (current->is_alive()) {
+      // Don't hold the lock over the notify or jmethodID creation
+      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      current->get_and_cache_jmethod_id();
+      JvmtiExport::post_compiled_method_load(current);
    }
+    current = CodeCache::next_nmethod(current);
  }
 
  return JVMTI_ERROR_NONE;
 }
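
The rewritten event generator holds CodeCache_lock only while stepping the iteration, and drops it (MutexUnlockerEx) around the JVMTI callback, relying on nmethodLocker to pin the current nmethod. A standalone sketch of that lock-drop-around-callback shape, using std::mutex stand-ins and assuming the container is append-only while the walk runs (the nmethodLocker pinning has no direct analogue here):

    #include <mutex>
    #include <vector>

    std::mutex cache_lock;            // stand-in for CodeCache_lock
    std::vector<int> cache;           // stand-in for the CodeCache

    template <typename Callback>
    void notify_all(Callback cb) {
      std::unique_lock<std::mutex> guard(cache_lock);
      for (size_t i = 0; i < cache.size(); ++i) {
        int item = cache[i];          // read the element while locked
        guard.unlock();               // analogous to MutexUnlockerEx
        cb(item);                     // post the event; may block or re-enter
        guard.lock();                 // re-acquire before advancing
      }
    }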
src/share/vm/utilities/taskqueue.cpp

 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -182,73 +182,3 @@ bool ObjArrayTask::is_valid() const {
       _index < objArrayOop(_obj)->length();
 }
 #endif // ASSERT
-
-bool RegionTaskQueueWithOverflow::is_empty() {
-  return (_region_queue.size() == 0) &&
-         (_overflow_stack->length() == 0);
-}
-
-bool RegionTaskQueueWithOverflow::stealable_is_empty() {
-  return _region_queue.size() == 0;
-}
-
-bool RegionTaskQueueWithOverflow::overflow_is_empty() {
-  return _overflow_stack->length() == 0;
-}
-
-void RegionTaskQueueWithOverflow::initialize() {
-  _region_queue.initialize();
-  assert(_overflow_stack == 0, "Creating memory leak");
-  _overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
-}
-
-void RegionTaskQueueWithOverflow::save(RegionTask t) {
-  if (TraceRegionTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
-  }
-  if (!_region_queue.push(t)) {
-    _overflow_stack->push(t);
-  }
-}
-
-// Note that using this method will retrieve all regions
-// that have been saved but that it will always check
-// the overflow stack.  It may be more efficient to
-// check the stealable queue and the overflow stack
-// separately.
-bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
-  bool result = retrieve_from_overflow(region_task);
-  if (!result) {
-    result = retrieve_from_stealable_queue(region_task);
-  }
-  if (TraceRegionTasksQueuing && Verbose && result) {
-    gclog_or_tty->print_cr("  CTQ: retrieve " PTR_FORMAT, result);
-  }
-  return result;
-}
-
-bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
-                                   RegionTask& region_task) {
-  bool result = _region_queue.pop_local(region_task);
-  if (TraceRegionTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
-  }
-  return result;
-}
-
-bool RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
-  bool result;
-  if (!_overflow_stack->is_empty()) {
-    region_task = _overflow_stack->pop();
-    result = true;
-  } else {
-    region_task = (RegionTask) NULL;
-    result = false;
-  }
-  if (TraceRegionTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
-  }
-  return result;
-}
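
The class removed here is subsumed by the OverflowTaskQueue template added in taskqueue.hpp below, with RegionTaskQueue re-typedef'd onto it. An illustrative mapping of the old entry points onto the new ones (not code from this commit):

    RegionTaskQueue queue;            // now an OverflowTaskQueue<size_t>
    queue.initialize();
    RegionTask task = 0;
    queue.push(task);                 // was save(t): push, spilling to overflow
    RegionTask t;
    if (!queue.pop_overflow(t)) {     // was retrieve_from_overflow(t)
      queue.pop_local(t);             // was retrieve_from_stealable_queue(t)
    }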
src/share/vm/utilities/taskqueue.hpp

 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -109,8 +109,9 @@ protected:
 public:
   TaskQueueSuper() : _bottom(0), _age() {}
 
-  // Return true if the TaskQueue contains any tasks.
-  bool peek() { return _bottom != _age.top(); }
+  // Return true if the TaskQueue contains/does not contain any tasks.
+  bool peek()     const { return _bottom != _age.top(); }
+  bool is_empty() const { return size() == 0; }
 
   // Return an estimate of the number of elements in the queue.
   // The "careful" version admits the possibility of pop_local/pop_global
@@ -165,18 +166,16 @@ public:
   void initialize();
 
-  // Push the task "t" on the queue. Returns "false" iff the queue is
-  // full.
+  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
   inline bool push(E t);
 
-  // If succeeds in claiming a task (from the 'local' end, that is, the
-  // most recently pushed task), returns "true" and sets "t" to that task.
-  // Otherwise, the queue is empty and returns false.
+  // Attempts to claim a task from the "local" end of the queue (the most
+  // recently pushed).  If successful, returns true and sets t to the task;
+  // otherwise, returns false (the queue is empty).
   inline bool pop_local(E& t);
 
-  // If succeeds in claiming a task (from the 'global' end, that is, the
-  // least recently pushed task), returns "true" and sets "t" to that task.
-  // Otherwise, the queue is empty and returns false.
+  // Like pop_local(), but uses the "global" end of the queue (the least
+  // recently pushed).
   bool pop_global(E& t);
 
   // Delete any resource associated with the queue.
@@ -198,7 +197,6 @@ GenericTaskQueue<E, N>::GenericTaskQueue() {
 
 template<class E, unsigned int N>
 void GenericTaskQueue<E, N>::initialize() {
   _elems = NEW_C_HEAP_ARRAY(E, N);
-  guarantee(_elems != NULL, "Allocation failed.");
 }
 
 template<class E, unsigned int N>
@@ -289,7 +287,87 @@ GenericTaskQueue<E, N>::~GenericTaskQueue() {
   FREE_C_HEAP_ARRAY(E, _elems);
 }
 
-// Inherits the typedef of "Task" from above.
+// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
+// elements that do not fit in the TaskQueue.
+//
+// Three methods from super classes are overridden:
+//
+// initialize() - initialize the super classes and create the overflow stack
+// push() - push onto the task queue or, if that fails, onto the overflow stack
+// is_empty() - return true if both the TaskQueue and overflow stack are empty
+//
+// Note that size() is not overridden--it returns the number of elements in the
+// TaskQueue, and does not include the size of the overflow stack.  This
+// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
+template<class E, unsigned int N = TASKQUEUE_SIZE>
+class OverflowTaskQueue: public GenericTaskQueue<E, N>
+{
+public:
+  typedef GrowableArray<E>       overflow_t;
+  typedef GenericTaskQueue<E, N> taskqueue_t;
+
+  OverflowTaskQueue();
+  ~OverflowTaskQueue();
+  void initialize();
+
+  inline overflow_t* overflow_stack() const { return _overflow_stack; }
+
+  // Push task t onto the queue or onto the overflow stack.  Return true.
+  inline bool push(E t);
+
+  // Attempt to pop from the overflow stack; return true if anything was popped.
+  inline bool pop_overflow(E& t);
+
+  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
+  inline bool overflow_empty()  const { return overflow_stack()->is_empty(); }
+  inline bool is_empty()        const {
+    return taskqueue_empty() && overflow_empty();
+  }
+
+private:
+  overflow_t* _overflow_stack;
+};
+
+template <class E, unsigned int N>
+OverflowTaskQueue<E, N>::OverflowTaskQueue()
+{
+  _overflow_stack = NULL;
+}
+
+template <class E, unsigned int N>
+OverflowTaskQueue<E, N>::~OverflowTaskQueue()
+{
+  if (_overflow_stack != NULL) {
+    delete _overflow_stack;
+    _overflow_stack = NULL;
+  }
+}
+
+template <class E, unsigned int N>
+void OverflowTaskQueue<E, N>::initialize()
+{
+  taskqueue_t::initialize();
+  assert(_overflow_stack == NULL, "memory leak");
+  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<E>(10, true);
+}
+
+template <class E, unsigned int N>
+bool OverflowTaskQueue<E, N>::push(E t)
+{
+  if (!taskqueue_t::push(t)) {
+    overflow_stack()->push(t);
+  }
+  return true;
+}
+
+template <class E, unsigned int N>
+bool OverflowTaskQueue<E, N>::pop_overflow(E& t)
+{
+  if (overflow_empty()) return false;
+  t = overflow_stack()->pop();
+  return true;
+}
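
A typical drain loop over the new class, roughly as the parallel-GC managers changed in this commit use it: empty the private overflow stack first (its elements are not stealable), then the deque. Illustrative only; process() is a hypothetical consumer:

    OverflowTaskQueue<StarTask> q;
    q.initialize();
    StarTask t;
    do {
      while (q.pop_overflow(t)) process(t);   // private overflow first
      while (q.pop_local(t))    process(t);   // then the stealable deque
    } while (!q.is_empty());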
 class TaskQueueSetSuper: public CHeapObj {
 protected:
   static int randomParkAndMiller(int* seed0);
@@ -323,11 +401,11 @@ public:
   T* queue(uint n);
 
-  // The thread with queue number "queue_num" (and whose random number seed
-  // is at "seed") is trying to steal a task from some other queue.  (It
-  // may try several queues, according to some configuration parameter.)
-  // If some steal succeeds, returns "true" and sets "t" the stolen task,
-  // otherwise returns false.
+  // The thread with queue number "queue_num" (and whose random number seed is
+  // at "seed") is trying to steal a task from some other queue.  (It may try
+  // several queues, according to some configuration parameter.)  If some steal
+  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
+  // false.
   bool steal(uint queue_num, int* seed, E& t);
 
   bool peek();
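
The steal() contract documented here is typically used in a worker loop: a thread that has emptied its own queue probes its peers. A hedged sketch (queue_set, my_id, seed, and process() are assumed to be supplied by the surrounding worker code):

    StarTask task;
    while (true) {
      if (queue_set->queue(my_id)->pop_local(task)) {
        process(task);                // local work first
      } else if (queue_set->steal(my_id, &seed, task)) {
        process(task);                // stolen from another worker's queue
      } else {
        break;                        // no local work and nothing to steal;
      }                               // a real worker would now try to terminate
    }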
@@ -507,7 +585,7 @@ GenericTaskQueue<E, N>::pop_local(E& t) {
   uint localBot = _bottom;
   // This value cannot be N-1. That can only occur as a result of
   // the assignment to bottom in this method.  If it does, this method
-  // resets the size( to 0 before the next call (which is sequential,
+  // resets the size to 0 before the next call (which is sequential,
   // since this is pop_local.)
   uint dirty_n_elems = dirty_size(localBot, _age.top());
   assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
@@ -533,8 +611,7 @@ GenericTaskQueue<E, N>::pop_local(E& t) {
   }
 }
 
-typedef oop Task;
-typedef GenericTaskQueue<oop>             OopTaskQueue;
+typedef GenericTaskQueue<Task>            OopTaskQueue;
 typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
 
 #ifdef _MSC_VER
@@ -615,35 +692,8 @@ private:
 #pragma warning(pop)
 #endif
 
-typedef GenericTaskQueue<StarTask>            OopStarTaskQueue;
+typedef OverflowTaskQueue<StarTask>           OopStarTaskQueue;
 typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;
 
-typedef size_t RegionTask;  // index for region
-typedef GenericTaskQueue<RegionTask>         RegionTaskQueue;
+typedef OverflowTaskQueue<size_t>            RegionTaskQueue;
 typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;
 
-class RegionTaskQueueWithOverflow: public CHeapObj {
- protected:
-  RegionTaskQueue            _region_queue;
-  GrowableArray<RegionTask>* _overflow_stack;
-
- public:
-  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
-  // Initialize both stealable queue and overflow
-  void initialize();
-  // Save first to stealable queue and then to overflow
-  void save(RegionTask t);
-  // Retrieve first from overflow and then from stealable queue
-  bool retrieve(RegionTask& region_index);
-  // Retrieve from stealable queue
-  bool retrieve_from_stealable_queue(RegionTask& region_index);
-  // Retrieve from overflow
-  bool retrieve_from_overflow(RegionTask& region_index);
-  bool is_empty();
-  bool stealable_is_empty();
-  bool overflow_is_empty();
-  uint stealable_size() { return _region_queue.size(); }
-  RegionTaskQueue* task_queue() { return &_region_queue; }
-};
-
-#define USE_RegionTaskQueueWithOverflow